diff --git a/README.md b/README.md index 1a011dd1..7b4f8359 100644 --- a/README.md +++ b/README.md @@ -77,7 +77,8 @@ and device, creating anything that is missing from NetBox while skipping entries > > `NETBOX_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx` > -> For v2 tokens, you need to include prefix "nbt_", the bearer key (represented here by capital X-es), a dot, and finally the secret token (represented by lowercase x-es): +> For v2 tokens, you need to include prefix "nbt_", the bearer key (represented here by capital +> X-es), a dot, and finally the secret token (represented by lowercase x-es): > >`NETBOX_TOKEN=nbt_XXXXXXXXXXXX.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx` @@ -105,6 +106,21 @@ uv run nb-dt-import.py --slugs ap4433a,ap7526 # Imports two specific PDUs uv run nb-dt-import.py --vendors "Palo Alto" --slugs 440 ``` +#### All Arguments + +| Argument | Default | Description | +| --- | --- | --- | +| `--vendors` | all | Comma- or space-separated list of vendors to import (e.g. `apc cisco`) | +| `--slugs` | all | Comma- or space-separated device-type slug substrings to filter (partial match) | +| `--url` / `--git` | community library | Git URL of the device-type library to clone | +| `--branch` | `master` | Git branch to check out from the repo | +| `--verbose` | off | Print verbose output (individual create/update messages) | +| `--show-remaining-time` | off | Show estimated remaining time in progress bars | +| `--only-new` | off | Only create new types, skip all existing ones (mutually exclusive with `--update`) | +| `--update` | off | Update existing types with changes from the repo (mutually exclusive with `--only-new`) | +| `--remove-components` | off | Delete components missing from YAML when used with `--update`. **Destructive.** | +| `--force-resolve-conflicts` | off | Automatically resolve NetBox constraint failures during `--update`. **Destructive.** See below. 
| + #### Update Mode By default, the script only creates new device types and skips existing ones. To update @@ -147,6 +163,39 @@ no longer present in the YAML definition. - Review the change detection report before enabling component removal - Test on a staging NetBox instance first if possible +#### Conflict Resolution (Use with Caution) + +> **WARNING**: `--force-resolve-conflicts` performs destructive NetBox operations automatically. + +Some NetBox business-logic constraints block updates even when no live device instances use the +affected type. For example, changing a device type's `subdevice_role` from `parent` to `child` +requires deleting all device-bay templates first. To allow the script to perform that remediation +automatically: + +```shell +uv run nb-dt-import.py --update --force-resolve-conflicts +``` + +**What it does**: + +- When a PATCH fails with a constraint error, the script checks whether any live devices + reference the affected type +- If **no** live devices reference it, the blocking objects (e.g. device-bay templates) are + deleted and the PATCH is retried +- If live devices **do** reference it, the update is skipped and logged as a failure — no + destructive action is taken + +**Safety guarantees**: + +- Never deletes blocking objects when live device instances exist +- Requires `--update` (will error without it) +- All auto-resolved and skipped items appear in the run summary + +**When to use**: + +- After converting device types from parent to child (or vice versa) +- When the script reports constraint failures that block property updates + ## Contributing We're happy about any pull requests! 
diff --git a/core/__init__.py b/core/__init__.py index e69de29b..27f230a5 100644 --- a/core/__init__.py +++ b/core/__init__.py @@ -0,0 +1 @@ +"""Core package for the Device Type Library Import tool.""" diff --git a/core/change_detector.py b/core/change_detector.py index 72482cb8..6fa273df 100644 --- a/core/change_detector.py +++ b/core/change_detector.py @@ -4,11 +4,14 @@ in the repository and existing data in NetBox, supporting the --update workflow. """ +import os from dataclasses import dataclass, field from typing import Any, List, Optional from enum import Enum from core.normalization import normalize_values +from core.formatting import log_property_diffs +from core.schema_reader import load_properties_for_type class ChangeType(Enum): @@ -72,8 +75,12 @@ class ChangeReport: unchanged_count: int = 0 -# Device type properties that can be compared and updated -DEVICE_TYPE_PROPERTIES = [ +# Device type properties that can be compared and updated. +# Loaded from the cloned devicetype-library schema at runtime; the list below +# serves as a fallback when the schema is not yet available. Identity fields +# (manufacturer, model, slug) and image fields (front_image, rear_image) are +# excluded by the schema reader — images are handled separately via IMAGE_PROPERTIES. 
+_DEVICE_TYPE_PROPERTIES_FALLBACK = [ "u_height", "part_number", "is_full_depth", @@ -81,9 +88,54 @@ class ChangeReport: "airflow", "weight", "weight_unit", + "description", "comments", ] +_DEVICE_TYPE_SCHEMA_EXCLUDE = {"manufacturer", "model", "slug", "front_image", "rear_image"} + + +def _load_device_type_properties(): + """Load device type scalar properties from the schema, falling back to hardcoded list.""" + try: + from core import settings as _settings + + props = load_properties_for_type( + os.path.join(_settings.REPO_PATH, "schema"), + "devicetype", + exclude=_DEVICE_TYPE_SCHEMA_EXCLUDE, + ) + return props if props else list(_DEVICE_TYPE_PROPERTIES_FALLBACK) + except (ImportError, AttributeError): + return list(_DEVICE_TYPE_PROPERTIES_FALLBACK) + + +_CACHED_DEVICE_TYPE_PROPERTIES = None + + +def get_device_type_properties(): + """Lazily resolve and cache the device-type schema properties. + + Resolved at first call rather than at import time so the schema lookup + sees a populated repo even when ``change_detector`` is imported before + the repo is cloned (e.g., test bootstrap, fresh CI environments). + """ + global _CACHED_DEVICE_TYPE_PROPERTIES + if _CACHED_DEVICE_TYPE_PROPERTIES is None: + _CACHED_DEVICE_TYPE_PROPERTIES = _load_device_type_properties() + return _CACHED_DEVICE_TYPE_PROPERTIES + + +# Backwards-compatible eager constant. Prefer ``get_device_type_properties()`` +# in code paths that may run before the repo schema is available. +DEVICE_TYPE_PROPERTIES = _load_device_type_properties() + +# Sentinel used to distinguish "attribute missing from record" from a genuine +# None/null value returned by NetBox. When a property is in the schema-derived +# comparison list but was not fetched by the GraphQL query, getattr returns this +# sentinel and the property is skipped to avoid false-positive change detection. +_MISSING = object() + # Image properties: YAML uses boolean flags, NetBox stores URL strings. 
# Only existence is compared (YAML=true vs NetBox=empty). IMAGE_PROPERTIES = ["front_image", "rear_image"] @@ -92,35 +144,24 @@ class ChangeReport: COMPONENT_TYPES = { "interfaces": ( "interface_templates", - ["name", "type", "mgmt_only", "label", "enabled", "poe_mode", "poe_type"], + ["name", "type", "mgmt_only", "label", "enabled", "poe_mode", "poe_type", "description", "rf_role"], ), "power-ports": ( "power_port_templates", - ["name", "type", "maximum_draw", "allocated_draw", "label"], + ["name", "type", "maximum_draw", "allocated_draw", "label", "description"], ), - "console-ports": ("console_port_templates", ["name", "type", "label"]), - "power-outlets": ("power_outlet_templates", ["name", "type", "feed_leg", "label"]), + "console-ports": ("console_port_templates", ["name", "type", "label", "description"]), + "power-outlets": ("power_outlet_templates", ["name", "type", "feed_leg", "label", "description"]), "console-server-ports": ( "console_server_port_templates", - ["name", "type", "label"], + ["name", "type", "label", "description"], ), - "rear-ports": ("rear_port_templates", ["name", "type", "positions", "label"]), - "front-ports": ("front_port_templates", ["name", "type", "_mappings", "label"]), - "device-bays": ("device_bay_templates", ["name", "label"]), - "module-bays": ("module_bay_templates", ["name", "position", "label"]), -} - -# Aliases for YAML keys that map to the same component type. 
-# alias -> canonical key -COMPONENT_ALIASES = { - "power-port": "power-ports", + "rear-ports": ("rear_port_templates", ["name", "type", "positions", "label", "description", "color"]), + "front-ports": ("front_port_templates", ["name", "type", "_mappings", "label", "description", "color"]), + "device-bays": ("device_bay_templates", ["name", "label", "description"]), + "module-bays": ("module_bay_templates", ["name", "position", "label", "description"]), } -# canonical key -> list of aliases -COMPONENT_ALIASES_BY_CANONICAL = {} -for alias, canonical_key in COMPONENT_ALIASES.items(): - COMPONENT_ALIASES_BY_CANONICAL.setdefault(canonical_key, []).append(alias) - class ChangeDetector: """Detects changes between YAML device types and NetBox cached data.""" @@ -198,15 +239,20 @@ def _compare_device_type_properties(self, yaml_data: dict, netbox_dt) -> List[Pr """ changes = [] - for prop in DEVICE_TYPE_PROPERTIES: + for prop in get_device_type_properties(): # Only compare properties explicitly present in YAML; # an omitted property means the YAML doesn't manage it, # matching the component semantics (absent key != removal). if prop not in yaml_data: continue + # Only compare properties that were actually fetched from NetBox. + # If the GraphQL query doesn't include a field yet, skip it to + # avoid false-positive change detections. 
+ netbox_value = getattr(netbox_dt, prop, _MISSING) + if netbox_value is _MISSING: + continue yaml_value = yaml_data.get(prop) - netbox_value = getattr(netbox_dt, prop, None) yaml_value, netbox_value = normalize_values(yaml_value, netbox_value) @@ -278,14 +324,6 @@ def _compare_components( for yaml_key, (cache_name, properties) in COMPONENT_TYPES.items(): yaml_components = list(yaml_data.get(yaml_key) or []) - # Check whether the canonical key or any alias is actually present in YAML - aliases_for_key = COMPONENT_ALIASES_BY_CANONICAL.get(yaml_key, []) - key_present = yaml_key in yaml_data or any(alias in yaml_data for alias in aliases_for_key) - - # Merge components from any aliases that map to this canonical key - for alias in aliases_for_key: - yaml_components.extend(yaml_data.get(alias) or []) - # Get cached components for this device type cached = self.device_types.cached_components.get(cache_name, {}) existing_components = cached.get(cache_key, {}) @@ -296,7 +334,7 @@ def _compare_components( # Check for removed components (exist in NetBox but not in YAML) # Only flag removals when the YAML explicitly defines this component type; # a missing key means the YAML doesn't manage this type at all. - if key_present: + if yaml_key in yaml_data: for existing_name in existing_components.keys(): if existing_name not in yaml_component_names: changes.append( @@ -371,7 +409,11 @@ def _compare_component_properties( ) for m in yaml_mappings ) - canonical = getattr(netbox_comp, "_mappings_canonical", None) or [] + canonical = getattr(netbox_comp, "_mappings_canonical", None) + if canonical is None: + # GraphQL response lacked both mappings and rear_port_position; + # treat as unmanaged to avoid a false COMPONENT_CHANGED. 
+ continue has_names = any(m.get("rear_port_name") is not None for m in canonical) if has_names: # NetBox >= 4.5: compare with rear port names @@ -415,7 +457,12 @@ def _compare_component_properties( continue yaml_value = yaml_comp.get(prop) - netbox_value = getattr(netbox_comp, prop, None) + netbox_value = getattr(netbox_comp, prop, _MISSING) + if netbox_value is _MISSING: + # NetBox version / GraphQL selection didn't return this field; + # treat it as unmanaged to avoid a false COMPONENT_CHANGED that + # would PATCH an unsupported attribute. + continue yaml_value, netbox_value = normalize_values(yaml_value, netbox_value) @@ -430,14 +477,11 @@ def _compare_component_properties( return changes - def _log_modified_summary(self, report: ChangeReport) -> int: + def _log_modified_summary(self, report: ChangeReport) -> None: """Compute category counts and log the summary section for modified device types. Args: report: The ChangeReport containing modified_device_types to summarise. - - Returns: - The count of device types that have removed components. 
""" ct_props = 0 ct_images = 0 @@ -471,20 +515,13 @@ def _log_modified_summary(self, report: ChangeReport) -> int: if parts: self.handle.log(f" Breakdown: {', '.join(parts)}") - return ct_removed - def _log_property_diffs(self, prop_changes: List[PropertyChange], indent: str) -> None: """Emit diff-u style lines for *prop_changes* at the given *indent*.""" - if not prop_changes: - return - pad = min(max(len(pc.property_name) for pc in prop_changes), 30) - for pc in prop_changes: - name = f"{pc.property_name}:{'':{max(0, pad - len(pc.property_name))}}" - blank = " " * len(name) - for i, line in enumerate(str(pc.old_value).splitlines() or [""]): - self.handle.verbose_log(f"{indent}- {name if i == 0 else blank} {line}") - for i, line in enumerate(str(pc.new_value).splitlines() or [""]): - self.handle.verbose_log(f"{indent}+ {name if i == 0 else blank} {line}") + log_property_diffs( + [(pc.property_name, pc.old_value, pc.new_value) for pc in prop_changes], + self.handle.verbose_log, + indent, + ) def _log_modified_device_details(self, dt: DeviceTypeChange): """Log the per-device detail section for a single modified device type. @@ -496,14 +533,24 @@ def _log_modified_device_details(self, dt: DeviceTypeChange): changed = [c for c in dt.component_changes if c.change_type == ChangeType.COMPONENT_CHANGED] removed = [c for c in dt.component_changes if c.change_type == ChangeType.COMPONENT_REMOVED] - if removed: - self.handle.log(f" ~ {dt.manufacturer_slug}/{dt.model}") - else: - self.handle.verbose_log(f" ~ {dt.manufacturer_slug}/{dt.model}") - prop_changes = [pc for pc in dt.property_changes if pc.property_name not in IMAGE_PROPERTIES] image_changes = [pc for pc in dt.property_changes if pc.property_name in IMAGE_PROPERTIES] + # Build a short inline summary so the name line is informative even without --verbose. 
+ parts = [] + if prop_changes: + parts.append(f"{len(prop_changes)} prop") + if image_changes: + parts.append(f"{len(image_changes)} image") + if added: + parts.append(f"+{len(added)} component") + if changed: + parts.append(f"~{len(changed)} component") + if removed: + parts.append(f"-{len(removed)} component") + suffix = f" [{', '.join(parts)}]" if parts else "" + self.handle.log(f" ~ {dt.manufacturer_slug}/{dt.model}{suffix}") + if prop_changes or image_changes: self.handle.verbose_log(" Properties:") self._log_property_diffs(prop_changes, " ") @@ -512,16 +559,14 @@ def _log_modified_device_details(self, dt: DeviceTypeChange): self.handle.verbose_log(f" ~ {label}: missing in NetBox (YAML defines image)") if added: - self.handle.verbose_log(f" + {len(added)} new component(s)") for comp in added: self.handle.verbose_log(f" + {comp.component_type}: {comp.component_name}") if changed: - self.handle.verbose_log(f" ~ {len(changed)} changed component(s)") for comp in changed: self.handle.verbose_log(f" ~ {comp.component_type}: {comp.component_name}") self._log_property_diffs(comp.property_changes, " ") if removed: - self.handle.log(f" - {len(removed)} removed component(s) (not deleted without --remove-components)") + self.handle.log(f" - {len(removed)} component(s) not in YAML (deleted with --remove-components)") for comp in removed: self.handle.verbose_log(f" - {comp.component_type}: {comp.component_name}") @@ -535,16 +580,14 @@ def log_change_report(self, report: ChangeReport): self.handle.log(f"Unchanged device types: {report.unchanged_count}") if report.modified_device_types: - ct_removed = self._log_modified_summary(report) + self._log_modified_summary(report) self.handle.log("-" * 60) self.handle.log("MODIFIED DEVICE TYPES:") for dt in report.modified_device_types: self._log_modified_device_details(dt) - - verbose_only = len(report.modified_device_types) - ct_removed - if verbose_only > 0 and not self.handle.args.verbose: - self.handle.log(f" ({verbose_only} 
more without removals — use --verbose to list)") + if not self.handle.args.verbose: + self.handle.log(" (use --verbose for property diffs and component names)") else: self.handle.log("Modified device types: 0") diff --git a/core/compat.py b/core/compat.py new file mode 100644 index 00000000..62c33bf1 --- /dev/null +++ b/core/compat.py @@ -0,0 +1,61 @@ +"""NetBox version-compatibility helpers. + +Centralises filter parameter names that changed between NetBox releases so +that every caller uses the same logic and drift is impossible. + +NetBox 4.1 renamed several filter keys on the DCIM endpoints: + + devicetype_id → device_type_id + moduletype_id → module_type_id + +Any code that constructs endpoint filter kwargs should call the helpers +here rather than inlining ``"device_type_id" if new_filters else "devicetype_id"``. +""" + +from __future__ import annotations + + +def device_type_filter_key(new_filters: bool) -> str: + """Return the correct filter parameter name for device-type component queries. + + Args: + new_filters: ``True`` for NetBox ≥ 4.1 (returns ``"device_type_id"``); + ``False`` for older releases (returns ``"devicetype_id"``). + """ + return "device_type_id" if new_filters else "devicetype_id" + + +def module_type_filter_key(new_filters: bool) -> str: + """Return the correct filter parameter name for module-type component queries. + + Args: + new_filters: ``True`` for NetBox ≥ 4.1 (returns ``"module_type_id"``); + ``False`` for older releases (returns ``"moduletype_id"``). + """ + return "module_type_id" if new_filters else "moduletype_id" + + +def device_type_filter_kwargs(device_type_id: int, *, new_filters: bool) -> dict: + """Return filter kwargs for querying components of a device type. + + Args: + device_type_id: NetBox ID of the device type. + new_filters: ``True`` for NetBox ≥ 4.1; ``False`` for older releases. + + Returns: + A dict suitable for unpacking into ``endpoint.filter(**kwargs)``. 
+ """ + return {device_type_filter_key(new_filters): device_type_id} + + +def module_type_filter_kwargs(module_type_id: int, *, new_filters: bool) -> dict: + """Return filter kwargs for querying components of a module type. + + Args: + module_type_id: NetBox ID of the module type. + new_filters: ``True`` for NetBox ≥ 4.1; ``False`` for older releases. + + Returns: + A dict suitable for unpacking into ``endpoint.filter(**kwargs)``. + """ + return {module_type_filter_key(new_filters): module_type_id} diff --git a/core/formatting.py b/core/formatting.py new file mode 100644 index 00000000..18cdb5be --- /dev/null +++ b/core/formatting.py @@ -0,0 +1,35 @@ +"""Shared output-formatting helpers.""" + + +def _display(val): + """Normalise a value for diff display. + + Empty / whitespace-only strings are shown as ``None`` — the same + canonical "not set" representation used by the comparison layer — so + that a NetBox field returning ``""`` and one returning ``None`` look + identical in the diff output. + """ + if isinstance(val, str): + val = val.rstrip() or None + return str(val) + + +def log_property_diffs(triples, log_fn, indent=" "): + """Emit diff-u style lines for a set of property changes. + + Args: + triples: Iterable of ``(property_name, old_value, new_value)``. + log_fn: Callable that accepts a single string; used to emit each line. + indent (str): Prefix prepended to every emitted line (default: 6 spaces). 
+ """ + triples = list(triples) + if not triples: + return + pad = min(max(len(name) for name, _, _ in triples), 30) + for name, old_val, new_val in triples: + padded = f"{name}:{'':{max(0, pad - len(name))}}" + blank = " " * len(padded) + for i, line in enumerate(_display(old_val).splitlines() or [""]): + log_fn(f"{indent}- {padded if i == 0 else blank} {line}") + for i, line in enumerate(_display(new_val).splitlines() or [""]): + log_fn(f"{indent}+ {padded if i == 0 else blank} {line}") diff --git a/core/graphql_client.py b/core/graphql_client.py index 425654c6..201d931c 100644 --- a/core/graphql_client.py +++ b/core/graphql_client.py @@ -6,6 +6,7 @@ """ import threading +import time import requests @@ -14,6 +15,15 @@ class GraphQLError(Exception): """Raised when a GraphQL query fails (HTTP error or GraphQL-level errors).""" +class GraphQLCountMismatchError(GraphQLError): + """Raised when the number of records returned by GraphQL does not match the REST count. + + This indicates a silent truncation in the GraphQL response — e.g. the server + returned far fewer records than it reports via the REST API. The run is aborted + to prevent processing an incomplete cache. + """ + + class DotDict(dict): """Dict subclass that supports attribute access, matching pynetbox Record patterns. 
@@ -93,6 +103,8 @@ def _to_dotdict(obj): "enabled", "poe_mode", "poe_type", + "description", + "rf_role", ], "power_port_templates": [ "id", @@ -101,24 +113,30 @@ def _to_dotdict(obj): "maximum_draw", "allocated_draw", "label", + "description", ], - "console_port_templates": ["id", "name", "type", "label"], - "console_server_port_templates": ["id", "name", "type", "label"], - "power_outlet_templates": ["id", "name", "type", "feed_leg", "label"], - "rear_port_templates": ["id", "name", "type", "positions", "label"], + "console_port_templates": ["id", "name", "type", "label", "description"], + "console_server_port_templates": ["id", "name", "type", "label", "description"], + "power_outlet_templates": ["id", "name", "type", "feed_leg", "label", "description"], + "rear_port_templates": ["id", "name", "type", "positions", "label", "description", "color"], "front_port_templates": [ "id", "name", "type", "label", + "description", + "color", "mappings { id front_port_position rear_port_position rear_port { id name } }", ], - "device_bay_templates": ["id", "name", "label"], - "module_bay_templates": ["id", "name", "position", "label"], + "device_bay_templates": ["id", "name", "label", "description"], + "module_bay_templates": ["id", "name", "position", "label", "description"], } # Endpoints whose GraphQL schema has no ``module_type`` parent field. -_NO_MODULE_TYPE = {"device_bay_templates", "module_bay_templates"} +# Note: module_bay_templates is intentionally excluded from this set — NetBox's +# module_bay_template_list DOES support module_type { id }, so we must include it +# in the query to correctly cache module bays owned by module types. 
+_NO_MODULE_TYPE = {"device_bay_templates"} class NetBoxGraphQLClient: @@ -182,16 +200,22 @@ def close(self): self._session.close() def __enter__(self): + """Return *self* to support use as a context manager.""" return self def __exit__(self, exc_type, exc, tb): + """Close the session on context-manager exit.""" self.close() # ── Low-level ────────────────────────────────────────────────────────── - def query(self, graphql_query, variables=None): + def query(self, graphql_query, variables=None, _retries=3): """Execute a single GraphQL query and return the ``data`` portion. + Retries up to *_retries* times (with exponential back-off) on transient + connection errors so that a single dropped connection during a long + paginated fetch does not silently empty a component cache. + Raises: GraphQLError: On HTTP errors or if the response contains GraphQL errors. """ @@ -199,32 +223,43 @@ def query(self, graphql_query, variables=None): if variables is not None: payload["variables"] = variables - try: - response = self._session.post( - self.graphql_url, - json=payload, - timeout=60, - ) - response.raise_for_status() - body = response.json() - except requests.exceptions.HTTPError as exc: - if exc.response is not None and exc.response.status_code == 403: - raise GraphQLError( - f"403 Forbidden from {self.graphql_url}\n" - "Hint: Verify that your API token has the required permissions " - "and that GraphQL is enabled in the NetBox configuration." 
- ) from exc - raise GraphQLError(str(exc)) from exc - except requests.RequestException as exc: - raise GraphQLError(str(exc)) from exc - except ValueError as exc: - raise GraphQLError(f"Invalid JSON response from NetBox GraphQL endpoint: {exc}") from exc - - if "errors" in body: - messages = "; ".join(e.get("message", str(e)) for e in body["errors"]) - raise GraphQLError(messages) - - return body.get("data", {}) + for attempt in range(1 + _retries): + try: + response = self._session.post( + self.graphql_url, + json=payload, + timeout=60, + ) + response.raise_for_status() + body = response.json() + except requests.exceptions.HTTPError as exc: + status = exc.response.status_code if exc.response is not None else None + if status == 403: + raise GraphQLError( + f"403 Forbidden from {self.graphql_url}\n" + "Hint: Verify that your API token has the required permissions " + "and that GraphQL is enabled in the NetBox configuration." + ) from exc + if status in {429, 502, 503, 504} and attempt < _retries: + backoff = 2**attempt + time.sleep(backoff) + continue + # Non-transient HTTP errors are not retried. + raise GraphQLError(str(exc)) from exc + except requests.RequestException as exc: + if attempt < _retries: + backoff = 2**attempt + time.sleep(backoff) + continue + raise GraphQLError(str(exc)) from exc + except ValueError as exc: + raise GraphQLError(f"Invalid JSON response from NetBox GraphQL endpoint: {exc}") from exc + + if "errors" in body: + messages = "; ".join(e.get("message", str(e)) for e in body["errors"]) + raise GraphQLError(messages) + + return body.get("data", {}) def query_all(self, graphql_query, list_key, page_size=None, variables=None, on_page=None): """Auto-paginate a GraphQL list query using offset/limit. 
@@ -345,6 +380,7 @@ def get_device_types(self): airflow weight weight_unit + description comments front_image { url } rear_image { url } @@ -384,6 +420,12 @@ def get_module_types(self): module_type_list(pagination: $pagination) { id model + part_number + airflow + description + comments + weight + weight_unit manufacturer { id name diff --git a/core/log_handler.py b/core/log_handler.py index 322bab0d..538a7827 100644 --- a/core/log_handler.py +++ b/core/log_handler.py @@ -1,3 +1,5 @@ +"""Logging utilities for the Device Type Library Import tool.""" + from datetime import datetime from sys import exit as system_exit diff --git a/core/netbox_api.py b/core/netbox_api.py index 212e8f20..bc06427e 100644 --- a/core/netbox_api.py +++ b/core/netbox_api.py @@ -1,5 +1,8 @@ +"""NetBox REST and GraphQL API client for importing device and module type libraries.""" + from collections import Counter import concurrent.futures +from functools import lru_cache import itertools import queue import re @@ -11,9 +14,17 @@ import glob from pathlib import Path -from core.change_detector import COMPONENT_ALIASES, ChangeType -from core.graphql_client import GraphQLError, NetBoxGraphQLClient +from core.change_detector import ChangeDetector, ChangeType +from core.compat import device_type_filter_kwargs, module_type_filter_kwargs, module_type_filter_key +from core.formatting import log_property_diffs +from core.graphql_client import GraphQLCountMismatchError, GraphQLError, NetBoxGraphQLClient from core.normalization import values_equal +from core.outcomes import EntityKind, Outcome, OutcomeRegistry +from core.schema_reader import load_properties_for_type +from core.update_failure_resolver import ( + FailureKind, + classify_device_type_update_failure, +) def _build_auth_header(token): @@ -50,6 +61,49 @@ def _retry_on_connection_error(func, *args, **kwargs): time.sleep(wait) +# Module type scalar properties that can be compared and updated. 
+# Loaded from the cloned devicetype-library schema at runtime; the list below +# serves as a fallback when the schema is not yet available (e.g. before the +# first repo clone). Identity fields (manufacturer, model) and complex objects +# (attribute_data) are excluded by the schema reader. +_MODULE_TYPE_PROPERTIES_FALLBACK = [ + "part_number", + "description", + "comments", + "airflow", + "weight", + "weight_unit", +] + +_MODULE_TYPE_SCHEMA_EXCLUDE = {"manufacturer", "model", "attribute_data", "profile"} + + +@lru_cache(maxsize=1) +def _load_module_type_properties(): + """Load module type scalar properties from the schema, falling back to hardcoded list. + + The result is cached after the first call, which happens after the repo checkout + so the schema files are available. + """ + try: + from core import settings as _settings + + props = load_properties_for_type( + os.path.join(_settings.REPO_PATH, "schema"), + "moduletype", + exclude=_MODULE_TYPE_SCHEMA_EXCLUDE, + ) + return props if props else list(_MODULE_TYPE_PROPERTIES_FALLBACK) + except (ImportError, AttributeError): + return list(_MODULE_TYPE_PROPERTIES_FALLBACK) + + +# Sentinel used to distinguish "attribute missing from record" from a genuine +# None/null value returned by NetBox. When a property is in the schema-derived +# comparison list but was not fetched by the GraphQL query, getattr returns this +# sentinel and the property is skipped to avoid false-positive change detection. +_MISSING = object() + # Supported image file extensions for module-type image uploads IMAGE_EXTENSIONS = { ".png", @@ -101,6 +155,21 @@ def _image_dir_for_yaml(src_file: str, src_segment: str, dst_segment: str) -> "P # from pynetbox import RequestError as APIRequestError +def _count_actionable_component_changes(changes, remove_components): + """Return the count of changes in *changes* that will issue an API call. + + Non-removal changes always qualify; removal changes only qualify when + *remove_components* is enabled. 
Removal-only diffs with the flag off + issue zero API calls and must not be treated as attempted. + """ + return sum( + 1 + for c in changes + if c.change_type in (ChangeType.COMPONENT_CHANGED, ChangeType.COMPONENT_ADDED) + or (remove_components and c.change_type == ChangeType.COMPONENT_REMOVED) + ) + + class NetBox: """Interface to the NetBox API for importing device and module types.""" @@ -116,13 +185,18 @@ def __init__(self, settings, handle): components_added=0, manufacturer=0, module_added=0, + module_updated=0, + module_update_failed=0, + module_partial_update=0, rack_type_added=0, rack_type_updated=0, images=0, properties_updated=0, components_updated=0, components_removed=0, + device_types_failed=0, ) + self.outcomes = OutcomeRegistry() self.url = settings.NETBOX_URL self.token = settings.NETBOX_TOKEN self.handle = handle @@ -132,6 +206,7 @@ def __init__(self, settings, handle): self.new_filters = False self.m2m_front_ports = False # True for NetBox >= 4.5 (M2M port mappings) self.rack_types = False + self.force_resolve_conflicts = False self.connect_api() self.verify_compatibility() self.graphql = NetBoxGraphQLClient( @@ -158,6 +233,14 @@ def __init__(self, settings, handle): ) except GraphQLError as e: system_exit(f"GraphQL error fetching device types: {e}") + self._change_detector: ChangeDetector | None = None + + @property + def change_detector(self) -> "ChangeDetector": + """Lazily initialised, reused :class:`ChangeDetector` instance.""" + if self._change_detector is None: + self._change_detector = ChangeDetector(self.device_types, self.handle) + return self._change_detector def connect_api(self): """Connect to the NetBox API using the stored URL and token credentials.""" @@ -202,6 +285,17 @@ def verify_compatibility(self): f"Connection error while connecting to NetBox at {self.url}: {e}\n" f"Hint: Verify that NetBox is running and reachable at {self.url}." 
) + except pynetbox.core.query.RequestError as e: + endpoint = getattr(e, "base", self.url) + status = getattr(e.req, "status_code", "?") if hasattr(e, "req") else "?" + reason = getattr(e.req, "reason", "") if hasattr(e, "req") else "" + body = str(getattr(e, "error", "") or "").strip()[:500] + details = f"HTTP {status} {reason}".strip() + msg = f"NetBox returned an error connecting to {endpoint} ({details})." + if body: + msg += f"\nResponse body (may be from an intermediate proxy):\n{body}" + msg += f"\nHint: Verify that {self.url} is reachable and not blocked by a proxy." + system_exit(msg) _raw = [int(re.sub(r"\D.*", "", x.strip()) or "0") for x in nb_version.split(".")] version_split = (_raw + [0, 0])[:2] # pad to (major, minor) to guard against single-component strings @@ -306,6 +400,200 @@ def _resolve_image_paths(self, device_type, src_file): del device_type[i] return saved_images + def _try_resolve_and_retry_device_type_update(self, dt, device_type, updates, error): + """Classify a failed device-type PATCH and, if safe, remediate then retry. + + Inspects *error* via :func:`classify_device_type_update_failure`. When + the failure is a recognised constraint, blocking templates exist, AND + no live devices reference this type, AND the operator has opted in via + ``--force-resolve-conflicts``, the remediation steps are executed and + the original PATCH is retried once. Otherwise an actionable hint is + logged and ``False`` is returned (the caller will count this as a + failure via :meth:`_log_device_type_change_outcome`). + + Args: + dt: pynetbox device-type record being updated. + device_type (dict): Parsed YAML device-type dict. + updates (dict): PATCH payload that previously failed. + error: ``pynetbox.RequestError`` instance from the failed PATCH. 
+ + Returns: + tuple[bool, FailureResolution | None]: ``(retry_succeeded, resolution)`` + where ``resolution`` is the classifier's output (or ``None`` if the + classifier itself raised), useful for downstream reporting. + """ + try: + resolution = classify_device_type_update_failure( + error.error, + netbox=self.netbox, + device_type_id=dt.id, + device_type_yaml=device_type, + new_filters=self.new_filters, + ) + except Exception as exc: # defensive: classifier must never break the run + self.handle.verbose_log(f"Failure classifier raised {type(exc).__name__}: {exc}") + return False, None + + if resolution.kind == FailureKind.UNHANDLED: + return False, resolution + + # Build a structured operator-facing log so the constraint and its + # remediation path are crystal clear. + if resolution.blocking_objects: + blockers = ", ".join(resolution.blocking_objects[:10]) + if len(resolution.blocking_objects) > 10: + blockers += f", … (+{len(resolution.blocking_objects) - 10} more)" + self.handle.log(f"Constraint analysis for {dt.model}: blocked by {blockers}") + if resolution.description: + self.handle.log(f" {resolution.description}") + if resolution.hint: + self.handle.log(f" Hint: {resolution.hint}") + + if resolution.kind == FailureKind.MANUAL_REQUIRED or not resolution.is_actionable: + return False, resolution + + if not self.force_resolve_conflicts: + return False, resolution + + # Opt-in destructive remediation. + self.handle.log( + f"Auto-resolving constraint for {dt.model} " + f"(--force-resolve-conflicts; {len(resolution.remediation_steps)} step(s))" + ) + try: + for step in resolution.remediation_steps: + step() + except Exception as exc: + self.handle.log(f"Auto-resolve failed for {dt.model}: {exc}") + return False, resolution + + # Retry the original PATCH exactly once. 
+ try: + _retry_on_connection_error( + self.netbox.dcim.device_types.update, + [{"id": dt.id, **updates}], + ) + dt.update(updates) + return True, resolution + except pynetbox.RequestError as e: + self.handle.log(f"Retry after auto-resolve still failed for {dt.model}: {e.error}") + return False, resolution + except _RETRYABLE_EXCEPTIONS as e: + self.handle.log(f"Connection error during retry after auto-resolve for {dt.model}: {e}") + return False, resolution + + def _log_device_type_change_outcome( + self, + dt, + dt_change, + *, + property_attempted, + property_succeeded, + component_delta, + actionable_count, + failure_resolution=None, + ): + """Emit the right post-update log for an existing device type. + + Distinguishes "actually updated", "partial update" (property PATCH + failed but components ran, or only some component changes succeeded), + and "completely failed" (PATCH was the only action and it failed, or + component API calls were issued but all failed) so the operator-visible + log no longer reports "Device Type Updated" when nothing was applied. + + When the operation failed or was partial, also records a structured + outcome into :attr:`outcomes` so the end-of-run summary can render an + itemised report. + + Args: + dt: pynetbox device-type record. + dt_change: ChangeEntry for this device-type. + property_attempted (bool): True if a property PATCH was issued. + property_succeeded (bool): True if the property PATCH (or its retry) + applied cleanly. + component_delta (int): Number of component operations that succeeded + (sum of counter deltas for components_updated, components_added, + components_removed after the API calls). + actionable_count (int): Number of component changes that issued API + calls (non-removal changes, or removals with --remove-components + enabled). + failure_resolution: Optional :class:`FailureResolution` whose + ``description``, ``blocking_objects`` and ``hint`` will be + attached to the registry record when the update failed. 
+ """ + identity = f"{dt.manufacturer.name}/{dt.model}" + component_attempted = actionable_count > 0 + component_succeeded = component_delta > 0 + something_applied = property_succeeded or component_succeeded + if something_applied: + is_full_success = (not property_attempted or property_succeeded) and ( + not component_attempted or component_delta == actionable_count + ) + if is_full_success: + if component_succeeded and not property_succeeded: + # Component-only update: no property change was attempted or needed. + self.counter.update({"device_types_component_updates": 1}) + prop_count = 1 if property_succeeded else 0 + comp_suffix = "; skipping component creation." if component_delta == 0 else "." + self.handle.verbose_log( + f"Device Type Updated: {dt.manufacturer.name} - {dt.model} - {dt.id}. " + f"Applied {prop_count} property and {component_delta} component change(s)" + comp_suffix + ) + else: + if component_delta > 0: + self.counter.update({"device_types_component_updates": 1}) + reason_parts = [] + if property_attempted and not property_succeeded: + reason_parts.append("Property PATCH failed") + if component_attempted: + if component_delta < actionable_count: + reason_parts.append(f"applied {component_delta} of {actionable_count} component change(s)") + else: + reason_parts.append(f"applied {component_delta} component change(s)") + reason = "; ".join(reason_parts) + "." if reason_parts else "Partial update." + self.handle.verbose_log( + f"Device Type Partially Updated: {dt.manufacturer.name} - {dt.model} - {dt.id}. 
{reason}" + ) + self.outcomes.record( + EntityKind.DEVICE_TYPE, + identity, + Outcome.PARTIAL, + reason=reason, + blocking_objects=(failure_resolution.blocking_objects if failure_resolution else None), + hint=(failure_resolution.hint if failure_resolution else None), + ) + elif property_attempted or component_attempted: + self.counter.update({"device_types_failed": 1}) + self.handle.log( + f"Device Type Update Failed: {dt.manufacturer.name} - {dt.model} - {dt.id}. " + f"Attempted {1 if property_attempted else 0} property PATCH and " + f"{actionable_count} component change(s); " + "no changes were applied (see error above)." + ) + self.outcomes.record( + EntityKind.DEVICE_TYPE, + identity, + Outcome.FAILED, + reason=( + failure_resolution.description + if failure_resolution + else ( + "Property PATCH and component updates failed." + if property_attempted and component_attempted + else "Property PATCH failed." + if property_attempted + else "Component updates failed." + ) + ), + blocking_objects=(failure_resolution.blocking_objects if failure_resolution else None), + hint=(failure_resolution.hint if failure_resolution else None), + ) + else: + self.handle.verbose_log( + f"Device Type Cached: {dt.manufacturer.name} - {dt.model} - {dt.id}. " + "No property or component changes applied." 
+ ) + def _handle_existing_device_type( self, dt, @@ -352,6 +640,12 @@ def _handle_existing_device_type( return if dt_change is not None: + property_attempted = False + property_succeeded = False + component_delta = 0 + actionable_count = 0 + failure_resolution = None + # Apply property changes (exclude image properties — uploads are handled separately) if dt_change.property_changes: updates = { @@ -360,13 +654,24 @@ def _handle_existing_device_type( if pc.property_name not in ("front_image", "rear_image") } if updates: + property_attempted = True try: _retry_on_connection_error(self.netbox.dcim.device_types.update, [{"id": dt.id, **updates}]) dt.update(updates) # keep local cache in sync self.counter.update({"properties_updated": 1}) + property_succeeded = True self.handle.verbose_log(f"Updated device type {dt.model} properties: {list(updates.keys())}") except pynetbox.RequestError as e: self.handle.log(f"Error updating device type {dt.model}: {e.error}") + retried_ok, failure_resolution = self._try_resolve_and_retry_device_type_update( + dt, device_type, updates, e + ) + if retried_ok: + self.counter.update({"properties_updated": 1}) + property_succeeded = True + self.handle.verbose_log( + f"Updated device type {dt.model} properties after auto-resolve: {list(updates.keys())}" + ) except _RETRYABLE_EXCEPTIONS as e: self.handle.log( f"Connection error updating device type {dt.model} after {_MAX_RETRIES} retries: {e}" @@ -374,6 +679,12 @@ def _handle_existing_device_type( # Apply component changes if dt_change.component_changes: + actionable_count = _count_actionable_component_changes(dt_change.component_changes, remove_components) + before_components = ( + self.counter["components_updated"], + self.counter["components_added"], + self.counter["components_removed"], + ) self.device_types.update_components( device_type, dt.id, @@ -382,13 +693,22 @@ def _handle_existing_device_type( ) if remove_components: self.device_types.remove_components(dt.id, 
dt_change.component_changes, parent_type="device") - - if dt_change is not None: - self.handle.verbose_log( - f"Device Type Updated: {dt.manufacturer.name} - {dt.model} - {dt.id}. " - f"Applied {len(dt_change.property_changes or [])} property and " - f"{len(dt_change.component_changes or [])} component change(s); " - "skipping component creation." + after_components = ( + self.counter["components_updated"], + self.counter["components_added"], + self.counter["components_removed"], + ) + component_delta = sum(after_components) - sum(before_components) + + # Distinguish full update, partial, and complete failure. + self._log_device_type_change_outcome( + dt, + dt_change, + property_attempted=property_attempted, + property_succeeded=property_succeeded, + component_delta=component_delta, + actionable_count=actionable_count, + failure_resolution=failure_resolution, ) else: self.handle.verbose_log( @@ -441,8 +761,6 @@ def _create_device_type_components(self, device_type, dt_id, src_file, saved_ima self.device_types.create_interfaces(device_type["interfaces"], dt_id) if "power-ports" in device_type: self.device_types.create_power_ports(device_type["power-ports"], dt_id) - if "power-port" in device_type: - self.device_types.create_power_ports(device_type["power-port"], dt_id) if "console-ports" in device_type: self.device_types.create_console_ports(device_type["console-ports"], dt_id) if "power-outlets" in device_type: @@ -676,12 +994,64 @@ def filter_new_module_types(module_types, all_module_types): new_module_types.append(module_type) return new_module_types + def _log_module_property_diffs(self, mfr_slug, model, fields_info, component_changes=None): + """Emit diff-u style lines for changed module type properties and component changes. + + Args: + mfr_slug (str): Manufacturer slug. + model (str): Module type model name. + fields_info (list[tuple]): List of ``(field, old_val, new_val)`` tuples. + component_changes (list | None): Optional list of ComponentChange objects. 
+ """ + self.handle.verbose_log(f" ~ {mfr_slug}/{model}") + if fields_info: + self.handle.verbose_log(" Properties:") + log_property_diffs(fields_info, self.handle.verbose_log) + if component_changes: + added = [c for c in component_changes if c.change_type == ChangeType.COMPONENT_ADDED] + changed = [c for c in component_changes if c.change_type == ChangeType.COMPONENT_CHANGED] + removed = [c for c in component_changes if c.change_type == ChangeType.COMPONENT_REMOVED] + if added: + self.handle.verbose_log(f" + {len(added)} new component(s)") + for comp in added: + self.handle.verbose_log(f" + {comp.component_type}: {comp.component_name}") + if changed: + self.handle.verbose_log(f" ~ {len(changed)} changed component(s)") + for comp in changed: + self.handle.verbose_log(f" ~ {comp.component_type}: {comp.component_name}") + log_property_diffs( + [(pc.property_name, pc.old_value, pc.new_value) for pc in comp.property_changes], + self.handle.verbose_log, + " ", + ) + if removed: + self.handle.verbose_log(f" - {len(removed)} component(s) present in NetBox but absent from YAML") + for comp in removed: + self.handle.verbose_log(f" - {comp.component_type}: {comp.component_name}") + + def _module_type_has_missing_components(self, module_type, existing_module, component_keys): + """Return True if any YAML-defined components are absent from the existing module type in NetBox.""" + for component_key in component_keys: + components = module_type.get(component_key) + if not components: + continue + endpoint_attr, cache_name = ENDPOINT_CACHE_MAP[component_key] + endpoint = getattr(self.netbox.dcim, endpoint_attr) + existing_components = self.device_types._get_cached_or_fetch( + cache_name, existing_module.id, "module", endpoint + ) + requested_names = {c.get("name") for c in components if c.get("name")} + if any(name not in existing_components for name in requested_names): + return True + return False + def filter_actionable_module_types(self, module_types, all_module_types, 
only_new=False):
        """Determine which module types need to be created or updated in NetBox.

        For ``only_new=True``, returns only module types absent from NetBox. Otherwise,
-        bulk-preloads component caches for all existing module types and includes any
-        whose images or components differ from NetBox.
+        ensures the component cache is populated via the global GraphQL preload (running
+        it on demand if device-type processing has not already run it) and includes any
+        module types whose images, scalar properties, or components differ from NetBox.

        Args:
            module_types (list[dict]): Parsed YAML module-type dicts.
@@ -689,39 +1059,40 @@ def filter_actionable_module_types(self, module_types, all_module_types, only_ne
            only_new (bool): If True, skip change detection and return only truly new entries.

        Returns:
-            tuple[list[dict], dict]: Actionable module types and existing-image mapping
-            ``{module_type_id: set_of_image_names}``.
+            tuple[list[dict], dict, list]: Three-element tuple:
+
+            - Actionable module types (list[dict]) to be created or updated.
+            - Existing-image mapping ``{module_type_id: set_of_image_names}``.
+            - Changed-property log: list of ``(mfr_slug, model, fields_info,
+              comp_changes)`` tuples, one entry per modified module type, used
+              for diff-u output via :meth:`log_module_type_changes`.
        """
        if not module_types:
-            return [], {}
+            return [], {}, []

        if only_new:
-            return self.filter_new_module_types(module_types, all_module_types), {}
+            return self.filter_new_module_types(module_types, all_module_types), {}, []

        module_type_existing_images = self._fetch_module_type_existing_images()
        actionable_module_types = []
-        component_keys = (
-            "interfaces",
-            "power-ports",
-            "console-ports",
-            "power-outlets",
-            "console-server-ports",
-            "rear-ports",
-            "front-ports",
-        )
+        # Collects (mfr_slug, model, fields_info, comp_changes) tuples for diff-u logging.
+ changed_property_log = [] + + # Ensure the component cache is populated with GraphQL data (which carries correct + # mappings for front-port templates). The global preload already ran during device-type + # processing in normal mode; this call is a no-op then. When no device types were + # present (e.g. vendor-filtered runs or --only-new was used for device types) the + # preload is triggered here so module-type comparisons still hit accurate cache data. + if not self.device_types._global_preload_done: + self.device_types.preload_all_components() - # Bulk-preload components for all existing module types so the per-module - # loop below hits the cache instead of issuing individual API calls. existing_module_map = {} - existing_module_ids = set() for module_type in module_types: existing_module = self._find_existing_module_type(module_type, all_module_types) existing_module_map[id(module_type)] = existing_module - if existing_module is not None: - existing_module_ids.add(existing_module.id) - if existing_module_ids: - self.device_types.preload_module_type_components(existing_module_ids, component_keys) + + detector = self.change_detector for module_type in module_types: existing_module = existing_module_map[id(module_type)] @@ -731,30 +1102,49 @@ def filter_actionable_module_types(self, module_types, all_module_types, only_ne existing_images = module_type_existing_images.get(existing_module.id, set()) image_files = self._discover_module_image_files(module_type.get("src", "")) - if any(os.path.splitext(os.path.basename(path))[0] not in existing_images for path in image_files): - actionable_module_types.append(module_type) - continue + image_changed = any( + os.path.splitext(os.path.basename(path))[0] not in existing_images for path in image_files + ) - has_missing_components = False - for component_key in component_keys: - components = module_type.get(component_key) - if not components: + changed_fields_info = [] + for f in _load_module_type_properties(): + if f not 
in module_type: continue - - endpoint_attr, cache_name = ENDPOINT_CACHE_MAP[component_key] - endpoint = getattr(self.netbox.dcim, endpoint_attr) - existing_components = self.device_types._get_cached_or_fetch( - cache_name, existing_module.id, "module", endpoint + nb_val = getattr(existing_module, f, _MISSING) + if nb_val is _MISSING: + # Field not fetched from NetBox yet; skip to avoid false positives. + continue + if not values_equal(module_type[f], nb_val): + changed_fields_info.append((f, nb_val, module_type[f])) + + component_changes = detector._compare_components(module_type, existing_module.id, parent_type="module") + + if changed_fields_info or component_changes: + changed_property_log.append( + ( + module_type["manufacturer"]["slug"], + module_type["model"], + changed_fields_info, + component_changes, + ) ) - requested_names = {component.get("name") for component in components if component.get("name")} - if any(name not in existing_components for name in requested_names): - has_missing_components = True - break - if has_missing_components: + if image_changed or changed_fields_info or component_changes: actionable_module_types.append(module_type) - return actionable_module_types, module_type_existing_images + return actionable_module_types, module_type_existing_images, changed_property_log + + def log_module_type_changes(self, changed_property_log): + """Emit verbose diff output for modified module types. + + Args: + changed_property_log: List of ``(mfr_slug, model, fields_info, comp_changes)`` + tuples as returned by :meth:`filter_actionable_module_types`. + """ + if changed_property_log: + self.handle.verbose_log("MODIFIED MODULE TYPES:") + for mfr_slug, model, fields_info, comp_changes in changed_property_log: + self._log_module_property_diffs(mfr_slug, model, fields_info, comp_changes) def _fetch_module_type_existing_images(self): """Query NetBox for all image attachments on module types via GraphQL and return a mapping. 
@@ -768,27 +1158,188 @@ def _fetch_module_type_existing_images(self): ) return module_type_existing_images - def _process_single_module_type(self, curr_mt, src_file, all_module_types, module_type_existing_images, only_new): - """Find or create a single module type and create its component templates. + def _try_update_module_type(self, curr_mt, module_type_res, src_file): + """Apply pending field updates to an existing module type in NetBox. + + Returns: + tuple[bool, bool]: ``(success, updated)`` where *success* is False on error and + *updated* is True when at least one field was actually patched. + """ + updates = {} + for field in _load_module_type_properties(): + if field not in curr_mt: + continue + current_value = getattr(module_type_res, field, _MISSING) + if current_value is _MISSING: + continue + if not values_equal(curr_mt[field], current_value): + updates[field] = curr_mt[field] + if not updates: + return True, False + try: + _retry_on_connection_error(self.netbox.dcim.module_types.update, [{"id": module_type_res.id, **updates}]) + self.handle.verbose_log( + f"Module Type Updated: {module_type_res.manufacturer.name} - " + f"{module_type_res.model} - {module_type_res.id} " + f"(changed: {list(updates.keys())})" + ) + except pynetbox.RequestError as excep: + self.handle.log(f"Error updating Module Type: {excep.error} (Context: {src_file})") + return False, False + except _RETRYABLE_EXCEPTIONS as e: + self.handle.log( + f"Connection error updating Module Type after {_MAX_RETRIES} retries: {e} (Context: {src_file})" + ) + return False, False + return True, True + + def _create_module_type_components(self, curr_mt, module_type_id, src_file): + """Create all component templates for a newly created module type. + + Args: + curr_mt (dict): Parsed YAML module-type dict. + module_type_id (int): ID of the newly created module type in NetBox. + src_file (str): Source file path for error context. 
+ """ + component_map = { + "interfaces": self.device_types.create_module_interfaces, + "power-ports": self.device_types.create_module_power_ports, + "console-ports": self.device_types.create_module_console_ports, + "power-outlets": self.device_types.create_module_power_outlets, + "console-server-ports": self.device_types.create_module_console_server_ports, + "rear-ports": self.device_types.create_module_rear_ports, + "front-ports": self.device_types.create_module_front_ports, + } + for key, create_fn in component_map.items(): + if key in curr_mt: + create_fn(curr_mt[key], module_type_id, context=src_file) + + def _apply_module_type_component_updates( + self, curr_mt, module_type_res, properties_updated, remove_components, patch_ok=True + ): + """Detect and apply component changes for an existing module type in update mode. + + Args: + curr_mt (dict): Parsed YAML module-type dict. + module_type_res: NetBox module type record. + properties_updated (bool): Whether scalar properties were already patched (used to + avoid double-counting the module as updated). + remove_components (bool): When True, removed components are deleted from NetBox. + patch_ok (bool): Whether the preceding scalar PATCH call succeeded (or was a no-op). + When False the property drift is still present; a component-only reconciliation + must not be recorded as a full ``module_updated`` success. 
+ """ + if not self.device_types._global_preload_done: + self.device_types.preload_all_components() + identity = f"{module_type_res.manufacturer.name}/{module_type_res.model}" + component_changes = self.change_detector._compare_components(curr_mt, module_type_res.id, parent_type="module") + if component_changes: + actionable_count = _count_actionable_component_changes(component_changes, remove_components) + before_updated = self.counter["components_updated"] + before_added = self.counter["components_added"] + before_removed = self.counter["components_removed"] + self.device_types.update_components(curr_mt, module_type_res.id, component_changes, parent_type="module") + if remove_components: + self.device_types.remove_components(module_type_res.id, component_changes, parent_type="module") + component_delta = ( + self.counter["components_updated"] + - before_updated + + self.counter["components_added"] + - before_added + + self.counter["components_removed"] + - before_removed + ) + if actionable_count == 0: + if properties_updated and patch_ok: + self.counter["module_updated"] += 1 + elif not patch_ok: + self.counter["module_update_failed"] += 1 + self.outcomes.record( + EntityKind.MODULE_TYPE, + identity, + Outcome.FAILED, + reason="Scalar PATCH failed; no component changes were actionable.", + ) + elif component_delta == 0: + if properties_updated and patch_ok: + # Properties patched successfully; components were attempted but + # none changed — treat as a partial success, not a full failure. + self.counter["module_partial_update"] += 1 + else: + self.counter["module_update_failed"] += 1 + reason = ( + "Scalar PATCH failed; component reconciliation ran but applied 0 changes." + if not patch_ok + else "Component reconciliation ran but applied 0 changes." 
+ ) + self.outcomes.record( + EntityKind.MODULE_TYPE, + identity, + Outcome.FAILED, + reason=reason, + ) + elif component_delta == actionable_count and patch_ok: + self.counter["module_updated"] += 1 + else: + self.counter["module_partial_update"] += 1 + elif properties_updated and patch_ok: + self.counter["module_updated"] += 1 + elif not patch_ok: + self.counter["module_update_failed"] += 1 + self.outcomes.record( + EntityKind.MODULE_TYPE, + identity, + Outcome.FAILED, + reason="Scalar PATCH failed; no component changes detected.", + ) + + def _process_single_module_type( + self, curr_mt, src_file, all_module_types, module_type_existing_images, only_new, remove_components=False + ): + """Find or create a single module type and create or update its component templates. + + For new module types all component templates are created directly. For existing + module types in update mode (``only_new=False``) scalar properties are patched and + component changes (additions, modifications) are applied via + :meth:`DeviceTypes.update_components`. Args: curr_mt (dict): Parsed YAML module-type dict (with ``src`` key already removed). src_file (str): Source file path for error messages and image discovery. all_module_types (dict): Existing module types cache; updated in-place on creation. module_type_existing_images (dict): Existing image map by module type ID. - only_new (bool): When True, skip component creation for existing module types. + only_new (bool): When True, skip all updates for existing module types. + remove_components (bool): When True, components absent from the YAML are deleted. Returns: bool: False if an error occurred and the caller should skip to the next iteration; True otherwise. 
""" is_new = False + properties_updated = False + patch_ok = True module_type_res = self._find_existing_module_type(curr_mt, all_module_types) if module_type_res is not None: self.handle.verbose_log( f"Module Type Cached: {module_type_res.manufacturer.name} - " + f"{module_type_res.model} - {module_type_res.id}" ) + # Upload images before the scalar PATCH so attachments are created + # even if the property update later fails (module already exists in + # NetBox so the attachment POST can reference its id immediately). + self._upload_module_type_images(module_type_res, src_file, module_type_existing_images) + if not only_new: + ok, properties_updated = self._try_update_module_type(curr_mt, module_type_res, src_file) + patch_ok = ok + if not ok: + # Scalar PATCH failed; continue with component reconciliation so a + # transient property update failure does not block component sync. + # Outcome counter is determined by _apply_module_type_component_updates. + self.handle.verbose_log( + f"Scalar PATCH failed for module type " + f"{module_type_res.manufacturer.name} - {module_type_res.model}; " + "continuing with component reconciliation." 
+ ) else: try: module_type_res = _retry_on_connection_error(self.netbox.dcim.module_types.create, curr_mt) @@ -809,33 +1360,20 @@ def _process_single_module_type(self, curr_mt, src_file, all_module_types, modul ) return False - # Upload images for both cached and newly created module types - self._upload_module_type_images(module_type_res, src_file, module_type_existing_images) - if only_new and not is_new: return True - # Module component keys often use hyphens in YAML - if "interfaces" in curr_mt: - self.device_types.create_module_interfaces(curr_mt["interfaces"], module_type_res.id, context=src_file) - if "power-ports" in curr_mt: - self.device_types.create_module_power_ports(curr_mt["power-ports"], module_type_res.id, context=src_file) - if "console-ports" in curr_mt: - self.device_types.create_module_console_ports( - curr_mt["console-ports"], module_type_res.id, context=src_file - ) - if "power-outlets" in curr_mt: - self.device_types.create_module_power_outlets( - curr_mt["power-outlets"], module_type_res.id, context=src_file - ) - if "console-server-ports" in curr_mt: - self.device_types.create_module_console_server_ports( - curr_mt["console-server-ports"], module_type_res.id, context=src_file + if is_new: + # New module type: upload images and create all component templates directly. + self._upload_module_type_images(module_type_res, src_file, module_type_existing_images) + self._create_module_type_components(curr_mt, module_type_res.id, src_file) + else: + # Existing module type in update mode: detect and apply component changes. + # The global GraphQL cache is already populated, so _compare_components is a + # pure dict-lookup with no API calls. 
+ self._apply_module_type_component_updates( + curr_mt, module_type_res, properties_updated, remove_components, patch_ok=patch_ok ) - if "rear-ports" in curr_mt: - self.device_types.create_module_rear_ports(curr_mt["rear-ports"], module_type_res.id, context=src_file) - if "front-ports" in curr_mt: - self.device_types.create_module_front_ports(curr_mt["front-ports"], module_type_res.id, context=src_file) return True def create_module_types( @@ -845,6 +1383,7 @@ def create_module_types( only_new=False, all_module_types=None, module_type_existing_images=None, + remove_components=False, ): """Create or update module types and their component templates in NetBox. @@ -858,6 +1397,7 @@ def create_module_types( only_new (bool): If True, skip component updates for existing module types. all_module_types (dict | None): Existing module types cache; fetched if None. module_type_existing_images (dict | None): Existing image map; fetched if None. + remove_components (bool): When True, components absent from the YAML are deleted. """ if not module_types: return @@ -879,6 +1419,7 @@ def create_module_types( all_module_types, module_type_existing_images, only_new, + remove_components=remove_components, ): continue @@ -1036,7 +1577,6 @@ def _upload_module_type_images(self, module_type_res, src_file, module_type_exis ENDPOINT_CACHE_MAP = { "interfaces": ("interface_templates", "interface_templates"), "power-ports": ("power_port_templates", "power_port_templates"), - "power-port": ("power_port_templates", "power_port_templates"), "console-ports": ("console_port_templates", "console_port_templates"), "power-outlets": ("power_outlet_templates", "power_outlet_templates"), "console-server-ports": ( @@ -1070,6 +1610,12 @@ class _FrontPortRecordWithMappings: __slots__ = ("_record", "_mappings_canonical") def __init__(self, record): + """Wrap *record* and pre-compute a canonical mappings list for ChangeDetector compatibility. 
+ + Normalises the ``mappings`` field (NetBox >= 4.5 list of ``PortTemplateMapping`` objects) + or the ``rear_port_position`` scalar (NetBox < 4.5) into a uniform list of dicts stored + in ``_mappings_canonical``. + """ object.__setattr__(self, "_record", record) mappings_raw = getattr(record, "mappings", None) if mappings_raw is not None: @@ -1103,11 +1649,12 @@ def __init__(self, record): } ] if rp_pos is not None - else [] + else None # Both mappings and rear_port_position absent; skip comparison. ) object.__setattr__(self, "_mappings_canonical", canonical) def __getattr__(self, name): + """Delegate attribute access to the wrapped record.""" return getattr(self._record, name) @@ -1147,6 +1694,7 @@ def __init__( self.m2m_front_ports = m2m_front_ports self.max_threads = max_threads self.cached_components = {} + self._global_preload_done = False self._image_progress = None self.existing_device_types, self.existing_device_types_by_slug = self.get_device_types() @@ -1181,19 +1729,59 @@ def _component_preload_targets(): ("module_bay_templates", "Module Bays"), ] + def _get_rest_component_count(self, endpoint_name): + """Return the REST API count for *endpoint_name*, or ``None`` on failure. + + Issues a single lightweight ``?limit=1`` REST call that returns just the + total record count — no item data is transferred. Used to validate that + subsequent GraphQL fetches returned the expected number of records. + + Args: + endpoint_name (str): Component template endpoint name (e.g. ``"interface_templates"``). + + Returns: + int | None: Total record count, or ``None`` if the request fails. + """ + try: + return getattr(self.netbox.dcim, endpoint_name).count() + except Exception as exc: + self.handle.verbose_log( + f"REST count unavailable for {endpoint_name}; skipping GraphQL count verification: {exc}" + ) + return None + def _get_endpoint_totals(self, components): - """Return placeholder totals for component endpoints. 
+ """Fetch REST record counts for each component endpoint in parallel. - With GraphQL, counts are not fetched upfront — progress bars will - adjust when results arrive. + Issues one lightweight ``?limit=1`` REST call per endpoint to obtain the + expected total before the GraphQL fetch begins. These totals are later + used to detect silent truncation in :meth:`_fetch_global_endpoint_records`. + + REST-only endpoints (see :attr:`REST_ONLY_ENDPOINTS`) are excluded because + their counts will be determined by the REST fetch itself. Args: components: Iterable of ``(endpoint_name, label)`` tuples. Returns: - dict: ``{endpoint_name: 0}`` for all endpoints. + dict: ``{endpoint_name: count_or_None}`` for graphql endpoints (``None`` + preserves the "count unavailable" sentinel from + :meth:`_get_rest_component_count`), and ``0`` for REST-only endpoints + (whose authoritative count is established by the REST fetch itself). """ - return {endpoint_name: 0 for endpoint_name, _label in components} + graphql_endpoints = [ep for ep, _label in components if ep not in self.REST_ONLY_ENDPOINTS] + totals = {ep: (0 if ep in self.REST_ONLY_ENDPOINTS else None) for ep, _label in components} + + max_workers = max(1, min(len(graphql_endpoints), self.max_threads)) + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_ep = {executor.submit(self._get_rest_component_count, ep): ep for ep in graphql_endpoints} + for future in concurrent.futures.as_completed(future_to_ep): + ep = future_to_ep[future] + result = future.result() + if isinstance(result, int) and not isinstance(result, bool): + totals[ep] = result + + return totals def start_component_preload(self, progress=None): """Start concurrent component prefetch and return a preload job handle.""" @@ -1210,12 +1798,13 @@ def start_component_preload(self, progress=None): task_ids = { endpoint_name: progress.add_task( f"Caching {label}", - total=None, + total=endpoint_totals.get(endpoint_name), ) for 
endpoint_name, label in components } def update_progress(endpoint_name, advance): + """Put a progress update onto the queue for the main thread to consume.""" progress_updates.put((endpoint_name, advance)) futures = { @@ -1223,7 +1812,7 @@ def update_progress(endpoint_name, advance): self._fetch_global_endpoint_records, endpoint_name, update_progress, - endpoint_totals.get(endpoint_name, 0), + endpoint_totals.get(endpoint_name), ) for endpoint_name, _label in components } @@ -1291,9 +1880,18 @@ def _apply_progress_updates(progress_updates, progress, task_ids, allowed_endpoi break for endpoint_name, advance in updates.items(): + if advance == 0: + continue task_id = task_ids.get(endpoint_name) if task_id is not None: - progress.update(task_id, advance=advance) + if advance < 0: + # Rewind on retry: clamp completed at 0 to avoid negative bars. + task = next((t for t in progress.tasks if t.id == task_id), None) + if task is not None: + new_completed = max(0, task.completed + advance) + progress.update(task_id, completed=new_completed) + else: + progress.update(task_id, advance=advance) advanced = True return advanced @@ -1368,9 +1966,11 @@ def preload_all_components( preload_job=preload_job, progress=progress, ) + self._global_preload_done = True return self._preload_global(components, progress_wrapper, progress=progress) + self._global_preload_done = True def _preload_track_progress( self, @@ -1408,13 +2008,15 @@ def _preload_track_progress( for endpoint_name in already_done: try: records_by_endpoint[endpoint_name] = future_map[endpoint_name].result() + except GraphQLCountMismatchError: + raise except Exception as exc: self.handle.log(f"Preload failed for {endpoint_name}: {exc}") - records_by_endpoint[endpoint_name] = [] + raise if endpoint_name in task_ids: try: final_total = max( - endpoint_totals.get(endpoint_name, 0), + endpoint_totals.get(endpoint_name) or 0, len(records_by_endpoint[endpoint_name]), 1, ) @@ -1475,11 +2077,13 @@ def _drain_pending( 
pending.remove(endpoint_name) try: records_by_endpoint[endpoint_name] = future_map[endpoint_name].result() + except GraphQLCountMismatchError: + raise except Exception as exc: self.handle.log(f"Preload failed for {endpoint_name}: {exc}") - records_by_endpoint[endpoint_name] = [] + raise final_total = max( - endpoint_totals.get(endpoint_name, 0), + endpoint_totals.get(endpoint_name) or 0, len(records_by_endpoint[endpoint_name]), 1, ) @@ -1495,8 +2099,17 @@ def _drain_pending( # Drop: endpoint already finalised; no task to advance. continue task_id = task_ids.get(endpoint_name) - if task_id is not None: - progress.update(task_id, advance=advance) + if task_id is not None and advance != 0: + if advance < 0: + task = next( + (t for t in progress.tasks if t.id == task_id), + None, + ) + if task is not None: + new_completed = max(0, task.completed + advance) + progress.update(task_id, completed=new_completed) + else: + progress.update(task_id, advance=advance) except queue.Empty: pass else: @@ -1524,9 +2137,11 @@ def _preload_no_progress(self, components, futures): self.handle.verbose_log(f"Pre-fetching {label}...") try: records_by_endpoint[endpoint] = futures[endpoint].result() + except GraphQLCountMismatchError: + raise except Exception as exc: self.handle.log(f"Preload failed for {label}: {exc}") - records_by_endpoint[endpoint] = [] + raise return records_by_endpoint def _preload_global(self, components, progress_wrapper=None, preload_job=None, progress=None): @@ -1551,6 +2166,7 @@ def _preload_global(self, components, progress_wrapper=None, preload_job=None, p progress_updates = queue.Queue() def update_progress(endpoint_name, advance): + """Put a progress update onto the queue for the main-thread pump to consume.""" progress_updates.put((endpoint_name, advance)) futures = { @@ -1558,7 +2174,7 @@ def update_progress(endpoint_name, advance): self._fetch_global_endpoint_records, endpoint, update_progress, - endpoint_totals.get(endpoint, 0), + 
endpoint_totals.get(endpoint), ) for endpoint, _label in components } @@ -1568,7 +2184,7 @@ def update_progress(endpoint_name, advance): self._fetch_global_endpoint_records, endpoint, None, - endpoint_totals.get(endpoint, 0), + endpoint_totals.get(endpoint), ) for endpoint, _label in components } @@ -1578,7 +2194,7 @@ def update_progress(endpoint_name, advance): task_ids = { endpoint: progress.add_task( f"Caching {label}", - total=None, + total=endpoint_totals.get(endpoint), ) for endpoint, label in components } @@ -1623,8 +2239,15 @@ def _fetch_global_endpoint_records(self, endpoint_name, progress_callback=None, Args: endpoint_name (str): Component template endpoint name (e.g. ``"interface_templates"``). - progress_callback (callable | None): Called with ``(endpoint_name, advance)`` once when done. - expected_total (int | None): Expected record count; reserved for callers, unused here. + progress_callback (callable | None): Called with ``(endpoint_name, advance)`` + once per page during the GraphQL fetch (or once after the batch fetch + completes for REST endpoints). *advance* is a positive integer equal to + the number of records on that page. On a count-mismatch retry the + callback is invoked once with a **negative** advance to rewind the + progress bar by the same amount, keeping the display consistent across + attempts. + expected_total (int | None): Expected record count obtained from the REST API before the + GraphQL fetch. If provided and the fetched count differs, a warning is logged. Returns: list: All component template records. 
@@ -1638,10 +2261,46 @@ def _fetch_global_endpoint_records(self, endpoint_name, progress_callback=None, progress_callback(endpoint_name, len(records)) return records - on_page = (lambda n: progress_callback(endpoint_name, n)) if progress_callback is not None else None - records = self.graphql.get_component_templates(endpoint_name, on_page=on_page) - if endpoint_name == "front_port_templates": - records = [_FrontPortRecordWithMappings(r) for r in records] + for attempt in range(_MAX_RETRIES + 1): + # Forward per-page advances LIVE so the progress bar moves while a + # large endpoint (e.g. interfaces, 100k+ records) is fetching. Track + # fetched_this_attempt so that on a count-mismatch retry we can + # rewind by the same amount, keeping the bar consistent. + fetched_this_attempt = 0 + + def _live_advance(n): + nonlocal fetched_this_attempt + fetched_this_attempt += n + if progress_callback is not None and n: + progress_callback(endpoint_name, n) + + on_page = _live_advance if progress_callback is not None else None + records = self.graphql.get_component_templates(endpoint_name, on_page=on_page) + if endpoint_name == "front_port_templates": + records = [_FrontPortRecordWithMappings(r) for r in records] + + if expected_total is not None and len(records) != expected_total: + if attempt < _MAX_RETRIES: + backoff = _RETRY_BACKOFF[attempt] + self.handle.log( + f"WARNING: GraphQL returned {len(records)} {endpoint_name} " + f"but REST API expected {expected_total}. " + f"Retrying in {backoff}s (attempt {attempt + 1}/{_MAX_RETRIES})…" + ) + if progress_callback is not None and fetched_this_attempt: + # Rewind the bar so the next attempt's live advances do + # not double-count. + progress_callback(endpoint_name, -fetched_this_attempt) + time.sleep(backoff) + continue + raise GraphQLCountMismatchError( + f"GraphQL returned {len(records)} {endpoint_name} " + f"but REST API expected {expected_total} " + f"after {_MAX_RETRIES} retries. 
" + "Run aborted to prevent processing an incomplete component cache." + ) + break + return records @staticmethod @@ -1682,7 +2341,8 @@ def _build_component_cache(items): def _get_filter_kwargs(self, parent_id, parent_type="device"): """Build endpoint filter keyword arguments for the given parent type and ID. - Selects the correct parameter name based on the NetBox version (``self.new_filters``). + Delegates to :mod:`core.compat` helpers so the version-compat logic + lives in exactly one place. Args: parent_id (int): ID of the device type or module type. @@ -1692,12 +2352,9 @@ def _get_filter_kwargs(self, parent_id, parent_type="device"): dict: Filter kwargs to pass to a pynetbox endpoint's ``filter()`` method. """ if parent_type == "device": - key = "device_type_id" if self.new_filters else "devicetype_id" - return {key: parent_id} + return device_type_filter_kwargs(parent_id, new_filters=self.new_filters) else: - # Module types: module_type_id (new) vs moduletype_id (old) - key = "module_type_id" if self.new_filters else "moduletype_id" - return {key: parent_id} + return module_type_filter_kwargs(parent_id, new_filters=self.new_filters) def _get_cached_or_fetch(self, cache_name, parent_id, parent_type, endpoint): """Return cached components or fall back to a targeted REST filter. @@ -1736,7 +2393,7 @@ def preload_module_type_components(self, module_type_ids, component_keys): ``filter()`` call per chunk of up to ``FILTER_CHUNK_SIZE`` module-type IDs (filtering by module_type_id=[...]) and distributes the returned items into per-module-type cache entries so that subsequent ``_get_cached_or_fetch`` - calls hit the cache. + calls hit the cache. All component types are fetched in parallel. 
""" if not module_type_ids: return @@ -1750,21 +2407,36 @@ def preload_module_type_components(self, module_type_ids, component_keys): seen_endpoints.add(endpoint_attr) targets.append((endpoint_attr, cache_name)) - filter_key = "module_type_id" if self.new_filters else "moduletype_id" + filter_key = module_type_filter_key(self.new_filters) id_list = sorted(module_type_ids) - for endpoint_attr, cache_name in targets: - endpoint = getattr(self.netbox.dcim, endpoint_attr) + # Pre-populate empty entries so cache hits return {} for IDs with no components. + for _, cache_name in targets: cache = self.cached_components.setdefault(cache_name, {}) - # Pre-populate empty entries so cache hits return {} for IDs with no components for mid in id_list: cache.setdefault(("module", mid), {}) + + def _fetch_one(endpoint_attr, cache_name): + """Fetch all module-type component records for *endpoint_attr* and populate *cache_name*.""" + endpoint = getattr(self.netbox.dcim, endpoint_attr) + results = [] for chunk in _chunked(id_list, FILTER_CHUNK_SIZE): for item in endpoint.filter(**{filter_key: chunk}): module_type = getattr(item, "module_type", None) if module_type is None: continue - mid = module_type.id + if cache_name == "front_port_templates": + item = _FrontPortRecordWithMappings(item) + results.append((module_type.id, item)) + return cache_name, results + + max_workers = max(1, min(len(targets), self.max_threads)) + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = {executor.submit(_fetch_one, ea, cn): (ea, cn) for ea, cn in targets} + for future in concurrent.futures.as_completed(futures): + cache_name, results = future.result() + cache = self.cached_components[cache_name] + for mid, item in results: cache.setdefault(("module", mid), {})[item.name] = item def _create_generic( @@ -2022,11 +2694,6 @@ def _apply_additions_for_type(self, comp_type, changes, yaml_data, device_type_i yaml_key = None if comp_type in yaml_data: yaml_key = 
comp_type - else: - for alias, canonical in COMPONENT_ALIASES.items(): - if canonical == comp_type and alias in yaml_data: - yaml_key = alias - break if yaml_key is None: return @@ -2241,6 +2908,7 @@ def create_power_outlets(self, power_outlets, device_type, context=None): """ def link_ports(items, pid): + """Resolve power-port name references in *items* and persist the outlet templates for device type *pid*.""" existing_pp = self._get_cached_or_fetch( "power_port_templates", pid, @@ -2336,6 +3004,7 @@ def _build_link_rear_ports(self, parent_type, label, context=None): m2m = self.m2m_front_ports def link_rear_ports(items, pid): + """Resolve rear-port position references in *items* and persist the front port templates for *pid*.""" existing_rp = self._get_cached_or_fetch( "rear_port_templates", pid, @@ -2498,6 +3167,7 @@ def create_module_power_outlets(self, power_outlets, module_type, context=None): """Create power outlet templates for a module type, resolving power-port name references.""" def link_ports(items, pid): + """Resolve power-port name references in *items* and persist the outlet templates for module type *pid*.""" existing_pp = self._get_cached_or_fetch( "power_port_templates", pid, diff --git a/core/outcomes.py b/core/outcomes.py new file mode 100644 index 00000000..cf0b75e8 --- /dev/null +++ b/core/outcomes.py @@ -0,0 +1,155 @@ +"""Centralised tracking of per-entity outcomes for end-of-run reporting. + +The legacy ``Counter`` on :class:`core.netbox_api.NetBox` aggregates counts but +loses the per-entity context needed to tell operators *which* device types +failed and *why*. :class:`OutcomeRegistry` records one row per processed +entity so the summary can render a structured failure report alongside the +existing counts. + +Designed to be additive: existing call sites keep using the legacy ``Counter``; +only failure paths must call :meth:`OutcomeRegistry.record` to populate the +new report. 
Future PRs can migrate count call sites to the registry and +derive the legacy ``Counter`` from it. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import Enum +from typing import Dict, List, Optional + + +class EntityKind(str, Enum): + """Categorisation of which kind of NetBox entity an outcome refers to.""" + + DEVICE_TYPE = "device_type" + MODULE_TYPE = "module_type" + RACK_TYPE = "rack_type" + COMPONENT = "component" + IMAGE = "image" + MANUFACTURER = "manufacturer" + + +class Outcome(str, Enum): + """Terminal outcomes for a single processed entity.""" + + CREATED = "created" + UPDATED = "updated" + UNCHANGED = "unchanged" + FAILED = "failed" + PARTIAL = "partial" + SKIPPED = "skipped" + + +@dataclass +class OutcomeRecord: + """Single recorded outcome for one entity.""" + + kind: EntityKind + identity: str # human-readable identifier (e.g. "Supermicro/SuperServer-6028TR-HTR") + outcome: Outcome + reason: Optional[str] = None + blocking_objects: List[str] = field(default_factory=list) + hint: Optional[str] = None + + +class OutcomeRegistry: + """Append-only registry of entity outcomes. + + Failure-path call sites should use :meth:`record` after they decide an + entity failed, so the end-of-run summary can render an itemised report. 
+ """ + + def __init__(self) -> None: + """Initialise an empty registry.""" + self._records: List[OutcomeRecord] = [] + + def record( + self, + kind: EntityKind, + identity: str, + outcome: Outcome, + *, + reason: Optional[str] = None, + blocking_objects: Optional[List[str]] = None, + hint: Optional[str] = None, + ) -> None: + """Append a new outcome record.""" + self._records.append( + OutcomeRecord( + kind=kind, + identity=identity, + outcome=outcome, + reason=reason, + blocking_objects=list(blocking_objects) if blocking_objects else [], + hint=hint, + ) + ) + + @property + def records(self) -> List[OutcomeRecord]: + """All recorded outcomes (read-only view).""" + return list(self._records) + + def failures(self) -> List[OutcomeRecord]: + """Return only the FAILED records.""" + return [r for r in self._records if r.outcome == Outcome.FAILED] + + def partials(self) -> List[OutcomeRecord]: + """Return only the PARTIAL records (some but not all changes applied).""" + return [r for r in self._records if r.outcome == Outcome.PARTIAL] + + def summary_by_kind(self) -> Dict[EntityKind, Dict[Outcome, int]]: + """Aggregate counts grouped by ``(kind, outcome)``.""" + agg: Dict[EntityKind, Dict[Outcome, int]] = {} + for r in self._records: + agg.setdefault(r.kind, {}).setdefault(r.outcome, 0) + agg[r.kind][r.outcome] += 1 + return agg + + def render_failure_report(self) -> List[str]: + """Render a multi-line operator-facing failure report. + + Returns an empty list when no failures or partials were recorded. + Each string is one log line, ready to pass to ``handle.log``. 
+ """ + failures = self.failures() + partials = self.partials() + if not failures and not partials: + return [] + + lines: List[str] = [] + lines.append("=" * 60) + lines.append("FAILED / PARTIAL UPDATE REPORT") + lines.append("=" * 60) + + if failures: + lines.append(f"Failed entities: {len(failures)}") + for r in failures: + lines.append(f" ✗ [{r.kind.value}] {r.identity}") + if r.reason: + lines.append(f" reason: {r.reason}") + if r.blocking_objects: + blockers = ", ".join(r.blocking_objects[:5]) + if len(r.blocking_objects) > 5: + blockers += f", … (+{len(r.blocking_objects) - 5} more)" + lines.append(f" blocked by: {blockers}") + if r.hint: + lines.append(f" hint: {r.hint}") + + if partials: + lines.append(f"Partial updates: {len(partials)}") + for r in partials: + lines.append(f" ~ [{r.kind.value}] {r.identity}") + if r.reason: + lines.append(f" reason: {r.reason}") + if r.blocking_objects: + blockers = ", ".join(r.blocking_objects[:5]) + if len(r.blocking_objects) > 5: + blockers += f", … (+{len(r.blocking_objects) - 5} more)" + lines.append(f" blocked by: {blockers}") + if r.hint: + lines.append(f" hint: {r.hint}") + + lines.append("=" * 60) + return lines diff --git a/core/repo.py b/core/repo.py index 08e195bf..0bb6eb49 100644 --- a/core/repo.py +++ b/core/repo.py @@ -1,3 +1,5 @@ +"""Git repository helpers for cloning, updating, and parsing the device-type library.""" + import os from glob import glob from re import sub as re_sub @@ -258,6 +260,10 @@ def __init__(self, args, repo_path, exception_handler): """ self.handle = exception_handler self.yaml_extensions = ["yaml", "yml"] + # Duplicate (manufacturer_slug, model) definitions found while parsing. + # Populated by parse_files; consumed by the run summary so users can fix upstream. 
+ # Each entry: {"manufacturer": str, "model": str, "kept": str, "ignored": [str, ...]} + self.duplicate_definitions = [] self.url = args.url self.repo_path = repo_path self.branch = args.branch @@ -346,7 +352,7 @@ def pull_repo(self): # -B creates the branch if absent or resets it to the remote ref if present self.repo.git.checkout("-B", self.branch, f"origin/{self.branch}") - self.handle.verbose_log(f"Updated repo from {self.repo.remotes.origin.url}") + self.handle.verbose_log(f"Updated repo from {self.repo.remotes.origin.url} (branch: {self.branch})") except exc.GitCommandError as git_error: self.handle.exception("GitCommandError", self.repo.remotes.origin.url, git_error) except Exception as git_error: @@ -441,4 +447,41 @@ def parse_files(self, files: list, slugs: list = None, progress=None): executor.shutdown(wait=False, cancel_futures=True) raise - return deviceTypes + # Deduplicate by (manufacturer_slug, model). The upstream devicetype-library + # occasionally contains two YAML files (e.g. ``Foo.yaml`` and ``Foo.yml`` or two + # different filenames) that resolve to the same NetBox key. Loading both causes + # them to overwrite each other on every run, so the entry oscillates and is + # reported as "modified" forever. Keep the first occurrence (sorted by source + # path for determinism) and record the rest so the run summary can list them + # for the user to fix upstream. 
+ deduped = [] + seen = {} # key -> kept item + groups = {} # key -> list of all srcs in sorted order + for item in sorted(deviceTypes, key=lambda d: d.get("src", "")): + try: + key = (item["manufacturer"]["slug"], item.get("model")) + except (KeyError, TypeError): + deduped.append(item) + continue + groups.setdefault(key, []).append(item.get("src", "?")) + if key not in seen: + seen[key] = item + deduped.append(item) + + for key, srcs in groups.items(): + if len(srcs) > 1: + mfr_slug, model = key + self.handle.log( + f"WARNING: duplicate definition for {mfr_slug}/{model} — " + f"keeping {srcs[0]}, ignoring {', '.join(srcs[1:])}" + ) + self.duplicate_definitions.append( + { + "manufacturer": mfr_slug, + "model": model, + "kept": srcs[0], + "ignored": srcs[1:], + } + ) + + return deduped diff --git a/core/schema_reader.py b/core/schema_reader.py new file mode 100644 index 00000000..171d6167 --- /dev/null +++ b/core/schema_reader.py @@ -0,0 +1,103 @@ +"""Utilities for loading comparable property names from devicetype-library JSON schemas. + +Reads the JSON schemas bundled in the cloned devicetype-library repository and +extracts scalar (non-array, non-object) property names that can be used for +change detection comparison. +""" + +import json +import os + +# All JSON Schema primitive types that are safe to compare and PATCH as scalars. +_SCALAR_TYPES = frozenset({"string", "integer", "number", "boolean", "null"}) + + +def load_scalar_properties(schema_path, exclude=None): + """Read a JSON schema file and return names of comparable scalar properties. 
+ + A property is considered *scalar* (and therefore comparable) when it is + **explicitly** one of: + + * A ``$ref`` (references are assumed to resolve to a scalar choice/enum) + * A plain scalar type: ``string``, ``integer``, ``number``, ``boolean``, or + ``null`` + * A ``type`` union (list) whose every member is one of the scalar types above + + Everything else — arrays, objects, ``anyOf``/``oneOf``/``allOf``, or entries + with no recognisable type — is excluded. This explicit allowlist prevents + bogus PATCH attempts for properties whose schema representation is more + complex than a single scalar value. + + Args: + schema_path (str): Absolute path to the JSON schema file. + exclude (set | None): Property names to exclude from the result. + + Returns: + list[str]: Property names in schema definition order. + + Raises: + FileNotFoundError: If *schema_path* does not exist. + ValueError: If the file is not valid JSON or lacks a ``properties`` key. + """ + exclude = set(exclude or []) + + try: + with open(schema_path) as f: + schema = json.load(f) + except json.JSONDecodeError as exc: + raise ValueError(f"Invalid JSON in {schema_path}: {exc}") from exc + + if not isinstance(schema, dict): + raise ValueError(f"Schema {schema_path} root is not a JSON object") + if "properties" not in schema: + raise ValueError(f"Schema {schema_path} has no 'properties' key") + if not isinstance(schema["properties"], dict): + raise ValueError(f"Schema {schema_path} has non-object 'properties'") + + result = [] + for name, defn in schema["properties"].items(): + if name in exclude: + continue + if not isinstance(defn, dict): + # Malformed property entry; skip silently rather than raising. + continue + # $ref entries (enum choices, foreign-key slugs, etc.) are scalar by + # convention in the NetBox device-type library schemas. 
+ if "$ref" in defn: + result.append(name) + continue + prop_type = defn.get("type") + if isinstance(prop_type, list): + # JSON Schema allows "type": ["string", "null"] union types. + if set(prop_type) <= _SCALAR_TYPES: + result.append(name) + elif prop_type in _SCALAR_TYPES: + result.append(name) + # All other entries (anyOf, oneOf, allOf, missing type, object, array) + # are intentionally excluded. + + return result + + +def load_properties_for_type(schema_dir, type_name, exclude=None): + """Load scalar properties for a named schema type from the schema directory. + + Falls back to an empty list if the schema file is missing or unreadable, + so callers can safely fall back to their own hardcoded lists. + + Args: + schema_dir (str): Directory containing the schema JSON files (e.g. + ``/path/to/repo/schema``). + type_name (str): Schema file basename without extension, e.g. + ``"moduletype"``, ``"devicetype"``, ``"racktype"``. + exclude (set | None): Property names to exclude (forwarded to + :func:`load_scalar_properties`). + + Returns: + list[str]: Scalar property names, or ``[]`` if the schema is unavailable. + """ + schema_path = os.path.join(schema_dir, f"{type_name}.json") + try: + return load_scalar_properties(schema_path, exclude=exclude) + except (OSError, ValueError): + return [] diff --git a/core/settings.py b/core/settings.py index 0b836b19..b03e6be0 100644 --- a/core/settings.py +++ b/core/settings.py @@ -1,3 +1,5 @@ +"""Environment-variable settings for the Device Type Library Import tool.""" + import os from dotenv import load_dotenv diff --git a/core/update_failure_resolver.py b/core/update_failure_resolver.py new file mode 100644 index 00000000..5223db09 --- /dev/null +++ b/core/update_failure_resolver.py @@ -0,0 +1,303 @@ +"""Classify NetBox PATCH/POST failures and (when safe) propose remediation. 
+ +This module exists to translate raw ``pynetbox.RequestError`` exceptions — +which carry NetBox's business-logic constraint messages as JSON strings — into +structured outcomes the caller can act on: + +* identify well-known constraints (e.g. ``subdevice_role`` parent→child blocked + by existing device-bay templates); +* inspect NetBox to determine whether the affected type is *in use* by live + devices (in which case automated remediation is unsafe); +* return an ordered sequence of remediation steps the caller may execute when + the operator has explicitly opted in via ``--force-resolve-conflicts``. + +The classifier is intentionally narrow: only constraints we can recognize with +high confidence are mapped to actionable resolutions. Everything else is +returned as :data:`FailureKind.UNHANDLED` so the caller falls back to plain +error logging. +""" + +from __future__ import annotations + +import json +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable, List, Optional + +from core.compat import device_type_filter_kwargs + + +class FailureKind(str, Enum): + """High-level classification of a NetBox update failure.""" + + UNHANDLED = "unhandled" + SUBDEVICE_ROLE_FLIP = "subdevice_role_flip" + MANUAL_REQUIRED = "manual_required" + + +@dataclass +class FailureResolution: + """Structured outcome of classifying a NetBox update failure. + + Attributes: + kind: Which constraint we recognised, or :data:`FailureKind.UNHANDLED`. + description: Human-readable summary safe to log to the operator. + blocking_objects: Names/labels of NetBox objects that are blocking the + update (e.g. device-bay template names). May be empty. + dependent_devices_count: Number of live ``Device`` records that + reference this device-type / module-type. ``None`` when not + queried; ``0`` confirmed safe; ``> 0`` means automated remediation + is forbidden regardless of the ``--force-resolve-conflicts`` flag. 
+ dependent_devices_sample: Up to 5 device names referencing this type, + for inclusion in the operator-facing log. + remediation_steps: Ordered list of zero-arg callables that perform the + remediation. Only executed when the operator has opted in AND + ``dependent_devices_count == 0``. + hint: Operator-facing one-line hint advising how to proceed (e.g. + "re-run with --force-resolve-conflicts"). + """ + + kind: FailureKind + description: str = "" + blocking_objects: List[str] = field(default_factory=list) + dependent_devices_count: Optional[int] = None + dependent_devices_sample: List[str] = field(default_factory=list) + remediation_steps: List[Callable[[], None]] = field(default_factory=list) + hint: str = "" + + @property + def is_actionable(self) -> bool: + """True when remediation steps exist and no live devices block them.""" + return ( + self.kind not in (FailureKind.UNHANDLED, FailureKind.MANUAL_REQUIRED) + and bool(self.remediation_steps) + and (self.dependent_devices_count == 0) + ) + + +_SUBDEVICE_ROLE_MARKERS = ( + "device bay templates", + "declassifying it as a parent device", +) + + +def _extract_error_payload(error: Any) -> Any: + """Best-effort decode of ``pynetbox.RequestError.error`` into a dict/str. + + pynetbox surfaces the body either pre-parsed (dict) or as a JSON string; + occasionally as a raw bytes payload. Normalise to a dict when possible, + otherwise return the original object. 
+ """ + if isinstance(error, dict): + return error + if isinstance(error, (bytes, bytearray)): + try: + error = error.decode("utf-8", errors="replace") + except Exception: + return error + if isinstance(error, str): + try: + return json.loads(error) + except (ValueError, TypeError): + return error + return error + + +def _matches_subdevice_role_constraint(payload: Any) -> bool: + """Return True if *payload* describes the parent→child device-bay block.""" + if isinstance(payload, dict): + msgs = payload.get("subdevice_role") + if not msgs: + return False + if isinstance(msgs, str): + text = msgs + else: + try: + text = " ".join(str(m) for m in msgs) + except TypeError: + text = str(msgs) + return all(marker in text for marker in _SUBDEVICE_ROLE_MARKERS) + if isinstance(payload, str): + return all(marker in payload for marker in _SUBDEVICE_ROLE_MARKERS) + return False + + +def _count_dependent_devices(netbox: Any, device_type_id: int, *, new_filters: bool = False) -> tuple[int, List[str]]: + """Query NetBox for devices using *device_type_id*. + + Returns ``(count, sample_names)`` where ``sample_names`` is up to 5 names + for inclusion in operator-facing logs. Defensive: any pynetbox/network + failure is reported as an UNKNOWN large count (``-1``) so the caller treats + the type as unsafe to auto-resolve. + + Args: + netbox: pynetbox API client. + device_type_id: ID of the device type to query. + new_filters: When True, use ``device_type_id`` filter name (NetBox ≥ 4.1); + otherwise use the legacy ``devicetype_id`` name. + """ + filter_kwargs = device_type_filter_kwargs(device_type_id, new_filters=new_filters) + try: + devices = list(netbox.dcim.devices.filter(**filter_kwargs, limit=5)) + except Exception: + return -1, [] + sample = [getattr(d, "name", None) or str(getattr(d, "id", "?")) for d in devices[:5]] + if len(devices) < 5: + return len(devices), sample + # We capped at limit=5; query the real total separately. 
+ try: + total = netbox.dcim.devices.count(**filter_kwargs) + except Exception: + total = len(devices) + return total, sample + + +def _list_device_bay_templates(netbox: Any, device_type_id: int, *, new_filters: bool = False) -> Optional[List[Any]]: + """Return all ``DeviceBayTemplate`` records attached to *device_type_id*. + + Returns ``None`` when the NetBox query itself fails (network error, 5xx, etc.) + so the caller can distinguish "no templates" from "lookup failed". + + Args: + netbox: pynetbox API client. + device_type_id: ID of the device type to query. + new_filters: When True, use ``device_type_id`` filter name (NetBox ≥ 4.1); + otherwise use the legacy ``devicetype_id`` name. + """ + try: + return list( + netbox.dcim.device_bay_templates.filter( + **device_type_filter_kwargs(device_type_id, new_filters=new_filters) + ) + ) + except Exception: + return None + + +def classify_device_type_update_failure( + error: Any, + *, + netbox: Any, + device_type_id: int, + device_type_yaml: dict, + new_filters: bool = False, +) -> FailureResolution: + """Classify a ``pynetbox.RequestError`` raised while updating a device type. + + Args: + error: ``RequestError.error`` payload (dict or JSON string). + netbox: pynetbox API client used to query for dependent devices and + blocking templates. + device_type_id: ID of the device-type being updated. + device_type_yaml: Parsed YAML dict for this device-type (used to detect + whether the YAML *also* lists device bays — in which case we cannot + blindly delete them). + new_filters: When True, use updated filter parameter names (NetBox ≥ 4.1). + + Returns: + A :class:`FailureResolution` describing the constraint and (when safe) + the steps required to clear it. 
+ """ + payload = _extract_error_payload(error) + + if not _matches_subdevice_role_constraint(payload): + return FailureResolution( + kind=FailureKind.UNHANDLED, + description=str(payload), + ) + + # SUBDEVICE_ROLE_FLIP path ------------------------------------------------- + blocking_templates = _list_device_bay_templates(netbox, device_type_id, new_filters=new_filters) + if blocking_templates is None: + return FailureResolution( + kind=FailureKind.MANUAL_REQUIRED, + description="Cannot inspect blocking device-bay templates: the NetBox lookup failed.", + hint="Retry after restoring NetBox connectivity, then re-run the update.", + ) + blocking_names = [getattr(t, "name", str(getattr(t, "id", "?"))) for t in blocking_templates] + + dep_count, dep_sample = _count_dependent_devices(netbox, device_type_id, new_filters=new_filters) + + # YAML must NOT redefine device-bays — otherwise deleting them would just + # cause our own component-creation step to fail or thrash. This catches + # the edge case where the user intends a parent->child flip but their YAML + # still declares device-bays. + yaml_has_device_bays = bool(device_type_yaml.get("device-bays")) + + if dep_count != 0: + # Live devices reference this type — auto-resolve forbidden. + sample = ", ".join(dep_sample) if dep_sample else "(unknown)" + if dep_count < 0: + count_text = "unknown number of" + elif dep_count > len(dep_sample): + count_text = f"{dep_count} (showing first {len(dep_sample)})" + else: + count_text = str(dep_count) + return FailureResolution( + kind=FailureKind.MANUAL_REQUIRED, + description=( + "Cannot change subdevice_role: device bay templates exist AND " + f"{count_text} device(s) currently use this type." + ), + blocking_objects=blocking_names, + dependent_devices_count=dep_count, + dependent_devices_sample=dep_sample, + hint=( + f"Live devices use this type ({sample}). Resolve manually in NetBox " + "(remove devices or convert them) before re-running." 
+ ), + ) + + if yaml_has_device_bays: + return FailureResolution( + kind=FailureKind.MANUAL_REQUIRED, + description=( + "Cannot auto-resolve subdevice_role flip while YAML still declares " + "device-bays — removing them would create a churn loop." + ), + blocking_objects=blocking_names, + dependent_devices_count=0, + hint="Remove the 'device-bays' section from the YAML or revert the subdevice_role change.", + ) + + # Safe path: build remediation steps (delete each blocking template). + if not blocking_templates: + # The PATCH failed with the subdevice_role constraint but there are no + # blocking templates (race condition, prior run cleaned them, or the + # filter returned nothing). Advertising --force-resolve-conflicts would + # be a no-op, so give a manual-inspection hint instead. + return FailureResolution( + kind=FailureKind.SUBDEVICE_ROLE_FLIP, + description=( + "subdevice_role parent→child blocked but no device-bay templates found " + "(may be a transient state or templates were already removed)." + ), + blocking_objects=[], + dependent_devices_count=0, + hint=( + "Inspect the NetBox database for residual device-bay templates " + "and remove them manually if present, then re-run." + ), + ) + + def _make_deleter(template): + def _delete(): + template.delete() + + return _delete + + steps = [_make_deleter(t) for t in blocking_templates] + + return FailureResolution( + kind=FailureKind.SUBDEVICE_ROLE_FLIP, + description=( + f"subdevice_role parent→child blocked by {len(blocking_templates)} " + "device-bay template(s); no live devices use this type." + ), + blocking_objects=blocking_names, + dependent_devices_count=0, + remediation_steps=steps, + hint=( + "Re-run with --force-resolve-conflicts to delete the blocking device-bay templates and retry the update." 
+ ), + ) diff --git a/nb-dt-import.py b/nb-dt-import.py index 35a5a120..e644e059 100644 --- a/nb-dt-import.py +++ b/nb-dt-import.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +"""Entry-point script for importing NetBox device and module types from the community library.""" + from datetime import datetime import concurrent.futures import os @@ -9,7 +11,9 @@ from core.netbox_api import NetBox from core.log_handler import LogHandler from core.repo import DTLRepo -from core.change_detector import ChangeDetector, IMAGE_PROPERTIES +from core.change_detector import ChangeDetector, ChangeType, IMAGE_PROPERTIES +from core.graphql_client import GraphQLError +from pynetbox.core.query import RequestError as NetBoxRequestError import sys @@ -124,6 +128,7 @@ def get_progress_wrapper(progress, iterable, desc=None, total=None, on_step=None task_id = progress.add_task(description, total=total) def iterator(): + """Yield items from *iterable* while advancing the progress task.""" count = 0 try: for item in iterable: @@ -304,6 +309,11 @@ def log_run_mode(handle, args): "Mode: will not remove components from existing models; use --remove-components with " "--update to change this." ) + if getattr(args, "force_resolve_conflicts", False): + handle.log( + "Mode: --force-resolve-conflicts enabled; constraint failures will trigger destructive " + "remediation when no live device references the affected type." 
+ ) else: handle.log("Mode: --update not set; changed properties/components will not be applied (use --update).") @@ -330,6 +340,7 @@ def _image_progress_scope(progress, device_types, total=0): _img_task = progress.add_task("Uploading Images", total=total) def _adv_img(count=1): + """Advance the image-upload progress task by *count* steps.""" progress.update(_img_task, advance=count) device_types._image_progress = _adv_img @@ -533,27 +544,55 @@ def _process_module_types( module_only_new = should_only_create_new_modules(args) existing_module_types = netbox.get_existing_module_types() - module_types_to_process, module_type_existing_images = netbox.filter_actionable_module_types( + # Always run full change detection (unless --only-new is explicitly set) so that + # modified module types are reported even without --update. + module_types_to_process, module_type_existing_images, changed_property_log = netbox.filter_actionable_module_types( module_types, existing_module_types, - only_new=module_only_new, + only_new=args.only_new, ) new_module_count = len(NetBox.filter_new_module_types(module_types, existing_module_types)) - if module_only_new: - handle.log("============================================================") + # Count modules whose only diff is REMOVED components — they will require + # --remove-components to converge. We count groups that have at least one removed + # component change; the advisory is informational so over-counting modules that also + # have other changes is fine. 
+ pending_removal_modules = 0 + pending_removal_components = 0 + for _slug, _model, fields_info, comp_changes in changed_property_log: + removed_in_group = [ + c for c in (comp_changes or []) if getattr(c, "change_type", None) == ChangeType.COMPONENT_REMOVED + ] + if removed_in_group: + pending_removal_modules += 1 + pending_removal_components += len(removed_in_group) + handle.log("============================================================") + handle.log("MODULE TYPE CHANGE DETECTION") + handle.log("============================================================") + if args.only_new: handle.log(f"New module types: {new_module_count}") - handle.log("============================================================") else: - module_changed_count = len(module_types_to_process) - new_module_count + module_changed_count = len(changed_property_log) module_unchanged_count = len(module_types) - len(module_types_to_process) - handle.log("============================================================") - handle.log("MODULE TYPE CHANGE DETECTION") - handle.log("============================================================") + # Modules with only missing image attachments — handled in default mode, so + # they are NOT included in the "modified" count and do NOT trigger the + # `--update` hint. 
+ image_only_count = max(0, len(module_types_to_process) - new_module_count - module_changed_count) handle.log(f"New module types: {new_module_count}") handle.log(f"Unchanged module types: {module_unchanged_count}") handle.log(f"Modified module types: {module_changed_count}") - handle.log("------------------------------------------------------------") + if image_only_count: + handle.log(f"Image-only updates: {image_only_count}") + if module_changed_count and not args.update: + handle.log(" (Run with --update to apply changes to existing module types)") + if pending_removal_modules and not args.remove_components: + remove_hint = "--remove-components" if args.update else "--update --remove-components" + handle.log( + f" (Run with {remove_hint} to remove {pending_removal_components} stale " + f"component(s) across {pending_removal_modules} module type(s))" + ) + handle.log("------------------------------------------------------------") + netbox.log_module_type_changes(changed_property_log) if module_types_to_process: netbox.create_manufacturers(module_vendors) @@ -567,6 +606,7 @@ def _process_module_types( only_new=module_only_new, all_module_types=existing_module_types, module_type_existing_images=module_type_existing_images, + remove_components=args.remove_components, ) else: handle.verbose_log("No module type changes to process.") @@ -635,29 +675,62 @@ def _process_rack_types(args, netbox, dtl_repo, handle, progress, selected_vendo ) -def _log_run_summary(handle, netbox, start_time): +def _log_run_summary(handle, netbox, start_time, dtl_repo=None): """Log the final import summary counters to *handle*. Args: handle (LogHandler): Logging handler for output. netbox (NetBox): NetBox API wrapper whose ``counter`` is read. start_time (datetime): Timestamp from the start of the run for elapsed-time reporting. 
+ dtl_repo (DTLRepo, optional): Repository helper; if provided, any duplicate + ``(manufacturer, model)`` definitions detected during YAML parsing are listed + so the user can fix them upstream. """ handle.log("---") handle.verbose_log(f"Script took {(datetime.now() - start_time)} to run") handle.log(f"{netbox.counter['added']} device types created") handle.log(f"{netbox.counter['properties_updated']} device types updated") + component_updates = netbox.counter.get("device_types_component_updates", 0) + if component_updates: + handle.log(f"{component_updates} device types had component-only updates") + failed = netbox.counter.get("device_types_failed", 0) + if failed: + handle.log(f"{failed} device types FAILED to update (see error log above)") handle.log(f"{netbox.counter['components_updated']} components updated") handle.log(f"{netbox.counter['components_added']} components added") handle.log(f"{netbox.counter['components_removed']} components removed") handle.verbose_log(f"{netbox.counter['images']} images uploaded") handle.log(f"{netbox.counter['manufacturer']} manufacturers created") - if settings.NETBOX_FEATURES["modules"]: + if netbox.modules: handle.log(f"{netbox.counter['module_added']} modules created") + handle.log(f"{netbox.counter['module_updated']} modules updated") + if netbox.counter["module_update_failed"]: + handle.log(f"{netbox.counter['module_update_failed']} modules failed to update") + if netbox.counter["module_partial_update"]: + handle.log(f"{netbox.counter['module_partial_update']} modules partially updated") if netbox.rack_types: handle.log(f"{netbox.counter['rack_type_added']} rack types created") handle.log(f"{netbox.counter['rack_type_updated']} rack types updated") + # Structured failure / partial-update report (replaces the "see error log + # above" hand-wave with itemised per-entity context). 
+ failure_lines = netbox.outcomes.render_failure_report() + for line in failure_lines: + handle.log(line) + + if dtl_repo is not None and dtl_repo.duplicate_definitions: + handle.log("---") + handle.log( + f"WARNING: {len(dtl_repo.duplicate_definitions)} duplicate " + "(manufacturer, model) definition(s) detected in the source repository:" + ) + for dup in dtl_repo.duplicate_definitions: + handle.log(f" {dup['manufacturer']}/{dup['model']}") + handle.log(f" kept: {dup['kept']}") + for ignored in dup["ignored"]: + handle.log(f" ignored: {ignored}") + handle.log("These duplicates would otherwise oscillate on every run. Please report/fix them upstream.") + def main(): """Orchestrate importing device- and module-types from a Git repository into NetBox. @@ -668,7 +741,7 @@ def main(): """ startTime = datetime.now() - parser = ArgumentParser(description="Import Netbox Device Types") + parser = ArgumentParser(description="Import Netbox Device Types", allow_abbrev=False) parser.add_argument( "--vendors", nargs="+", @@ -717,11 +790,23 @@ def main(): help="Remove components from NetBox that no longer exist in YAML (use with --update). " "WARNING: May affect existing device instances.", ) + parser.add_argument( + "--force-resolve-conflicts", + action="store_true", + default=False, + help=( + "Allow destructive remediation when a NetBox business-logic constraint blocks an update " + "(e.g. delete blocking device-bay templates before a subdevice_role parent->child flip). " + "Only applied when no live device references the type. WARNING: Destructive." 
+ ), + ) args = parser.parse_args() if args.remove_components and not args.update: parser.error("--remove-components requires --update") + if args.force_resolve_conflicts and not args.update: + parser.error("--force-resolve-conflicts requires --update") # Normalize arguments args.vendors = [v.casefold() for vendor in args.vendors for v in vendor.split(",") if v.strip()] @@ -737,6 +822,7 @@ def main(): # We pass settings for constants, but ideally we should pass individual config items # For now, we will update NetBox to verify compatibility with this new setup netbox = NetBox(settings, handle) # handle passed explicitly + netbox.force_resolve_conflicts = args.force_resolve_conflicts # Confirm effective run behavior right after compatibility checks. log_run_mode(handle, args) @@ -754,6 +840,7 @@ def main(): parse_fn = None def on_parse_step(): + """Invoke *parse_fn* (if set) after each parsed file, used to pump preload progress.""" if parse_fn is not None: parse_fn() @@ -766,6 +853,7 @@ def on_parse_step(): if progress is not None: def pump_preload(): + """Drain pending preload-progress updates from the background preload job.""" netbox.device_types.pump_preload_progress(cache_preload_job, progress) parse_fn = pump_preload @@ -819,7 +907,7 @@ def pump_preload(): _module_parse_executor.shutdown(wait=False, cancel_futures=True) handle.set_console(None) - _log_run_summary(handle, netbox, startTime) + _log_run_summary(handle, netbox, startTime, dtl_repo=dtl_repo) if __name__ == "__main__": @@ -828,3 +916,19 @@ def pump_preload(): except KeyboardInterrupt: print(f"[{datetime.now().strftime('%H:%M:%S')}] Interrupted by user (Ctrl-C). Exiting.") raise SystemExit(130) + except GraphQLError as exc: + print( + f"[{datetime.now().strftime('%H:%M:%S')}] Error: NetBox GraphQL request failed — {exc}\n" + f"[{datetime.now().strftime('%H:%M:%S')}] This may be a temporary connectivity issue. 
" + "Check that NetBox is reachable and try again.", + file=sys.stderr, + ) + raise SystemExit(1) + except NetBoxRequestError as exc: + print( + f"[{datetime.now().strftime('%H:%M:%S')}] Error: NetBox REST API request failed — {exc}\n" + f"[{datetime.now().strftime('%H:%M:%S')}] Check that NetBox is reachable and" + " the API token has the required permissions.", + file=sys.stderr, + ) + raise SystemExit(1) diff --git a/pyproject.toml b/pyproject.toml index c18a49fc..e90557ba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,7 @@ dependencies = [ [dependency-groups] dev = [ + "interrogate>=1.7.0", "pre-commit>=4.6.0", "pytest>=9.0.3", "pytest-cov>=6.0", diff --git a/tests/test_change_detector.py b/tests/test_change_detector.py index 0c66493d..1e8553b9 100644 --- a/tests/test_change_detector.py +++ b/tests/test_change_detector.py @@ -142,14 +142,6 @@ def test_component_changed_when_property_differs(self): changes = detector._compare_components(yaml_data, 1) assert any(c.component_name == "eth0" and c.change_type == ChangeType.COMPONENT_CHANGED for c in changes) - def test_power_port_alias_merged(self): - """'power-port' alias maps to same cache as 'power-ports'.""" - detector = self._make_detector() - detector.device_types.cached_components = {"power_port_templates": {("device", 1): {}}} - yaml_data = {"power-port": [{"name": "PSU1", "type": "iec-60320-c14"}]} - changes = detector._compare_components(yaml_data, 1) - assert any(c.component_name == "PSU1" and c.change_type == ChangeType.COMPONENT_ADDED for c in changes) - def test_component_without_name_is_skipped(self): """YAML component entry with no 'name' key must be skipped (line 350 continue).""" detector = self._make_detector() @@ -535,3 +527,81 @@ def test_legacy_path_positions_only_comparison(self): ) # Positions match → no change assert changes == [] + + +# --------------------------------------------------------------------------- +# _load_device_type_properties exception fallback (lines 
109-110) +# --------------------------------------------------------------------------- + + +class TestLoadDeviceTypePropertiesFallback: + """Tests for the exception fallback in _load_device_type_properties.""" + + def test_import_error_during_load_returns_fallback_list(self): + from unittest.mock import patch + + from core.change_detector import ( + _DEVICE_TYPE_PROPERTIES_FALLBACK, + _load_device_type_properties, + ) + + with patch( + "core.change_detector.load_properties_for_type", + side_effect=ImportError("settings module unavailable"), + ): + result = _load_device_type_properties() + + assert result == list(_DEVICE_TYPE_PROPERTIES_FALLBACK) + + def test_attribute_error_during_load_returns_fallback_list(self): + from unittest.mock import patch + + from core.change_detector import ( + _DEVICE_TYPE_PROPERTIES_FALLBACK, + _load_device_type_properties, + ) + + with patch( + "core.change_detector.load_properties_for_type", + side_effect=AttributeError("REPO_PATH not set"), + ): + result = _load_device_type_properties() + + assert result == list(_DEVICE_TYPE_PROPERTIES_FALLBACK) + + def test_unexpected_exception_propagates(self): + """Non-import/attribute errors must not be silenced.""" + import pytest + from unittest.mock import patch + + from core.change_detector import _load_device_type_properties + + with patch( + "core.change_detector.load_properties_for_type", + side_effect=RuntimeError("schema unavailable"), + ): + with pytest.raises(RuntimeError, match="schema unavailable"): + _load_device_type_properties() + + +# --------------------------------------------------------------------------- +# _MISSING sentinel skip in _compare_device_type_properties (line 246) +# --------------------------------------------------------------------------- + + +class TestCompareDeviceTypePropertiesMissingAttribute: + """Tests for the _MISSING sentinel guard inside _compare_device_type_properties.""" + + def test_attribute_absent_from_netbox_object_is_skipped(self): + """When 
netbox_dt doesn't have the attribute, the property is skipped (no change reported).""" + from unittest.mock import patch + + _FIXED_PROPS = ["u_height", "is_full_depth"] + with patch("core.change_detector.get_device_type_properties", return_value=_FIXED_PROPS): + detector = ChangeDetector(MagicMock(), MagicMock()) + # A plain object() has no extra attributes, so getattr returns _MISSING. + netbox_dt = object() + + changes = detector._compare_device_type_properties({"u_height": 2}, netbox_dt) + + assert changes == [] diff --git a/tests/test_formatting.py b/tests/test_formatting.py new file mode 100644 index 00000000..15413632 --- /dev/null +++ b/tests/test_formatting.py @@ -0,0 +1,42 @@ +"""Tests for core/formatting.py.""" + +from core.formatting import _display, log_property_diffs + + +class TestDisplay: + """Tests for _display() value normaliser.""" + + def test_normal_string_returned_unchanged(self): + assert _display("hello") == "hello" + + def test_whitespace_only_string_returns_None_str(self): + assert _display(" ") == "None" + + def test_empty_string_returns_None_str(self): + assert _display("") == "None" + + def test_none_value_returns_None_str(self): + assert _display(None) == "None" + + def test_integer_value_returns_string(self): + assert _display(42) == "42" + + def test_trailing_whitespace_is_stripped(self): + assert _display("hello ") == "hello" + + +class TestLogPropertyDiffs: + """Tests for log_property_diffs().""" + + def test_emits_diff_lines_for_changes(self): + log_fn = [] + log_property_diffs([("u_height", 1, 2)], log_fn.append) + assert log_fn == [ + " - u_height: 1", + " + u_height: 2", + ] + + def test_empty_triples_emits_nothing(self): + log_fn = [] + log_property_diffs([], log_fn.append) + assert log_fn == [] diff --git a/tests/test_graphql_client.py b/tests/test_graphql_client.py index d51f84ee..74c9fcf3 100644 --- a/tests/test_graphql_client.py +++ b/tests/test_graphql_client.py @@ -1005,7 +1005,7 @@ def 
test_device_bay_templates_fields(self, mock_post): assert records[0].id == 30 def test_module_bay_templates_fields(self, mock_post): - """module_bay_templates should return records with the expected fields.""" + """module_bay_templates should return records with the expected fields, including module_type.""" data = { "module_bay_template_list": [ { @@ -1013,7 +1013,8 @@ def test_module_bay_templates_fields(self, mock_post): "name": "Bay 1", "position": "1", "label": "", - "device_type": {"id": "3"}, + "device_type": None, + "module_type": {"id": "7"}, }, ] } @@ -1024,9 +1025,11 @@ def test_module_bay_templates_fields(self, mock_post): assert records[0].name == "Bay 1" assert records[0].id == 40 - # Verify the generated query does not include module_type (not in schema for module_bay_templates) + assert records[0].module_type.id == 7 + # module_bay_templates DOES have module_type { id } in NetBox's schema, so the query + # should include it (unlike device_bay_templates which truly has no module_type field) sent_query = mock_post.call_args_list[0][1]["json"]["query"] - assert "module_type" not in sent_query + assert "module_type" in sent_query class TestDotDictSetattr: @@ -1440,3 +1443,98 @@ def test_custom_page_size_used_in_query_all(self, mock_post): client.query_all("query($pagination: OffsetPaginationInput) { items }", "items") sent_vars = mock_post.call_args[1]["json"]["variables"] assert sent_vars["pagination"]["limit"] == 100 + + +# --------------------------------------------------------------------------- +# query() error branches: 403, retryable HTTP, RequestException (lines 228, 234-236, 240-245) +# --------------------------------------------------------------------------- + + +class TestGraphQLQueryErrorPaths: + """Tests for error-handling branches in query().""" + + def _make_client(self): + from core.graphql_client import NetBoxGraphQLClient + + return NetBoxGraphQLClient("http://netbox.local", "testtoken") + + def 
test_403_raises_graphql_error_with_hint(self, mock_post): + """A 403 HTTPError immediately raises GraphQLError with a permission hint.""" + from core.graphql_client import GraphQLError + + mock_resp = MagicMock() + mock_resp.status_code = 403 + http_err = requests.exceptions.HTTPError("403 Client Error") + http_err.response = mock_resp + response_mock = MagicMock() + response_mock.raise_for_status.side_effect = http_err + mock_post.return_value = response_mock + + client = self._make_client() + with pytest.raises(GraphQLError, match="403 Forbidden"): + client.query("{ test }") + + def test_429_no_retry_raises_graphql_error(self, mock_post): + """A 429 with _retries=0 raises GraphQLError immediately (not retried).""" + from core.graphql_client import GraphQLError + + mock_429_resp = MagicMock() + http_err_429 = requests.exceptions.HTTPError("429 Too Many Requests") + http_err_429.response = MagicMock() + http_err_429.response.status_code = 429 + mock_429_resp.raise_for_status.side_effect = http_err_429 + + mock_post.return_value = mock_429_resp + + client = self._make_client() + with pytest.raises(GraphQLError): + client.query("{ test }", _retries=0) + + def test_429_with_retry_allowed_retries_then_succeeds(self, mock_post): + """A 429 with retry budget > 0 sleeps and retries; second call succeeds.""" + from unittest.mock import patch + + mock_429_resp = MagicMock() + http_err_429 = requests.exceptions.HTTPError("429") + http_err_429.response = MagicMock() + http_err_429.response.status_code = 429 + mock_429_resp.raise_for_status.side_effect = http_err_429 + + mock_200_resp = MagicMock() + mock_200_resp.raise_for_status = MagicMock() + mock_200_resp.json.return_value = {"data": {"answer": 42}} + + mock_post.side_effect = [mock_429_resp, mock_200_resp] + + client = self._make_client() + with patch("core.graphql_client.time.sleep"): + result = client.query("{ test }", _retries=1) + + assert result == {"answer": 42} + + def 
test_request_exception_retries_then_raises_graphql_error(self, mock_post): + """Persistent RequestException exhausts retry budget and raises GraphQLError.""" + from unittest.mock import patch + + from core.graphql_client import GraphQLError + + mock_post.side_effect = requests.exceptions.ConnectionError("connection refused") + + client = self._make_client() + with patch("core.graphql_client.time.sleep"): + with pytest.raises(GraphQLError, match="connection refused"): + client.query("{ test }", _retries=1) + + def test_request_exception_retries_before_exhausting(self, mock_post): + """A RequestException on the first attempt stores last_exc and retries.""" + from unittest.mock import patch + + from core.graphql_client import GraphQLError + + # Fail twice (matching _retries=1 giving 2 total attempts) + mock_post.side_effect = requests.exceptions.Timeout("timed out") + + client = self._make_client() + with patch("core.graphql_client.time.sleep"): + with pytest.raises(GraphQLError, match="timed out"): + client.query("{ test }", _retries=1) diff --git a/tests/test_nb_dt_import.py b/tests/test_nb_dt_import.py index 2a531cbb..153f1f8f 100644 --- a/tests/test_nb_dt_import.py +++ b/tests/test_nb_dt_import.py @@ -303,6 +303,8 @@ def _make_mock_repo(device_types=None): def _make_mock_netbox(modules=False, rack_types=False): """Return a pre-configured NetBox mock.""" + from collections import Counter + mock_nb = MagicMock() mock_nb.modules = modules mock_nb.rack_types = rack_types @@ -310,9 +312,24 @@ def _make_mock_netbox(modules=False, rack_types=False): mock_nb.device_types.existing_device_types_by_slug = {} mock_nb.count_device_type_images.return_value = 0 mock_nb.count_module_type_images.return_value = 0 - mock_nb.filter_actionable_module_types.return_value = ([], []) + mock_nb.filter_actionable_module_types.return_value = ([], {}, []) mock_nb.get_existing_module_types.return_value = {} mock_nb.get_existing_rack_types.return_value = {} + mock_nb.counter = Counter( + 
added=0, + components_added=0, + manufacturer=0, + module_added=3, + module_updated=2, + module_update_failed=0, + rack_type_added=0, + rack_type_updated=0, + images=0, + properties_updated=0, + components_updated=0, + components_removed=0, + device_types_failed=0, + ) return mock_nb @@ -659,6 +676,36 @@ def test_remove_components_without_update_exits_with_error(self, nb_dt_import): nb_dt_import.main() assert exc_info.value.code == 2 + def test_force_resolve_conflicts_without_update_exits_with_error(self, nb_dt_import): + """--force-resolve-conflicts without --update triggers parser.error (SystemExit 2).""" + with patch.object(sys, "argv", ["nb-dt-import.py", "--force-resolve-conflicts"]): + with pytest.raises(SystemExit) as exc_info: + nb_dt_import.main() + assert exc_info.value.code == 2 + + def test_update_with_force_resolve_conflicts(self, nb_dt_import): + """--update --force-resolve-conflicts sets netbox.force_resolve_conflicts=True.""" + dt = [{"manufacturer": {"slug": "cisco"}, "model": "A", "slug": "a"}] + change_entry = SimpleNamespace(manufacturer_slug="cisco", model="A", slug="a") + report = SimpleNamespace(new_device_types=[change_entry], modified_device_types=[]) + + with ( + patch.object(sys, "argv", ["nb-dt-import.py", "--update", "--force-resolve-conflicts"]), + patch("nb_dt_import.DTLRepo") as MockRepo, + patch("nb_dt_import.NetBox") as MockNetBox, + patch("nb_dt_import.ChangeDetector") as MockDetector, + ): + mock_repo = _make_mock_repo(device_types=dt) + mock_repo.get_devices.return_value = (["file.yaml"], [{"slug": "cisco"}]) + MockRepo.return_value = mock_repo + mock_nb = _make_mock_netbox() + MockNetBox.return_value = mock_nb + MockDetector.return_value.detect_changes.return_value = report + + nb_dt_import.main() + + assert mock_nb.force_resolve_conflicts is True + def test_missing_env_var_triggers_system_exit(self, nb_dt_import): """A missing mandatory env var calls handle.exception which exits.""" with ( @@ -713,7 +760,7 @@ def 
test_modules_future_with_module_types_to_process(self, nb_dt_import): patch("nb_dt_import.ChangeDetector") as MockDetector, ): mock_nb = _make_mock_netbox(modules=True) - mock_nb.filter_actionable_module_types.return_value = ([module_type], []) + mock_nb.filter_actionable_module_types.return_value = ([module_type], {}, []) MockNetBox.return_value = mock_nb MockNetBox.filter_new_module_types.return_value = [] @@ -741,7 +788,7 @@ def test_modules_update_mode_logs_change_detection_section(self, nb_dt_import): patch("nb_dt_import.ChangeDetector") as MockDetector, ): mock_nb = _make_mock_netbox(modules=True) - mock_nb.filter_actionable_module_types.return_value = ([], []) + mock_nb.filter_actionable_module_types.return_value = ([], {}, []) MockNetBox.return_value = mock_nb MockNetBox.filter_new_module_types.return_value = [] MockRepo.return_value = _make_mock_repo() @@ -803,17 +850,23 @@ def _get_devices_se(path, vendors): nb_dt_import.main() def test_settings_netbox_features_modules_logs_module_count(self, nb_dt_import): - """When NETBOX_FEATURES['modules'] is True, module_added counter is logged.""" + """When netbox.modules is True, module_added/updated counters are logged.""" with ( patch.object(sys, "argv", ["nb-dt-import.py", "--only-new"]), patch("nb_dt_import.DTLRepo") as MockRepo, patch("nb_dt_import.NetBox") as MockNetBox, - patch.object(nb_dt_import.settings, "NETBOX_FEATURES", {"modules": True}), + patch("nb_dt_import.LogHandler") as MockLogHandler, ): MockRepo.return_value = _make_mock_repo() - MockNetBox.return_value = _make_mock_netbox() + mock_nb = _make_mock_netbox(modules=True) + MockNetBox.return_value = mock_nb - nb_dt_import.main() # should not raise + nb_dt_import.main() + + log_calls = [str(c) for c in MockLogHandler.return_value.log.call_args_list] + logged = " ".join(log_calls) + assert "3 modules created" in logged + assert "2 modules updated" in logged def test_progress_panel_tty_sets_console_and_pumps_preload(self, nb_dt_import): """With a TTY 
progress, set_console is called and pump_preload wired up.""" @@ -1015,3 +1068,192 @@ def test_entry_point_keyboard_interrupt_exits_130(self): runpy.run_path(_NB_DT_IMPORT_PATH, run_name="__main__") assert exc_info.value.code == 130 + + +# --------------------------------------------------------------------------- +# _process_module_types hints and counters (lines 554-559, 572, 574-575) +# --------------------------------------------------------------------------- + + +class TestProcessModuleTypesHints: + """Tests for pending-removal counters and hint log lines in _process_module_types.""" + + def _make_args(self, only_new=False, update=False, remove_components=False): + return SimpleNamespace( + only_new=only_new, + update=update, + remove_components=remove_components, + vendors=None, + slugs=None, + ) + + def test_pending_removal_counters_and_hints(self, nb_dt_import): + """changed_property_log with COMPONENT_REMOVED entries increments pending counters. + + Emits both --update and --remove-components hints when flags are absent. 
+ """ + from core.change_detector import ChangeType + + comp_change = MagicMock() + comp_change.change_type = ChangeType.COMPONENT_REMOVED + changed_property_log = [("cisco", "CM1", [], [comp_change, comp_change])] + + handle = MagicMock() + mock_nb = MagicMock() + mock_nb.get_existing_module_types.return_value = {} + module_to_process = {"manufacturer": {"slug": "cisco"}, "model": "CM1"} + mock_nb.filter_actionable_module_types.return_value = ( + [module_to_process], + {}, + changed_property_log, + ) + mock_nb.count_module_type_images.return_value = 0 + + mock_repo = MagicMock() + mock_repo.get_devices.return_value = ([], []) + + nb_dt_import._process_module_types( + self._make_args(only_new=False, update=False, remove_components=False), + mock_nb, + mock_repo, + handle, + None, + set(), + ) + + logged = [call.args[0] for call in handle.log.call_args_list] + # --update hint should appear (module_changed_count > 0, update=False) + assert any("--update" in msg for msg in logged) + # Removal guidance must include --update and --remove-components in the same hint. + assert any("--update --remove-components" in msg for msg in logged) + # The hint must report the actual counts: 2 components across 1 module type. 
+ assert any("2 stale component" in msg for msg in logged) + assert any("1 module type" in msg for msg in logged) + + +# --------------------------------------------------------------------------- +# _log_run_summary rack_types and duplicate_definitions (lines 687-688, 697-700) +# --------------------------------------------------------------------------- + + +class TestLogRunSummary: + """Tests for _log_run_summary rack_type and duplicate-definitions branches.""" + + def test_rack_types_counters_are_logged(self, nb_dt_import): + """When netbox.rack_types is True, rack_type_added/updated counters are logged.""" + from datetime import datetime + + handle = MagicMock() + mock_nb = MagicMock() + mock_nb.modules = False + mock_nb.rack_types = True + from collections import Counter + + mock_nb.counter = Counter( + { + "added": 0, + "properties_updated": 0, + "components_updated": 0, + "components_added": 0, + "components_removed": 0, + "images": 0, + "manufacturer": 0, + "rack_type_added": 3, + "rack_type_updated": 1, + } + ) + + nb_dt_import._log_run_summary(handle, mock_nb, datetime.now()) + + logged = [call.args[0] for call in handle.log.call_args_list] + assert any("3 rack types created" in msg for msg in logged) + assert any("1 rack types updated" in msg for msg in logged) + + def test_duplicate_definitions_are_logged(self, nb_dt_import): + """When dtl_repo has duplicate_definitions, each entry is logged with kept/ignored.""" + from datetime import datetime + + handle = MagicMock() + mock_nb = MagicMock() + mock_nb.modules = False + mock_nb.rack_types = False + from collections import Counter + + mock_nb.counter = Counter( + { + "added": 0, + "properties_updated": 0, + "components_updated": 0, + "components_added": 0, + "components_removed": 0, + "images": 0, + "manufacturer": 0, + } + ) + + mock_repo = MagicMock() + mock_repo.duplicate_definitions = [ + { + "manufacturer": "cisco", + "model": "X", + "kept": "a.yaml", + "ignored": ["b.yaml"], + } + ] + + 
nb_dt_import._log_run_summary(handle, mock_nb, datetime.now(), dtl_repo=mock_repo) + + logged = [call.args[0] for call in handle.log.call_args_list] + assert any("cisco" in msg for msg in logged) + assert any("a.yaml" in msg for msg in logged) + assert any("b.yaml" in msg for msg in logged) + + +# --------------------------------------------------------------------------- +# __main__ entry point: GraphQLError and NetBoxRequestError handlers (lines 876-890) +# --------------------------------------------------------------------------- + + +class TestEntryPointErrorHandlers: + """Tests for GraphQLError and NetBoxRequestError handlers in the __main__ block.""" + + def test_graphql_error_prints_message_and_exits_1(self, capsys): + """GraphQLError raised from main() becomes SystemExit(1) with stderr output.""" + from core.graphql_client import GraphQLError + + with ( + patch("core.repo.DTLRepo") as MockDTLRepo, + patch("core.netbox_api.NetBox"), + ): + MockDTLRepo.side_effect = GraphQLError("graphql failed") + + with patch.object(sys, "argv", ["nb-dt-import.py", "--only-new"]): + with pytest.raises(SystemExit) as exc_info: + runpy.run_path(_NB_DT_IMPORT_PATH, run_name="__main__") + + assert exc_info.value.code == 1 + assert "graphql failed" in capsys.readouterr().err + + def test_netbox_request_error_prints_message_and_exits_1(self, capsys): + """NetBoxRequestError raised from main() becomes SystemExit(1) with stderr output.""" + from unittest.mock import MagicMock + + import pynetbox.core.query + + mock_req = MagicMock() + mock_req.status_code = 400 + mock_req.url = "http://netbox/api/" + netbox_err = pynetbox.core.query.RequestError(mock_req) + + with ( + patch("core.repo.DTLRepo") as MockDTLRepo, + patch("core.netbox_api.NetBox"), + ): + MockDTLRepo.side_effect = netbox_err + + with patch.object(sys, "argv", ["nb-dt-import.py", "--only-new"]): + with pytest.raises(SystemExit) as exc_info: + runpy.run_path(_NB_DT_IMPORT_PATH, run_name="__main__") + + assert 
exc_info.value.code == 1 + assert "NetBox REST API request failed" in capsys.readouterr().err diff --git a/tests/test_netbox_api.py b/tests/test_netbox_api.py index 6051f26f..6912457d 100644 --- a/tests/test_netbox_api.py +++ b/tests/test_netbox_api.py @@ -335,6 +335,109 @@ def test_fetch_global_endpoint_records_uses_graphql( assert updates == [("interface_templates", 3)] +def test_fetch_global_endpoint_records_progress_emits_live_per_page( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """Progress callback must fire per page during the fetch, not in one batch at the end. + + Regression: an earlier implementation buffered per-attempt advances and only + flushed them after the fetch completed. For large endpoints (e.g. 100k+ + interfaces) the bar appeared frozen at 0 for the whole fetch, then jumped + to 100% at the end. + """ + from unittest.mock import patch as _patch + from core.graphql_client import DotDict + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + pages = [ + [DotDict({"id": "1", "name": "a", "device_type": {"id": "5"}, "module_type": None})], + [DotDict({"id": "2", "name": "b", "device_type": {"id": "5"}, "module_type": None})], + [DotDict({"id": "3", "name": "c", "device_type": {"id": "5"}, "module_type": None})], + ] + + advances_during_fetch = [] + + def fake_get(endpoint_name, on_page=None): + # Stream pages and verify that the consumer-facing callback was invoked + # before the next page is yielded. 
+ all_records = [] + for idx, page in enumerate(pages, start=1): + all_records.extend(page) + if on_page is not None: + on_page(len(page)) + assert len(advances_during_fetch) == idx + return all_records + + def progress_cb(endpoint, advance): + advances_during_fetch.append((endpoint, advance)) + + with _patch.object(dt.graphql, "get_component_templates", side_effect=fake_get): + records = dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=progress_cb, + expected_total=3, + ) + + assert len(records) == 3 + # Live emission: one callback per page, not a single batched flush. + assert advances_during_fetch == [ + ("interface_templates", 1), + ("interface_templates", 1), + ("interface_templates", 1), + ] + + +def test_fetch_global_endpoint_records_emits_rewind_on_retry( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """When a count-mismatch triggers a retry, a negative-advance "rewind" must be emitted. + + Without the rewind, the next attempt's live advances would double-count on + top of the failed attempt's leftover ones. 
+ """ + from unittest.mock import patch as _patch + from core.graphql_client import DotDict + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + iface1 = DotDict({"id": "1", "name": "a", "device_type": {"id": "5"}, "module_type": None}) + iface2 = DotDict({"id": "2", "name": "b", "device_type": {"id": "5"}, "module_type": None}) + + call_count = {"n": 0} + + def fake_get(endpoint_name, on_page=None): + call_count["n"] += 1 + if call_count["n"] == 1: + if on_page is not None: + on_page(1) + return [iface1] + if on_page is not None: + on_page(1) + on_page(1) + return [iface1, iface2] + + advances = [] + + def progress_cb(endpoint, advance): + advances.append(advance) + + with _patch("core.netbox_api.time.sleep"): + with _patch.object(dt.graphql, "get_component_templates", side_effect=fake_get): + records = dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=progress_cb, + expected_total=2, + ) + + assert len(records) == 2 + # Live: +1 (failed attempt page), -1 (rewind), +1 + +1 (successful retry pages). 
+ assert advances == [1, -1, 1, 1] + assert sum(advances) == 2 + + def test_fetch_global_endpoint_records_progress_skipped_when_empty( mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types ): @@ -360,6 +463,294 @@ def test_fetch_global_endpoint_records_progress_skipped_when_empty( assert updates == [] +def test_fetch_global_endpoint_records_retries_and_aborts_on_count_mismatch( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """After _MAX_RETRIES mismatches a GraphQLCountMismatchError is raised; each attempt logs a warning.""" + from unittest.mock import patch as _patch + from core.graphql_client import GraphQLCountMismatchError + + mock_nb_api = mock_pynetbox.api.return_value + mock_graphql_requests.side_effect = _make_graphql_dispatch( + { + "device_type_list": {"data": {"device_type_list": []}}, + "interface_template_list": { + "data": { + "interface_template_list": [ + { + "id": "1", + "name": "xe-0/0/0", + "type": "10gbase-x-sfpp", + "label": "", + "mgmt_only": False, + "enabled": True, + "poe_mode": None, + "poe_type": None, + "device_type": {"id": "5"}, + "module_type": None, + } + ] + } + }, + } + ) + + dt = make_device_types(nb_api=mock_nb_api) + logged = [] + dt.handle.log = lambda msg: logged.append(msg) + + with _patch("core.netbox_api.time.sleep") as mock_sleep: + with pytest.raises(GraphQLCountMismatchError, match="interface_templates"): + dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=None, + expected_total=113259, + ) + + from core.netbox_api import _MAX_RETRIES, _RETRY_BACKOFF + + # One sleep per retry attempt + assert mock_sleep.call_count == _MAX_RETRIES + # Backoff durations must match the configured sequence — guards against a + # regression that silently changes the wait pattern. 
+ assert [call.args[0] for call in mock_sleep.call_args_list] == list(_RETRY_BACKOFF[:_MAX_RETRIES]) + # A WARNING is logged for each retry + warnings = [m for m in logged if "WARNING" in m and "interface_templates" in m] + assert len(warnings) == _MAX_RETRIES + + +def test_fetch_global_endpoint_records_detects_mismatch_when_rest_returns_zero( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """A REST count of 0 must NOT silently skip validation when GraphQL returns records. + + Regression: previously ``if expected_total and ...`` treated ``0`` as "skip + validation", meaning a real REST count of 0 paired with a GraphQL response that + leaked records would go unnoticed. The check now uses ``is not None`` so 0 is a + legitimate expected value and any mismatch (including 0 vs N>0) is flagged. + """ + from unittest.mock import patch as _patch + from core.graphql_client import GraphQLCountMismatchError + + mock_nb_api = mock_pynetbox.api.return_value + mock_graphql_requests.side_effect = _make_graphql_dispatch( + { + "device_type_list": {"data": {"device_type_list": []}}, + "interface_template_list": { + "data": { + "interface_template_list": [ + { + "id": "1", + "name": "xe-0/0/0", + "type": "10gbase-x-sfpp", + "label": "", + "mgmt_only": False, + "enabled": True, + "poe_mode": None, + "poe_type": None, + "device_type": {"id": "5"}, + "module_type": None, + } + ] + } + }, + } + ) + + dt = make_device_types(nb_api=mock_nb_api) + + with _patch("core.netbox_api.time.sleep"): + with pytest.raises(GraphQLCountMismatchError, match="interface_templates"): + dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=None, + expected_total=0, + ) + + +def test_fetch_global_endpoint_records_succeeds_on_retry( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """When the first fetch is truncated but a retry returns the full count, the records are returned.""" + from 
unittest.mock import patch as _patch + from core.graphql_client import DotDict + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + iface1 = DotDict({"id": "1", "name": "xe-0/0/0", "device_type": {"id": "5"}, "module_type": None}) + iface2 = DotDict({"id": "2", "name": "xe-0/0/1", "device_type": {"id": "5"}, "module_type": None}) + + call_count = {"n": 0} + + def fake_get(endpoint_name, on_page=None): + call_count["n"] += 1 + if call_count["n"] == 1: + return [iface1] # truncated + return [iface1, iface2] # full on retry + + with _patch("core.netbox_api.time.sleep"): + with _patch.object(dt.graphql, "get_component_templates", side_effect=fake_get): + records = dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=None, + expected_total=2, + ) + + assert len(records) == 2 + assert call_count["n"] == 2 # initial attempt + 1 retry + + +def test_fetch_global_endpoint_records_progress_not_double_counted_on_retry( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """A mismatched-then-retried fetch must not double-advance the progress bar. + + If page advances were published during the failing attempt and again during + the successful retry, the progress callback would receive more advances than + the final expected total. Only the successful attempt should publish. 
+ """ + from unittest.mock import patch as _patch + from core.graphql_client import DotDict + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + iface1 = DotDict({"id": "1", "name": "xe-0/0/0", "device_type": {"id": "5"}, "module_type": None}) + iface2 = DotDict({"id": "2", "name": "xe-0/0/1", "device_type": {"id": "5"}, "module_type": None}) + + call_count = {"n": 0} + + def fake_get(endpoint_name, on_page=None): + call_count["n"] += 1 + if call_count["n"] == 1: + # First attempt: emit one page advance, but return truncated list -> mismatch + if on_page is not None: + on_page(1) + return [iface1] + # Retry attempt: emit two page advances, return full list -> success + if on_page is not None: + on_page(1) + on_page(1) + return [iface1, iface2] + + advances = [] + + def progress_cb(endpoint, advance): + advances.append((endpoint, advance)) + + with _patch("core.netbox_api.time.sleep"): + with _patch.object(dt.graphql, "get_component_templates", side_effect=fake_get): + records = dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=progress_cb, + expected_total=2, + ) + + assert len(records) == 2 + assert call_count["n"] == 2 + # Total advances must equal expected_total (2), not 1 + 2 == 3. 
+ total_advance = sum(n for _, n in advances) + assert total_advance == 2, f"progress callback double-counted retry advances: got {total_advance}, expected 2" + + +def test_fetch_global_endpoint_records_no_warning_on_count_match( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """No warning is logged when GraphQL returns the expected number of records.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_graphql_requests.side_effect = _make_graphql_dispatch( + { + "device_type_list": {"data": {"device_type_list": []}}, + "interface_template_list": { + "data": { + "interface_template_list": [ + { + "id": "1", + "name": "xe-0/0/0", + "type": "10gbase-x-sfpp", + "label": "", + "mgmt_only": False, + "enabled": True, + "poe_mode": None, + "poe_type": None, + "device_type": {"id": "5"}, + "module_type": None, + } + ] + } + }, + } + ) + + dt = make_device_types(nb_api=mock_nb_api) + logged = [] + dt.handle.log = lambda msg: logged.append(msg) + + records = dt._fetch_global_endpoint_records( + "interface_templates", + progress_callback=None, + expected_total=1, + ) + + assert len(records) == 1 + assert not any("WARNING" in m for m in logged) + + +def test_get_rest_component_count_returns_count( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """_get_rest_component_count returns the integer count from pynetbox.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.dcim.interface_templates.count.return_value = 42 + + dt = make_device_types(nb_api=mock_nb_api) + assert dt._get_rest_component_count("interface_templates") == 42 + + +def test_get_rest_component_count_returns_none_on_error( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """_get_rest_component_count returns None if the REST call fails.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.dcim.interface_templates.count.side_effect = 
Exception("connection failed") + + dt = make_device_types(nb_api=mock_nb_api) + assert dt._get_rest_component_count("interface_templates") is None + + +def test_get_endpoint_totals_fetches_rest_counts( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """_get_endpoint_totals fetches actual REST counts for graphql endpoints.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.dcim.interface_templates.count.return_value = 100 + mock_nb_api.dcim.power_port_templates.count.return_value = 50 + + dt = make_device_types(nb_api=mock_nb_api) + components = [("interface_templates", "Interfaces"), ("power_port_templates", "Power Ports")] + totals = dt._get_endpoint_totals(components) + + assert totals["interface_templates"] == 100 + assert totals["power_port_templates"] == 50 + + +def test_get_endpoint_totals_tolerates_count_failure( + mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types +): + """_get_endpoint_totals preserves None sentinel when a REST count fails.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.dcim.interface_templates.count.side_effect = Exception("timeout") + mock_nb_api.dcim.power_port_templates.count.return_value = 20 + + dt = make_device_types(nb_api=mock_nb_api) + components = [("interface_templates", "Interfaces"), ("power_port_templates", "Power Ports")] + totals = dt._get_endpoint_totals(components) + + assert totals["interface_templates"] is None + assert totals["power_port_templates"] == 20 + + def test_preload_always_global_caches_all_vendors( mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types ): @@ -452,6 +843,57 @@ def test_start_component_preload_global_job_can_be_consumed( assert preload_job["executor"] is None +def test_preload_tolerates_none_endpoint_totals(mock_settings, mock_pynetbox, graphql_client, make_device_types): + """``_preload_track_progress`` must not raise TypeError when a total is 
``None``. + + Regression: ``_get_endpoint_totals`` now returns ``None`` for endpoints whose + REST count failed (preserving the "count unavailable" sentinel). Internal + ``max(endpoint_totals.get(name, 0), ...)`` calls would raise ``TypeError`` on + ``None`` because ``dict.get`` returns the stored ``None`` instead of the default. + The fix uses ``endpoint_totals.get(name) or 0`` to coerce ``None`` to ``0`` for + the ``max()`` comparison while still letting ``None`` flow through as + ``expected_total`` to ``_fetch_global_endpoint_records``. + """ + from concurrent.futures import Future + + dt = make_device_types(nb_api=mock_pynetbox.api.return_value) + + components = [("interface_templates", "Interfaces")] + fut = Future() + fut.set_result([]) + futures = {"interface_templates": fut} + + progress = MagicMock() + task_ids = {"interface_templates": "task-1"} + endpoint_totals = {"interface_templates": None} + + import queue + + progress_updates = queue.Queue() + # Exercise BOTH branches: the already-finished branch (where the + # endpoint_totals.get(name) or 0 fallback runs) and the pending-future + # branch (covered by another test). Marking interface_templates finished + # routes through the already_done code path with a None total. + preload_job = {"finished_endpoints": {"interface_templates"}} + + # Must not raise TypeError on max(None, ...). + result = dt._preload_track_progress( + components, + futures, + progress, + task_ids, + preload_job, + progress_updates, + endpoint_totals, + ) + + assert result == {"interface_templates": []} + # The already_done branch must update the task to completed==total. + # With an empty result list and a None REST count, final_total = max(0, 0, 1) = 1 + # (the max(..., 1) floor prevents a 0/0 bar in the Rich progress UI). 
+ progress.update.assert_called_once_with("task-1", total=1, completed=1) + + def test_upload_images_success_logs_verbose_only( mock_settings, mock_pynetbox, graphql_client, make_device_types, tmp_path ): @@ -516,10 +958,12 @@ def test_filter_actionable_module_types_skips_unchanged_existing_module( existing_interface = MagicMock() existing_interface.name = "xe-0/0/0" - existing_interface.module_type.id = 42 - mock_nb_api.dcim.interface_templates.filter.return_value = [existing_interface] nb = NetBox(mock_settings, mock_settings.handle) + # Simulate the global GraphQL preload having already populated the cache for module 42. + nb.device_types._global_preload_done = True + nb.device_types.cached_components["interface_templates"] = {("module", 42): {"xe-0/0/0": existing_interface}} + module_types = [ { "manufacturer": {"slug": "juniper"}, @@ -531,7 +975,7 @@ def test_filter_actionable_module_types_skips_unchanged_existing_module( ] with patch("glob.glob", return_value=[]): - actionable, _ = nb.filter_actionable_module_types( + actionable, _, _ = nb.filter_actionable_module_types( module_types, nb.get_existing_module_types(), only_new=False, @@ -561,9 +1005,11 @@ def test_filter_actionable_module_types_includes_module_with_missing_component( } ) - mock_nb_api.dcim.interface_templates.filter.return_value = [] - nb = NetBox(mock_settings, mock_settings.handle) + # Global preload done; module 42 has no interfaces cached → component is missing. 
+ nb.device_types._global_preload_done = True + nb.device_types.cached_components["interface_templates"] = {("module", 42): {}} + module_type = { "manufacturer": {"slug": "juniper"}, "model": "Linecard 1", @@ -573,7 +1019,7 @@ def test_filter_actionable_module_types_includes_module_with_missing_component( } with patch("glob.glob", return_value=[]): - actionable, _ = nb.filter_actionable_module_types( + actionable, _, _ = nb.filter_actionable_module_types( [module_type], nb.get_existing_module_types(), only_new=False, @@ -968,11 +1414,11 @@ def test_legacy_path_uses_rear_port_position_scalar(self): {"rear_port_name": None, "front_port_position": 1, "rear_port_position": 3} ] - def test_legacy_path_no_rear_port_position_gives_empty_canonical(self): - """NetBox < 4.5 with no rear_port_position → empty canonical.""" + def test_legacy_path_no_rear_port_position_gives_none_canonical(self): + """NetBox < 4.5 with no rear_port_position → None sentinel (fields unavailable, skip comparison).""" record = MagicMock(spec=[]) wrapped = _FrontPortRecordWithMappings(record) - assert wrapped._mappings_canonical == [] + assert wrapped._mappings_canonical is None def test_delegates_unknown_attr_to_record(self): """Attribute access falls through to the underlying record.""" @@ -2217,7 +2663,12 @@ def test_update_applies_property_changes(self, mock_settings, mock_pynetbox, gra def test_update_property_change_request_error_logged( self, mock_settings, mock_pynetbox, graphql_client, make_device_types ): - """RequestError during property update is caught and logged.""" + """RequestError during property update is caught and logged. + + Regression: when the property PATCH fails AND there are no component + changes, ``device_types_failed`` must be incremented and the misleading + "Device Type Updated" log MUST NOT be emitted. 
+ """ import pynetbox as real_pynb2 from core.change_detector import ChangeReport, DeviceTypeChange, PropertyChange @@ -2253,68 +2704,429 @@ def test_update_property_change_request_error_logged( "src": "/tmp/device-types/cisco/testswitch.yaml", } mock_settings.handle.log.reset_mock() + mock_settings.handle.verbose_log.reset_mock() nb.create_device_types([device_type], update=True, change_report=report) + # Error path was logged. mock_settings.handle.log.assert_called() - - def test_update_applies_component_changes(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): - """update=True with component_changes calls update_components (and optionally remove_components).""" - from core.change_detector import ( - ChangeReport, - ChangeType, - ComponentChange, - DeviceTypeChange, + # Failure surfaced via dedicated counter so summary can show it. + assert nb.counter["device_types_failed"] == 1 + # Counters that imply a successful PATCH must NOT be bumped. + assert nb.counter["properties_updated"] == 0 + # "Device Type Updated" is misleading when nothing was applied — must + # NOT appear on either the verbose or non-verbose log. + all_calls = [c.args[0] for c in mock_settings.handle.log.call_args_list] + all_calls += [c.args[0] for c in mock_settings.handle.verbose_log.call_args_list] + assert not any("Device Type Updated" in msg for msg in all_calls), ( + f"Misleading 'Device Type Updated' log emitted after failure: {all_calls}" ) + # The failure log must surface the model so operators can find it. + assert any("Device Type Update Failed" in msg and "TestSwitch" in msg for msg in all_calls) + # Outcome.FAILED must also be recorded in the registry so the end-of-run report reflects it. 
+ from core.outcomes import EntityKind, Outcome + + failures = nb.outcomes.failures() + assert len(failures) == 1 + assert failures[0].kind == EntityKind.DEVICE_TYPE + assert failures[0].outcome == Outcome.FAILED + assert "TestSwitch" in failures[0].identity + + def _build_subdevice_role_flip_setup(self, mock_settings, mock_pynetbox, make_device_types, *, force=False): + """Shared scaffolding for force-resolve-conflicts integration tests. + + Returns ``(nb, dt, mock_nb_api, change, device_type, blocking_template, error)`` + where the device-type update is wired to fail with the parent->child + device-bay constraint and the resolver query path is pre-stubbed to + report zero dependent devices and one blocking device-bay template. + """ + import pynetbox as real_pynb2 + from core.change_detector import ChangeReport, DeviceTypeChange, PropertyChange + mock_pynetbox.RequestError = real_pynb2.RequestError mock_nb_api = mock_pynetbox.api.return_value dt = make_device_types(nb_api=mock_nb_api) - dt.update_components = MagicMock() - dt.remove_components = MagicMock() existing_dt = MagicMock() - existing_dt.id = 1 - existing_dt.model = "TestSwitch" - existing_dt.manufacturer.name = "Cisco" - dt.existing_device_types = {("cisco", "TestSwitch"): existing_dt} + existing_dt.id = 99 + existing_dt.model = "SuperServer" + existing_dt.manufacturer.name = "Supermicro" + dt.existing_device_types = {("supermicro", "SuperServer"): existing_dt} dt.existing_device_types_by_slug = {} nb = NetBox(mock_settings, mock_settings.handle) nb.device_types = dt + nb.force_resolve_conflicts = force + + # Pre-stub resolver-side queries: zero dependent devices, one blocking template. 
+ mock_nb_api.dcim.devices.filter.return_value = [] + mock_nb_api.dcim.devices.count.return_value = 0 + blocking_template = MagicMock() + blocking_template.name = "module-bay-1" + mock_nb_api.dcim.device_bay_templates.filter.return_value = [blocking_template] + + # Build a real RequestError carrying the constraint message. + constraint_body = ( + b'{"subdevice_role": ["Must delete all device bay templates associated with ' + b'this device before declassifying it as a parent device."]}' + ) + err = real_pynb2.RequestError(MagicMock(status_code=400, content=constraint_body)) + # Force the .error property to return the parsed dict (pynetbox usually does this). + err.error = { + "subdevice_role": [ + "Must delete all device bay templates associated with this device " + "before declassifying it as a parent device." + ] + } change = DeviceTypeChange( - manufacturer_slug="cisco", - model="TestSwitch", - slug="testswitch", - component_changes=[ComponentChange("interfaces", "eth0", ChangeType.COMPONENT_ADDED)], + manufacturer_slug="supermicro", + model="SuperServer", + slug="superserver", + property_changes=[PropertyChange("subdevice_role", "parent", "child")], ) report = ChangeReport(modified_device_types=[change]) device_type = { - "manufacturer": {"slug": "cisco"}, - "model": "TestSwitch", - "slug": "testswitch", - "src": "/tmp/device-types/cisco/testswitch.yaml", + "manufacturer": {"slug": "supermicro"}, + "model": "SuperServer", + "slug": "superserver", + "src": "/tmp/device-types/supermicro/superserver.yaml", } - nb.create_device_types([device_type], update=True, change_report=report, remove_components=True) - dt.update_components.assert_called() - dt.remove_components.assert_called() + return nb, dt, mock_nb_api, report, device_type, blocking_template, err - def test_update_verbose_log_when_change_applied( + def test_constraint_failure_logs_hint_when_flag_off( self, mock_settings, mock_pynetbox, graphql_client, make_device_types ): - """verbose_log with 'Device Type 
Updated' when dt_change is not None.""" - from core.change_detector import ChangeReport, DeviceTypeChange, PropertyChange + """Without --force-resolve-conflicts, classifier hint is logged but no auto-resolve runs.""" + nb, dt, mock_nb_api, report, device_type, blocking_template, err = self._build_subdevice_role_flip_setup( + mock_settings, mock_pynetbox, make_device_types, force=False + ) + mock_nb_api.dcim.device_types.update.side_effect = err - mock_nb_api = mock_pynetbox.api.return_value - dt = make_device_types(nb_api=mock_nb_api) + mock_settings.handle.log.reset_mock() + nb.create_device_types([device_type], update=True, change_report=report) - existing_dt = MagicMock() - existing_dt.id = 1 - existing_dt.model = "TestSwitch" - existing_dt.manufacturer.name = "Cisco" - dt.existing_device_types = {("cisco", "TestSwitch"): existing_dt} - dt.existing_device_types_by_slug = {} + # Auto-resolve must NOT have run (no template delete attempted). + blocking_template.delete.assert_not_called() + # PATCH must have been attempted exactly once (no retry). + mock_nb_api.dcim.device_types.update.assert_called_once() + # Failure counter bumped, success counter not. + assert nb.counter["device_types_failed"] == 1 + assert nb.counter["properties_updated"] == 0 + # Hint mentions the flag and the blocking template. + all_logs = [c.args[0] for c in mock_settings.handle.log.call_args_list] + assert any("--force-resolve-conflicts" in m for m in all_logs), all_logs + assert any("module-bay-1" in m for m in all_logs), all_logs + # Failure recorded into the OutcomeRegistry with structured context. 
+ from core.outcomes import EntityKind, Outcome + + failures = nb.outcomes.failures() + assert len(failures) == 1 + assert failures[0].kind == EntityKind.DEVICE_TYPE + assert failures[0].outcome == Outcome.FAILED + assert "SuperServer" in failures[0].identity + assert "module-bay-1" in failures[0].blocking_objects + assert failures[0].hint and "--force-resolve-conflicts" in failures[0].hint + + def test_constraint_failure_auto_resolves_when_flag_on_and_safe( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """With flag on + zero dependents, blocking templates are deleted and PATCH retried.""" + nb, dt, mock_nb_api, report, device_type, blocking_template, err = self._build_subdevice_role_flip_setup( + mock_settings, mock_pynetbox, make_device_types, force=True + ) + # First update call fails; second (after auto-resolve) succeeds. + mock_nb_api.dcim.device_types.update.side_effect = [err, MagicMock()] - nb = NetBox(mock_settings, mock_settings.handle) + nb.create_device_types([device_type], update=True, change_report=report) + + blocking_template.delete.assert_called_once() + assert mock_nb_api.dcim.device_types.update.call_count == 2 + assert nb.counter["properties_updated"] == 1 + assert nb.counter["device_types_failed"] == 0 + # Successful retry path must NOT record a failure into the registry. 
+ assert nb.outcomes.failures() == [] + + def test_constraint_failure_auto_resolve_retry_still_fails( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """If the retried PATCH after auto-resolve still fails, count it as a failure exactly once.""" + import pynetbox as real_pynb2 + + nb, dt, mock_nb_api, report, device_type, blocking_template, err = self._build_subdevice_role_flip_setup( + mock_settings, mock_pynetbox, make_device_types, force=True + ) + err2 = real_pynb2.RequestError(MagicMock(status_code=500, content=b'{"detail":"still bad"}')) + mock_nb_api.dcim.device_types.update.side_effect = [err, err2] + + nb.create_device_types([device_type], update=True, change_report=report) + + blocking_template.delete.assert_called_once() + assert mock_nb_api.dcim.device_types.update.call_count == 2 + assert nb.counter["properties_updated"] == 0 + assert nb.counter["device_types_failed"] == 1 + # The failed retry must surface in the structured failure report exactly once. + failures = nb.outcomes.failures() + assert len(failures) == 1 + assert "SuperServer" in failures[0].identity + + def test_constraint_failure_blocked_when_devices_in_use( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """With flag on but live devices reference the type, no remediation runs (safety gate).""" + nb, dt, mock_nb_api, report, device_type, blocking_template, err = self._build_subdevice_role_flip_setup( + mock_settings, mock_pynetbox, make_device_types, force=True + ) + live_device = MagicMock() + live_device.name = "router-1" + mock_nb_api.dcim.devices.filter.return_value = [live_device] + mock_nb_api.dcim.devices.count.return_value = 1 + mock_nb_api.dcim.device_types.update.side_effect = err + + mock_settings.handle.log.reset_mock() + nb.create_device_types([device_type], update=True, change_report=report) + + # Safety gate honoured: no delete, no retry. 
+ blocking_template.delete.assert_not_called() + mock_nb_api.dcim.device_types.update.assert_called_once() + assert nb.counter["device_types_failed"] == 1 + all_logs = [c.args[0] for c in mock_settings.handle.log.call_args_list] + assert any("router-1" in m for m in all_logs), all_logs + + def test_update_applies_component_changes(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): + """update=True with component_changes calls update_components (and optionally remove_components).""" + from core.change_detector import ( + ChangeReport, + ChangeType, + ComponentChange, + DeviceTypeChange, + ) + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + dt.remove_components = MagicMock() + + existing_dt = MagicMock() + existing_dt.id = 1 + existing_dt.model = "TestSwitch" + existing_dt.manufacturer.name = "Cisco" + dt.existing_device_types = {("cisco", "TestSwitch"): existing_dt} + dt.existing_device_types_by_slug = {} + + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = dt + + def fake_update_components(*args, **kwargs): + nb.counter.update({"components_added": 1}) + + dt.update_components = MagicMock(side_effect=fake_update_components) + + change = DeviceTypeChange( + manufacturer_slug="cisco", + model="TestSwitch", + slug="testswitch", + component_changes=[ComponentChange("interfaces", "eth0", ChangeType.COMPONENT_ADDED)], + ) + report = ChangeReport(modified_device_types=[change]) + + device_type = { + "manufacturer": {"slug": "cisco"}, + "model": "TestSwitch", + "slug": "testswitch", + "src": "/tmp/device-types/cisco/testswitch.yaml", + } + nb.create_device_types([device_type], update=True, change_report=report, remove_components=True) + dt.update_components.assert_called() + assert nb.counter["device_types_component_updates"] == 1 + assert nb.counter.get("properties_updated", 0) == 0 + + def test_component_changes_all_fail_increments_device_types_failed( + self, mock_settings, 
mock_pynetbox, graphql_client, make_device_types + ): + """When component API calls are issued but all fail, device_types_failed must be incremented. + + Regression: the old code set component_attempted by comparing counters + before/after the API calls, so a total component failure was silently + swallowed (counters didn't move → component_attempted=False → CACHED). + """ + from core.change_detector import ( + ChangeReport, + ChangeType, + ComponentChange, + DeviceTypeChange, + ) + from core.outcomes import EntityKind, Outcome + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + existing_dt = MagicMock() + existing_dt.id = 7 + existing_dt.model = "FailSwitch" + existing_dt.manufacturer.name = "Acme" + dt.existing_device_types = {("acme", "FailSwitch"): existing_dt} + dt.existing_device_types_by_slug = {} + + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = dt + + # update_components is called but intentionally moves no counters + # (simulates every per-component API call failing internally). 
+ dt.update_components = MagicMock() + + change = DeviceTypeChange( + manufacturer_slug="acme", + model="FailSwitch", + slug="failswitch", + component_changes=[ComponentChange("interfaces", "eth0", ChangeType.COMPONENT_ADDED)], + ) + report = ChangeReport(modified_device_types=[change]) + device_type = { + "manufacturer": {"slug": "acme"}, + "model": "FailSwitch", + "slug": "failswitch", + "src": "/tmp/device-types/acme/failswitch.yaml", + } + nb.create_device_types([device_type], update=True, change_report=report) + + dt.update_components.assert_called_once() + assert nb.counter["device_types_failed"] == 1 + assert nb.counter.get("device_types_component_updates", 0) == 0 + failures = nb.outcomes.failures() + assert len(failures) == 1 + assert failures[0].kind == EntityKind.DEVICE_TYPE + assert failures[0].outcome == Outcome.FAILED + assert "FailSwitch" in failures[0].identity + + def test_component_changes_partial_success_records_partial_outcome( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """When only some component API calls succeed, a PARTIAL outcome must be recorded. + + Regression: before the delta/count comparison, any non-zero counter + movement was treated as full success regardless of how many changes + were attempted. + """ + from core.change_detector import ( + ChangeReport, + ChangeType, + ComponentChange, + DeviceTypeChange, + ) + from core.outcomes import EntityKind, Outcome + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + existing_dt = MagicMock() + existing_dt.id = 9 + existing_dt.model = "PartialSwitch" + existing_dt.manufacturer.name = "Acme" + dt.existing_device_types = {("acme", "PartialSwitch"): existing_dt} + dt.existing_device_types_by_slug = {} + + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = dt + + # Simulate 1 of 3 component additions succeeding: the mock increments + # components_added by 1, leaving actionable_count=3 vs delta=1. 
+ def partial_update(*args, **kwargs): + nb.counter.update({"components_added": 1}) + + dt.update_components = MagicMock(side_effect=partial_update) + + change = DeviceTypeChange( + manufacturer_slug="acme", + model="PartialSwitch", + slug="partialswitch", + component_changes=[ + ComponentChange("interfaces", "eth0", ChangeType.COMPONENT_ADDED), + ComponentChange("interfaces", "eth1", ChangeType.COMPONENT_ADDED), + ComponentChange("interfaces", "eth2", ChangeType.COMPONENT_ADDED), + ], + ) + report = ChangeReport(modified_device_types=[change]) + device_type = { + "manufacturer": {"slug": "acme"}, + "model": "PartialSwitch", + "slug": "partialswitch", + "src": "/tmp/device-types/acme/partialswitch.yaml", + } + nb.create_device_types([device_type], update=True, change_report=report) + + dt.update_components.assert_called_once() + assert nb.counter["device_types_failed"] == 0 + assert nb.counter.get("device_types_component_updates", 0) == 1 + partials = nb.outcomes.partials() + assert len(partials) == 1 + assert partials[0].kind == EntityKind.DEVICE_TYPE + assert partials[0].outcome == Outcome.PARTIAL + assert "PartialSwitch" in partials[0].identity + assert "1 of 3" in (partials[0].reason or "") + + def test_component_only_update_does_not_count_as_property_update( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """Component-only change must NOT increment properties_updated.""" + from core.change_detector import ( + ChangeReport, + ChangeType, + ComponentChange, + DeviceTypeChange, + ) + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + dt.remove_components = MagicMock() + + existing_dt = MagicMock() + existing_dt.id = 42 + existing_dt.model = "MySwitch" + existing_dt.manufacturer.name = "Acme" + dt.existing_device_types = {("acme", "MySwitch"): existing_dt} + dt.existing_device_types_by_slug = {} + + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = dt + + def 
fake_update_components(*args, **kwargs): + nb.counter.update({"components_added": 1}) + + dt.update_components = MagicMock(side_effect=fake_update_components) + + change = DeviceTypeChange( + manufacturer_slug="acme", + model="MySwitch", + slug="myswitch", + component_changes=[ComponentChange("interfaces", "eth0", ChangeType.COMPONENT_ADDED)], + ) + report = ChangeReport(modified_device_types=[change]) + device_type = { + "manufacturer": {"slug": "acme"}, + "model": "MySwitch", + "slug": "myswitch", + "src": "/tmp/device-types/acme/myswitch.yaml", + } + nb.create_device_types([device_type], update=True, change_report=report) + assert nb.counter.get("properties_updated", 0) == 0 + assert nb.counter["device_types_component_updates"] == 1 + + def test_update_verbose_log_when_change_applied( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """verbose_log with 'Device Type Updated' when dt_change is not None.""" + from core.change_detector import ChangeReport, DeviceTypeChange, PropertyChange + + mock_nb_api = mock_pynetbox.api.return_value + dt = make_device_types(nb_api=mock_nb_api) + + existing_dt = MagicMock() + existing_dt.id = 1 + existing_dt.model = "TestSwitch" + existing_dt.manufacturer.name = "Cisco" + dt.existing_device_types = {("cisco", "TestSwitch"): existing_dt} + dt.existing_device_types_by_slug = {} + + nb = NetBox(mock_settings, mock_settings.handle) nb.device_types = dt mock_settings.handle.verbose_log.reset_mock() @@ -2408,7 +3220,7 @@ def test_request_error_logs_and_continues(self, mock_settings, mock_pynetbox, gr def test_creates_all_component_types(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): """All component type branches are called. - Covers power-port alias, console-ports, power-outlets, + Covers power-ports, console-ports, power-outlets, console-server-ports, rear-ports, front-ports, device-bays, and module-bays. 
""" @@ -2443,7 +3255,7 @@ def test_creates_all_component_types(self, mock_settings, mock_pynetbox, graphql "manufacturer": {"slug": "cisco"}, "model": "TestSwitch", "slug": "testswitch", - "power-port": [{"name": "PSU1"}], + "power-ports": [{"name": "PSU1"}], "console-ports": [{"name": "Con1"}], "power-outlets": [{"name": "PO1"}], "console-server-ports": [{"name": "CSP1"}], @@ -2515,7 +3327,7 @@ def test_empty_module_types_returns_empty(self, mock_settings, mock_pynetbox, mo """Empty module_types list returns [], {} immediately.""" mock_pynetbox.api.return_value.version = "3.5" nb = NetBox(mock_settings, mock_settings.handle) - result, images = nb.filter_actionable_module_types([], {}, only_new=False) + result, images, _ = nb.filter_actionable_module_types([], {}, only_new=False) assert result == [] assert images == {} @@ -2529,7 +3341,7 @@ def test_only_new_delegates_to_filter_new(self, mock_settings, mock_pynetbox, mo {"manufacturer": {"slug": "cisco"}, "model": "LC"}, {"manufacturer": {"slug": "cisco"}, "model": "NEW"}, ] - result, images = nb.filter_actionable_module_types(module_types, all_mts, only_new=True) + result, images, _ = nb.filter_actionable_module_types(module_types, all_mts, only_new=True) assert len(result) == 1 assert result[0]["model"] == "NEW" assert images == {} @@ -2552,7 +3364,7 @@ def test_new_module_type_added_to_actionable(self, mock_settings, mock_pynetbox, "src": "/repo/module-types/cisco/new.yaml", } with patch("glob.glob", return_value=[]): - result, _ = nb.filter_actionable_module_types([module_type], {}, only_new=False) + result, _, _ = nb.filter_actionable_module_types([module_type], {}, only_new=False) assert result == [module_type] def test_existing_module_with_new_image_is_actionable( @@ -2569,6 +3381,7 @@ def test_existing_module_with_new_image_is_actionable( } ) nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types._global_preload_done = True # isolate from preload side-effects existing_mt = MagicMock() 
existing_mt.id = 42 @@ -2596,14 +3409,207 @@ def test_existing_module_with_new_image_is_actionable( "core.netbox_api.NetBox._discover_module_image_files", return_value=[str(img)], ): - result, _ = nb.filter_actionable_module_types([module_type], all_mts, only_new=False) + result, existing_images_map, _ = nb.filter_actionable_module_types( + [module_type], all_mts, only_new=False + ) assert result == [module_type] + # Verify the upload worklist is propagated so create_module_types can upload the image. + assert existing_images_map == {42: set()} + def test_existing_module_with_changed_property_is_actionable( + self, mock_settings, mock_pynetbox, mock_graphql_requests + ): + """Existing module type with a changed scalar property (e.g. part_number) is actionable.""" + from core.graphql_client import DotDict -# --------------------------------------------------------------------------- -# create_module_types: existing module verbose_log + RequestError + only_new -# + component branches (lines 528, 531, 565, 579, 583, 589) -# --------------------------------------------------------------------------- + mock_pynetbox.api.return_value.version = "3.5" + mock_graphql_requests.side_effect = paginate_dispatch( + { + "manufacturer_list": [], + "device_type_list": [], + "module_type_list": [], + "image_attachment_list": [], + } + ) + nb = NetBox(mock_settings, mock_settings.handle) + # Skip the GraphQL preload — these tests isolate scalar/image diffs. 
+ nb.device_types._global_preload_done = True + + existing_mt = DotDict( + {"id": 42, "model": "IOM-s-3.0T", "part_number": "OLD_PN", "manufacturer": {"slug": "nokia"}} + ) + all_mts = {"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/tmp/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + + with patch("glob.glob", return_value=[]): + with patch.object(nb, "_fetch_module_type_existing_images", return_value={}): + actionable, _, changed_property_log = nb.filter_actionable_module_types( + [module_type], + all_mts, + only_new=False, + ) + + assert actionable == [module_type] + # Verify the scalar diff is captured for the modified-summary log path. + assert len(changed_property_log) == 1 + mfr_slug, model, fields_info, _ = changed_property_log[0] + assert mfr_slug == "nokia" + assert model == "IOM-s-3.0T" + assert any(name == "part_number" and old == "OLD_PN" and new == "3HE16474AA" for name, old, new in fields_info) + + def test_existing_module_with_missing_image_and_property_change_logs_both( + self, mock_settings, mock_pynetbox, mock_graphql_requests, tmp_path + ): + """Mixed image + property change: image change must NOT short-circuit property/component diff. + + Regression: an early ``continue`` after detecting a missing image meant scalar/component + comparison was skipped; the module ended up actionable but absent from + ``changed_property_log``, so ``log_module_type_changes`` never showed the property diff + and the "Modified module types" count was wrong. + """ + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + mock_graphql_requests.side_effect = paginate_dispatch( + { + "manufacturer_list": [], + "device_type_list": [], + "module_type_list": [], + "image_attachment_list": [], + } + ) + nb = NetBox(mock_settings, mock_settings.handle) + # Skip the GraphQL preload — these tests isolate scalar/image diffs. 
+ nb.device_types._global_preload_done = True + + existing_mt = DotDict( + {"id": 42, "model": "IOM-s-3.0T", "part_number": "OLD_PN", "manufacturer": {"slug": "nokia"}} + ) + all_mts = {"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/tmp/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + + # NetBox has no image attachments for this module → missing image triggers image_changed. + with patch.object(nb, "_discover_module_image_files", return_value=["/tmp/IOM-s-3.0T.front.png"]): + with patch.object(nb, "_fetch_module_type_existing_images", return_value={42: set()}): + actionable, existing_images_map, changed_property_log = nb.filter_actionable_module_types( + [module_type], + all_mts, + only_new=False, + ) + + assert actionable == [module_type] + # Existing-images map drives the upload diff in create_module_types — assert it + # records the empty set for module 42 so a regression that drops the second + # return value is caught. + assert existing_images_map == {42: set()} + # Property diff MUST still be captured even though images also changed. + assert len(changed_property_log) == 1 + mfr_slug, model, fields_info, _ = changed_property_log[0] + assert mfr_slug == "nokia" + assert model == "IOM-s-3.0T" + assert any(f == "part_number" for f, _, _ in fields_info) + + def test_existing_module_with_only_missing_image_is_actionable_but_not_logged( + self, mock_settings, mock_pynetbox, mock_graphql_requests + ): + """Image-only change: actionable (so the image is uploaded) but NOT in changed_property_log. + + Image-only updates are handled in default mode and must not appear as "Modified + module types" or trigger the misleading ``--update`` hint. 
+ """ + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + mock_graphql_requests.side_effect = paginate_dispatch( + { + "manufacturer_list": [], + "device_type_list": [], + "module_type_list": [], + "image_attachment_list": [], + } + ) + nb = NetBox(mock_settings, mock_settings.handle) + # Skip the GraphQL preload — these tests isolate scalar/image diffs. + nb.device_types._global_preload_done = True + + existing_mt = DotDict( + {"id": 42, "model": "IOM-s-3.0T", "part_number": "3HE16474AA", "manufacturer": {"slug": "nokia"}} + ) + all_mts = {"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/tmp/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + + with patch.object(nb, "_discover_module_image_files", return_value=["/tmp/IOM-s-3.0T.front.png"]): + with patch.object(nb, "_fetch_module_type_existing_images", return_value={42: set()}): + actionable, existing_images_map, changed_property_log = nb.filter_actionable_module_types( + [module_type], + all_mts, + only_new=False, + ) + + assert actionable == [module_type] + # Image-only change must surface the existing-images map (drives upload diff) + # without polluting the property log. + assert existing_images_map == {42: set()} + assert changed_property_log == [] + + def test_existing_module_with_unchanged_property_is_not_actionable( + self, mock_settings, mock_pynetbox, mock_graphql_requests + ): + """Existing module type whose properties all match NetBox is not actionable.""" + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + mock_graphql_requests.side_effect = paginate_dispatch( + { + "manufacturer_list": [], + "device_type_list": [], + "module_type_list": [], + "image_attachment_list": [], + } + ) + nb = NetBox(mock_settings, mock_settings.handle) + # Skip the GraphQL preload — these tests isolate scalar/image diffs. 
+ nb.device_types._global_preload_done = True + + existing_mt = DotDict( + {"id": 42, "model": "IOM-s-3.0T", "part_number": "3HE16474AA", "manufacturer": {"slug": "nokia"}} + ) + all_mts = {"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/tmp/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + + with patch("glob.glob", return_value=[]): + with patch.object(nb, "_fetch_module_type_existing_images", return_value={}): + actionable, _, _ = nb.filter_actionable_module_types( + [module_type], + all_mts, + only_new=False, + ) + + assert actionable == [] class TestCreateModuleTypesEdge: @@ -2687,19 +3693,367 @@ def test_creates_module_type_with_power_outlets_console_server_ports_front_ports "front-ports": [{"name": "FP1"}], "src": "/repo/module-types/cisco/lc.yaml", } - nb.create_module_types([module_type], all_module_types={}, module_type_existing_images={}) - nb.device_types.create_module_power_outlets.assert_called_once() - nb.device_types.create_module_console_server_ports.assert_called_once() - nb.device_types.create_module_front_ports.assert_called_once() + nb.create_module_types([module_type], all_module_types={}, module_type_existing_images={}) + nb.device_types.create_module_power_outlets.assert_called_once() + nb.device_types.create_module_console_server_ports.assert_called_once() + nb.device_types.create_module_front_ports.assert_called_once() + + def test_existing_module_type_property_update_calls_api(self, mock_settings, mock_pynetbox): + """Existing module type with changed part_number calls module_types.update and increments counter.""" + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + "part_number": "OLD_PN", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = 
{"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + nb.create_module_types( + [module_type], + all_module_types=all_module_types, + module_type_existing_images={}, + ) + mock_pynetbox.api.return_value.dcim.module_types.update.assert_called_once() + # Verify the PATCH payload targets the right resource and field — guards + # against a regression that updates the wrong module or omits part_number. + update_payload = mock_pynetbox.api.return_value.dcim.module_types.update.call_args.args[0][0] + assert update_payload["id"] == 5 + assert update_payload["part_number"] == "3HE16474AA" + assert nb.counter["module_updated"] == 1 + + def test_existing_module_type_property_unchanged_no_api_call(self, mock_settings, mock_pynetbox): + """Existing module type with matching part_number does not call module_types.update.""" + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = {"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + nb.create_module_types( + [module_type], + all_module_types=all_module_types, + module_type_existing_images={}, + ) + mock_pynetbox.api.return_value.dcim.module_types.update.assert_not_called() + assert nb.counter["module_updated"] == 0 + + def test_existing_module_type_only_new_skips_property_update(self, mock_settings, mock_pynetbox): + """only_new=True skips property update even when part_number differs.""" + from core.graphql_client import DotDict + + 
mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + "part_number": "OLD_PN", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = {"nokia": {"IOM-s-3.0T": existing_mt}} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + nb.create_module_types( + [module_type], + only_new=True, + all_module_types=all_module_types, + module_type_existing_images={}, + ) + mock_pynetbox.api.return_value.dcim.module_types.update.assert_not_called() + assert nb.counter["module_updated"] == 0 + + def test_existing_module_type_component_update_calls_update_components( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """Existing module type with changed component property calls update_components and increments counter.""" + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = make_device_types(nb_api=mock_pynetbox.api.return_value) + + # Simulate update_components actually updating a component (increments components_updated) + def _do_update(*args, **kwargs): + nb.counter["components_updated"] += 1 + + nb.device_types.update_components = MagicMock(side_effect=_do_update) + + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = {"nokia": {"IOM-s-3.0T": existing_mt}} + + # Cache shows interface with no description; YAML has a description → COMPONENT_CHANGED + nb.device_types.cached_components = { + "interface_templates": { + ("module", 5): { + "xe-0": DotDict( + { + "id": "10", + "name": "xe-0", + "description": "", + "type": {"value": "10gbase-x-sfpp"}, + } + ) + } + }, + 
} + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "interfaces": [{"name": "xe-0", "type": "10gbase-x-sfpp", "description": "Uplink"}], + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + nb.create_module_types( + [module_type], + all_module_types=all_module_types, + module_type_existing_images={}, + ) + nb.device_types.update_components.assert_called_once() + # Scalar PATCH must NOT fire here — part_number already matches NetBox. + # A regression that always issues the PATCH would slip past the existing + # counter assertion below. + mock_pynetbox.api.return_value.dcim.module_types.update.assert_not_called() + # Inspect the diff payload: must contain a COMPONENT_CHANGED for "xe-0" + # carrying a PropertyChange for "description". + from core.change_detector import ChangeType + + component_changes = nb.device_types.update_components.call_args.args[2] + changed = [c for c in component_changes if c.change_type == ChangeType.COMPONENT_CHANGED] + assert len(changed) == 1 + assert changed[0].component_name == "xe-0" + prop_names = {pc.property_name for pc in changed[0].property_changes} + assert prop_names == {"description"} + assert nb.counter["module_updated"] == 1 + + def test_existing_module_type_property_and_component_update_increments_once( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """Both property and component change → module_updated incremented only once.""" + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = make_device_types(nb_api=mock_pynetbox.api.return_value) + + # Simulate update_components actually updating a component + def _do_update(*args, **kwargs): + nb.counter["components_updated"] += 1 + + nb.device_types.update_components = MagicMock(side_effect=_do_update) + + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + 
"part_number": "OLD_PN", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = {"nokia": {"IOM-s-3.0T": existing_mt}} + + nb.device_types.cached_components = { + "interface_templates": { + ("module", 5): { + "xe-0": DotDict( + { + "id": "10", + "name": "xe-0", + "description": "", + "type": {"value": "10gbase-x-sfpp"}, + } + ) + } + }, + } + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "interfaces": [{"name": "xe-0", "type": "10gbase-x-sfpp", "description": "Uplink"}], + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + nb.create_module_types( + [module_type], + all_module_types=all_module_types, + module_type_existing_images={}, + ) + nb.device_types.update_components.assert_called_once() + # Verify the scalar PATCH (module-type property) was actually attempted + # for the right module — guards against a regression where the property + # diff is missed but the component diff still fires. + scalar_update = mock_pynetbox.api.return_value.dcim.module_types.update + scalar_update.assert_called_once() + scalar_payload = scalar_update.call_args.args[0][0] + assert scalar_payload["id"] == 5 + assert scalar_payload["part_number"] == "3HE16474AA" + # Inspect the diff payload: must contain a COMPONENT_CHANGED for "xe-0" + # property and is applied via the module-type PATCH path, not via the + # component diff). 
+ from core.change_detector import ChangeType + + component_changes = nb.device_types.update_components.call_args.args[2] + changed = [c for c in component_changes if c.change_type == ChangeType.COMPONENT_CHANGED] + assert len(changed) == 1 + assert changed[0].component_name == "xe-0" + assert {pc.property_name for pc in changed[0].property_changes} == {"description"} + # property update already incremented; component path should NOT double-count + assert nb.counter["module_updated"] == 1 + + def test_existing_module_type_removal_only_no_counter_increment( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """COMPONENT_REMOVED-only changes call update_components but do NOT increment module_updated.""" + from core.graphql_client import DotDict + + mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = make_device_types(nb_api=mock_pynetbox.api.return_value) + # update_components is a no-op for removals (no counter change) + nb.device_types.update_components = MagicMock() + + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = {"nokia": {"IOM-s-3.0T": existing_mt}} + + # Cache has an interface that YAML doesn't have → COMPONENT_REMOVED + nb.device_types.cached_components = { + "interface_templates": { + ("module", 5): { + "xe-0": DotDict( + {"id": "10", "name": "xe-0", "description": "", "type": {"value": "10gbase-x-sfpp"}} + ), + "xe-extra": DotDict({"id": "11", "name": "xe-extra", "description": ""}), + } + }, + } + + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", + "interfaces": [{"name": "xe-0", "type": "10gbase-x-sfpp", "description": ""}], + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + nb.create_module_types( + [module_type], + all_module_types=all_module_types, + 
module_type_existing_images={}, + ) + nb.device_types.update_components.assert_called_once() + # Inspect the diff payload: must contain only a COMPONENT_REMOVED for + # "xe-extra" — no PropertyChanges, no other change types. + from core.change_detector import ChangeType + + component_changes = nb.device_types.update_components.call_args.args[2] + removed = [c for c in component_changes if c.change_type == ChangeType.COMPONENT_REMOVED] + non_removed = [c for c in component_changes if c.change_type != ChangeType.COMPONENT_REMOVED] + assert len(removed) == 1 + assert removed[0].component_name == "xe-extra" + assert removed[0].property_changes == [] + assert non_removed == [] + # removal-only: update_components did nothing (no counter bumps) → module_updated stays 0 + assert nb.counter["module_updated"] == 0 + + def test_property_update_plus_removal_only_remove_false_counts_as_updated( + self, mock_settings, mock_pynetbox, graphql_client, make_device_types + ): + """Properties changed + removal-only diff with remove_components=False → module_updated incremented.""" + from core.graphql_client import DotDict + mock_pynetbox.api.return_value.version = "3.5" + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types = make_device_types(nb_api=mock_pynetbox.api.return_value) + nb.device_types.update_components = MagicMock() + nb.device_types.remove_components = MagicMock() -# --------------------------------------------------------------------------- -# count_module_type_images: existing MT with non-matching image (line 680) -# --------------------------------------------------------------------------- + existing_mt = DotDict( + { + "id": 5, + "model": "IOM-s-3.0T", + "part_number": "OLD_PN", + "manufacturer": {"name": "Nokia", "slug": "nokia"}, + } + ) + all_module_types = {"nokia": {"IOM-s-3.0T": existing_mt}} + # Cache has an extra interface not in YAML → COMPONENT_REMOVED + nb.device_types.cached_components = { + "interface_templates": { + ("module", 5): { + 
"xe-extra": DotDict({"id": "11", "name": "xe-extra", "description": ""}), + } + }, + } -class TestCountModuleTypeImagesExisting: - """Tests for count_module_type_images with existing module types.""" + module_type = { + "manufacturer": {"slug": "nokia"}, + "model": "IOM-s-3.0T", + "part_number": "3HE16474AA", # changed from OLD_PN + "interfaces": [], # xe-extra absent → COMPONENT_REMOVED + "src": "/repo/module-types/Nokia/IOM-s-3.0T.yaml", + } + # remove_components=False: the removal-only diff is not actionable, + # but the scalar PATCH succeeded → module_updated must be incremented. + nb.create_module_types( + [module_type], + all_module_types=all_module_types, + module_type_existing_images={}, + remove_components=False, + ) + mock_pynetbox.api.return_value.dcim.module_types.update.assert_called_once() + nb.device_types.remove_components.assert_not_called() + assert nb.counter["module_updated"] == 1 + assert nb.counter.get("module_update_failed", 0) == 0 def test_existing_module_new_image_counted(self, tmp_path): """Existing MT whose image name is NOT in existing_names increments count.""" @@ -2904,13 +4258,14 @@ def test_own_executor_with_progress( progress.add_task.return_value = 1 # Run only one component to keep the test fast components = [("interface_templates", "Interface Templates")] - dt._preload_global(components, progress_wrapper=None, progress=progress) + with patch.object(dt, "_get_endpoint_totals", return_value={"interface_templates": 0}): + dt._preload_global(components, progress_wrapper=None, progress=progress) progress.add_task.assert_called() def test_preload_global_no_progress_future_failure( self, mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types ): - """When no progress and a future raises, log is called and result is [].""" + """When no progress and a future raises, the exception is logged and re-raised.""" mock_nb_api = mock_pynetbox.api.return_value dt = make_device_types(nb_api=mock_nb_api) @@ -2930,7 
+4285,8 @@ def test_preload_global_no_progress_future_failure( "finished_endpoints": set(), } components = [("interface_templates", "Interface Templates")] - dt._preload_global(components, preload_job=preload_job, progress=None) + with pytest.raises(RuntimeError, match="network error"): + dt._preload_global(components, preload_job=preload_job, progress=None) mock_settings.handle.log.assert_any_call("Preload failed for Interface Templates: network error") @@ -2980,17 +4336,6 @@ def test_empty_ids_returns_immediately(self, mock_settings, mock_pynetbox, graph dt.preload_module_type_components(set(), ["interfaces"]) mock_nb_api.dcim.interface_templates.filter.assert_not_called() - def test_duplicate_endpoint_skipped(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): - """Same endpoint_attr from two keys is only fetched once.""" - mock_nb_api = mock_pynetbox.api.return_value - dt = make_device_types(nb_api=mock_nb_api) - mock_nb_api.dcim.power_port_templates.filter.return_value = [] - - # Both "power-ports" and "power-port" map to the same endpoint_attr - dt.preload_module_type_components({1}, ["power-ports", "power-port"]) - # Should only call filter once (deduplicated) - assert mock_nb_api.dcim.power_port_templates.filter.call_count == 1 - def test_item_with_no_module_type_skipped(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): """Items where item.module_type is None are skipped.""" mock_nb_api = mock_pynetbox.api.return_value @@ -3154,20 +4499,6 @@ def test_property_update_request_error_logged( class TestUpdateComponentsAdditionsBranches: """Tests for component-addition branches inside update_components.""" - def test_alias_resolution_power_port(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): - """'power-port' alias resolves to 'power-ports' component type addition.""" - from core.change_detector import ChangeType, ComponentChange - - mock_nb_api = mock_pynetbox.api.return_value - dt = 
make_device_types(nb_api=mock_nb_api) - dt.cached_components = {"power_port_templates": {("device", 1): {}}} - - changes = [ComponentChange("power-ports", "PSU1", ChangeType.COMPONENT_ADDED)] - # yaml_data uses alias "power-port" - yaml_data = {"power-port": [{"name": "PSU1"}]} - dt.update_components(yaml_data, 1, changes, parent_type="device") - mock_nb_api.dcim.power_port_templates.create.assert_called() - def test_yaml_key_none_continues(self, mock_settings, mock_pynetbox, graphql_client, make_device_types): """If component_type not in yaml_data (neither canonical nor alias), skip.""" from core.change_detector import ChangeType, ComponentChange @@ -3742,9 +5073,16 @@ def test_progress_iterator_used_when_provided( "src": "/tmp/device-types/cisco/testswitch.yaml", } ] - # progress wraps the list but is also iterable - nb.create_device_types(device_types, progress=iter(device_types)) - # No assertion needed — just verify no exception + # Use a tracking iterator to verify the progress wrapper is actually consumed. 
+ consumed = [] + + def tracking_iter(): + for item in device_types: + consumed.append(item) + yield item + + nb.create_device_types(device_types, progress=tracking_iter()) + assert len(consumed) > 0, "Progress iterator was not consumed by create_device_types" def test_image_file_not_found_logs_error( self, mock_settings, mock_pynetbox, graphql_client, make_device_types, tmp_path @@ -3831,13 +5169,21 @@ def test_progress_iterator_used(self, mock_settings, mock_pynetbox): "src": "/repo/module-types/cisco/lc.yaml", } ] + consumed = [] + + def tracking_iter(): + for item in module_types: + consumed.append(item) + yield item + nb.create_module_types( module_types, - progress=iter(module_types), + progress=tracking_iter(), all_module_types={}, module_type_existing_images={}, ) nb.netbox.dcim.module_types.create.assert_called_once() + assert len(consumed) > 0, "Progress iterator was not consumed by create_module_types" def test_all_module_types_fetched_when_none(self, mock_settings, mock_pynetbox, mock_graphql_requests): """all_module_types is fetched when not supplied.""" @@ -4117,7 +5463,7 @@ class TestPreloadGlobalMissingLines: def test_already_done_endpoint_with_future_exception( self, mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types ): - """already_done endpoint whose future raises → log + empty records (lines 1119-1121).""" + """already_done endpoint whose future raises → log + exception re-raised.""" mock_nb_api = mock_pynetbox.api.return_value dt = make_device_types(nb_api=mock_nb_api) @@ -4138,7 +5484,8 @@ def test_already_done_endpoint_with_future_exception( "finished_endpoints": {"interface_templates"}, # already done } components = [("interface_templates", "Interface Templates")] - dt._preload_global(components, preload_job=preload_job, progress=progress) + with pytest.raises(RuntimeError, match="fetch failed"): + dt._preload_global(components, preload_job=preload_job, progress=progress) 
mock_settings.handle.log.assert_any_call("Preload failed for interface_templates: fetch failed") def test_already_done_endpoint_progress_update_exception_swallowed( @@ -4171,7 +5518,7 @@ def test_already_done_endpoint_progress_update_exception_swallowed( def test_pending_future_exception_logged( self, mock_settings, mock_pynetbox, mock_graphql_requests, graphql_client, make_device_types ): - """Future raising while pending logs error and stores empty records (1153-1155).""" + """Future raising while pending logs error and re-raises.""" mock_nb_api = mock_pynetbox.api.return_value dt = make_device_types(nb_api=mock_nb_api) @@ -4193,7 +5540,8 @@ def test_pending_future_exception_logged( "finished_endpoints": set(), } components = [("interface_templates", "Interface Templates")] - dt._preload_global(components, preload_job=preload_job, progress=progress) + with pytest.raises(RuntimeError, match="network error"): + dt._preload_global(components, preload_job=preload_job, progress=progress) mock_settings.handle.log.assert_any_call("Preload failed for interface_templates: network error") def test_progress_updates_get_with_timeout_advances_task( @@ -4290,14 +5638,22 @@ def fake_fetch(endpoint_name, progress_callback=None, expected_total=None): progress = MagicMock() progress.add_task.return_value = 1 - with patch.object(dt, "_fetch_global_endpoint_records", side_effect=fake_fetch): - preload_job = dt.start_component_preload(progress=progress) - # Let the futures complete - dt.preload_all_components(preload_job=preload_job, progress=progress) + with patch.object( + dt, "_get_endpoint_totals", return_value={ep: 0 for ep, _ in dt._component_preload_targets()} + ): + with patch.object(dt, "_fetch_global_endpoint_records", side_effect=fake_fetch): + preload_job = dt.start_component_preload(progress=progress) + # Let the futures complete + dt.preload_all_components(preload_job=preload_job, progress=progress) # update_progress was called, which put items in progress_updates queue 
# pump_preload_progress or preload_all_components drained them progress.add_task.assert_called() + # Assert the advance reached progress.update — guards against a regression + # where the callback path silently stops publishing advances. + update_calls = [c for c in progress.update.call_args_list if c.kwargs.get("advance")] + total_advance = sum(c.kwargs["advance"] for c in update_calls) + assert total_advance >= 1, "progress.update was never called with advance>=1" class TestPreloadGlobalOwnExecutorProgressCallback: @@ -4321,11 +5677,17 @@ def fake_fetch(endpoint_name, progress_callback=None, expected_total=None): progress.add_task.return_value = 1 components = [("interface_templates", "Interface Templates")] - with patch.object(dt, "_fetch_global_endpoint_records", side_effect=fake_fetch): - dt._preload_global(components, progress_wrapper=None, progress=progress) + with patch.object(dt, "_get_endpoint_totals", return_value={"interface_templates": 0}): + with patch.object(dt, "_fetch_global_endpoint_records", side_effect=fake_fetch): + dt._preload_global(components, progress_wrapper=None, progress=progress) progress.add_task.assert_called() progress.stop_task.assert_called() + # Assert the advance reached progress.update — guards against a regression + # where the own-executor callback stops publishing advances. 
+ update_calls = [c for c in progress.update.call_args_list if c.kwargs.get("advance")] + total_advance = sum(c.kwargs["advance"] for c in update_calls) + assert total_advance >= 1, "progress.update was never called with advance>=1" class TestUploadImagesRequestException: @@ -4778,3 +6140,412 @@ def test_multiple_legacy_mappings_logs_warning_with_context(self, make_device_ty log_calls = [str(c) for c in mock_settings.handle.log.call_args_list] assert any("only first mapping applied" in c for c in log_calls) assert any("MyDevice" in c for c in log_calls) + + +# --------------------------------------------------------------------------- +# _module_type_has_missing_components (lines 770-782) +# --------------------------------------------------------------------------- + + +class TestModuleTypeHasMissingComponents: + """Tests for NetBox._module_type_has_missing_components().""" + + def test_returns_true_when_component_missing(self, mock_settings, mock_pynetbox, make_device_types): + """Returns True when a YAML-defined component name is absent from the cache.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + # Pre-populate cache with an empty interface set for module 42. 
+ nb.device_types.cached_components["interface_templates"] = {("module", 42): {}} + + module_type = {"interfaces": [{"name": "xe-0/0/0"}]} + existing_module = MagicMock() + existing_module.id = 42 + + result = nb._module_type_has_missing_components(module_type, existing_module, ["interfaces"]) + + assert result is True + + def test_returns_false_when_all_components_present(self, mock_settings, mock_pynetbox, make_device_types): + """Returns False when all YAML-defined components exist in the cache.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + existing_iface = MagicMock() + existing_iface.name = "xe-0/0/0" + nb.device_types.cached_components["interface_templates"] = {("module", 42): {"xe-0/0/0": existing_iface}} + + module_type = {"interfaces": [{"name": "xe-0/0/0"}]} + existing_module = MagicMock() + existing_module.id = 42 + + result = nb._module_type_has_missing_components(module_type, existing_module, ["interfaces"]) + + assert result is False + + def test_returns_false_when_no_components_in_yaml(self, mock_settings, mock_pynetbox, make_device_types): + """Returns False when the module type dict has no components under the key.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + module_type = {} # no "interfaces" key + existing_module = MagicMock() + existing_module.id = 99 + + result = nb._module_type_has_missing_components(module_type, existing_module, ["interfaces"]) + + assert result is False + + +# --------------------------------------------------------------------------- +# filter_actionable_module_types _MISSING skip (line 847) +# --------------------------------------------------------------------------- + + +class TestFilterActionableModuleTypesMissingAttr: + """Tests for the _MISSING guard in filter_actionable_module_types.""" + + def 
test_missing_netbox_field_is_not_treated_as_change(self, mock_settings, mock_pynetbox, mock_graphql_requests): + """When existing module lacks an attribute, it's skipped — no false positive change.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + mock_graphql_requests.side_effect = paginate_dispatch( + { + "manufacturer_list": [], + "device_type_list": [], + "module_type_list": [ + { + "id": "10", + "model": "Linecard-A", + "manufacturer": {"id": "5", "name": "Arista", "slug": "arista"}, + } + ], + "image_attachment_list": [], + } + ) + + nb = NetBox(mock_settings, mock_settings.handle) + nb.device_types._global_preload_done = True + + # existing_module is a spec=[] object so getattr(…, field, _MISSING) returns _MISSING + existing_module = MagicMock(spec=[]) + existing_module.id = 10 + existing_module.manufacturer = MagicMock() + existing_module.manufacturer.slug = "arista" + existing_module.model = "Linecard-A" + + all_module_types = {"arista": {"Linecard-A": existing_module}} + + module_type = { + "manufacturer": {"slug": "arista"}, + "model": "Linecard-A", + "slug": "linecard-a", + "part_number": "LC-123", + "src": "/repo/module-types/arista/linecard-a.yaml", + } + + with patch("glob.glob", return_value=[]): + actionable, _, changed_property_log = nb.filter_actionable_module_types( + [module_type], + all_module_types, + only_new=False, + ) + + # The _MISSING sentinel must prevent the module from being flagged for update. + # A MagicMock(spec=[]) has no attributes, so every field access returns _MISSING + # and the module should NOT appear in actionable or the change log. 
+ assert actionable == [] + assert changed_property_log == [] + + +# --------------------------------------------------------------------------- +# log_module_type_changes non-empty log (lines 875-878) +# --------------------------------------------------------------------------- + + +class TestLogModuleTypeChanges: + """Tests for NetBox.log_module_type_changes().""" + + def test_non_empty_log_emits_verbose_output(self, mock_settings, mock_pynetbox): + """A non-empty changed_property_log triggers verbose logging.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + mock_settings.handle.verbose_log.reset_mock() + + changed_property_log = [("cisco", "CM1", [("part_number", "old", "new")], [])] + nb.log_module_type_changes(changed_property_log) + + mock_settings.handle.verbose_log.assert_called() + + def test_empty_log_emits_nothing(self, mock_settings, mock_pynetbox): + """An empty changed_property_log does not trigger any logging calls.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + mock_settings.handle.verbose_log.reset_mock() + + nb.log_module_type_changes([]) + + mock_settings.handle.verbose_log.assert_not_called() + + +# --------------------------------------------------------------------------- +# _try_update_module_type error handlers (lines 918-925) +# --------------------------------------------------------------------------- + + +class TestTryUpdateModuleTypeErrors: + """Tests for RequestError and retryable-exception handlers in _try_update_module_type.""" + + def _make_nb(self, mock_settings, mock_pynetbox): + mock_pynetbox.api.return_value.version = "3.5" + return NetBox(mock_settings, mock_settings.handle) + + def _make_module_type_res(self): + res = MagicMock() + res.id = 1 + res.manufacturer.name = "Cisco" + res.model = "CM1" + return res + + def 
test_request_error_returns_false_and_logs(self, mock_settings, mock_pynetbox): + """pynetbox.RequestError during update causes (False, False) return and log.""" + import pynetbox as real_pynb + + mock_pynetbox.api.return_value.version = "3.5" + mock_pynetbox.RequestError = real_pynb.RequestError + + nb = self._make_nb(mock_settings, mock_pynetbox) + mock_settings.handle.log.reset_mock() + + err = real_pynb.RequestError(MagicMock(status_code=400, content=b'{"detail":"bad"}')) + nb.netbox.dcim.module_types.update.side_effect = err + + curr_mt = {"part_number": "NEW-123"} + module_type_res = self._make_module_type_res() + module_type_res.part_number = "OLD-123" + + ok, updated = nb._try_update_module_type(curr_mt, module_type_res, "test.yaml") + + assert ok is False + assert updated is False + mock_settings.handle.log.assert_called() + + def test_retryable_exception_returns_false_and_logs(self, mock_settings, mock_pynetbox): + """A ConnectionError (retryable) after max retries causes (False, False) return.""" + import pynetbox as real_pynb + import requests + + mock_pynetbox.RequestError = real_pynb.RequestError + mock_pynetbox.api.return_value.version = "3.5" + + nb = self._make_nb(mock_settings, mock_pynetbox) + mock_settings.handle.log.reset_mock() + + nb.netbox.dcim.module_types.update.side_effect = requests.exceptions.ConnectionError("dropped") + + curr_mt = {"part_number": "NEW-123"} + module_type_res = self._make_module_type_res() + module_type_res.part_number = "OLD-123" + + with patch("core.netbox_api.time.sleep") as mock_sleep: + ok, updated = nb._try_update_module_type(curr_mt, module_type_res, "test.yaml") + + assert ok is False + assert updated is False + mock_settings.handle.log.assert_called() + # Prove the retry loop actually ran the full budget with the correct backoff. 
+ from core.netbox_api import _MAX_RETRIES, _RETRY_BACKOFF + + assert nb.netbox.dcim.module_types.update.call_count == _MAX_RETRIES + 1 + assert mock_sleep.call_count == _MAX_RETRIES + assert [call.args[0] for call in mock_sleep.call_args_list] == list(_RETRY_BACKOFF[:_MAX_RETRIES]) + + +# --------------------------------------------------------------------------- +# _process_single_module_type: creation retryable exception (lines 979-983) +# --------------------------------------------------------------------------- + + +class TestProcessSingleModuleTypeCreateRetryable: + """Tests for the retryable-exception handler when creating a module type.""" + + def test_retryable_exception_on_create_returns_false(self, mock_settings, mock_pynetbox, mock_graphql_requests): + """ConnectionError during module type creation causes the method to return False.""" + from unittest.mock import patch + + import pynetbox as real_pynb + import requests + + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + mock_pynetbox.RequestError = real_pynb.RequestError + + nb = NetBox(mock_settings, mock_settings.handle) + mock_settings.handle.log.reset_mock() + + mock_nb_api.dcim.module_types.create.side_effect = requests.exceptions.ConnectionError("network down") + + curr_mt = { + "manufacturer": {"slug": "cisco"}, + "model": "CM-Retryable", + "slug": "cm-retryable", + } + + with patch("core.netbox_api.time.sleep") as mock_sleep: + result = nb._process_single_module_type( + curr_mt, + "test.yaml", + {}, + {}, + only_new=False, + ) + + assert result is False + mock_settings.handle.log.assert_called() + # Prove the retry loop actually ran the full budget with the correct backoff. 
+ from core.netbox_api import _MAX_RETRIES, _RETRY_BACKOFF + + assert mock_nb_api.dcim.module_types.create.call_count == _MAX_RETRIES + 1 + assert mock_sleep.call_count == _MAX_RETRIES + assert [call.args[0] for call in mock_sleep.call_args_list] == list(_RETRY_BACKOFF[:_MAX_RETRIES]) + + +# --------------------------------------------------------------------------- +# _process_single_module_type: remove_components call (line 1033) +# --------------------------------------------------------------------------- + + +class TestProcessSingleModuleTypeRemoveComponents: + """Tests that remove_components=True calls device_types.remove_components.""" + + def test_remove_components_is_called_when_flag_set( + self, mock_settings, mock_pynetbox, mock_graphql_requests, make_device_types + ): + """When remove_components=True and there are component changes, remove_components is called.""" + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + + # Build a pre-existing module type so the "else" path (update) is taken. + existing_module = MagicMock() + existing_module.id = 55 + existing_module.manufacturer.name = "Cisco" + existing_module.model = "CM-Remove" + + all_module_types = {"cisco": {"CM-Remove": existing_module}} + + # Populate cache so _compare_components returns a COMPONENT_REMOVED change. 
+ stale_iface = MagicMock() + stale_iface.name = "xe-stale" + nb.device_types.cached_components["interface_templates"] = {("module", 55): {"xe-stale": stale_iface}} + nb.device_types._global_preload_done = True + + curr_mt = { + "manufacturer": {"slug": "cisco"}, + "model": "CM-Remove", + "slug": "cm-remove", + "interfaces": [], # empty → xe-stale should be detected as removed + } + + nb.device_types.remove_components = MagicMock() + nb.device_types.update_components = MagicMock() + + result = nb._process_single_module_type( + curr_mt, + "test.yaml", + all_module_types, + {}, + only_new=False, + remove_components=True, + ) + + assert result is True + nb.device_types.remove_components.assert_called_once() + # Verify the payload: exactly one COMPONENT_REMOVED change for "xe-stale". + from core.change_detector import ChangeType + + removal_changes = nb.device_types.remove_components.call_args.args[1] + assert len(removal_changes) == 1 + assert removal_changes[0].change_type == ChangeType.COMPONENT_REMOVED + assert removal_changes[0].component_name == "xe-stale" + + def test_component_reconciliation_continues_when_scalar_patch_fails( + self, mock_settings, mock_pynetbox, mock_graphql_requests, make_device_types + ): + """A failed scalar PATCH must NOT skip subsequent component reconciliation. + + Regression: previously the early ``return False`` on a failed + ``module_types.update`` left existing modules with property + component + diffs in a partial-sync state because component reconciliation was + skipped entirely. + """ + mock_nb_api = mock_pynetbox.api.return_value + mock_nb_api.version = "3.5" + + nb = NetBox(mock_settings, mock_settings.handle) + + existing_module = MagicMock() + existing_module.id = 77 + existing_module.manufacturer.name = "Cisco" + existing_module.model = "CM-Fail-Patch" + # Make the scalar diff non-empty so update() will be invoked and fail. 
+ existing_module.part_number = "OLD_PN" + + all_module_types = {"cisco": {"CM-Fail-Patch": existing_module}} + + # Cache a stale interface so _compare_components yields a COMPONENT_REMOVED. + stale_iface = MagicMock() + stale_iface.name = "xe-stale" + nb.device_types.cached_components["interface_templates"] = {("module", 77): {"xe-stale": stale_iface}} + nb.device_types._global_preload_done = True + + curr_mt = { + "manufacturer": {"slug": "cisco"}, + "model": "CM-Fail-Patch", + "slug": "cm-fail-patch", + "part_number": "NEW_PN", # forces a scalar diff + "interfaces": [], # empty → xe-stale should be detected as removed + } + + # Force the scalar PATCH to fail with a pynetbox RequestError. + # pynetbox is mocked at module level, so we restore the real exception + # class first (otherwise `except pynetbox.RequestError` raises TypeError). + import pynetbox as _real_pynb + + mock_pynetbox.RequestError = _real_pynb.RequestError + request_error = _real_pynb.RequestError(MagicMock(status_code=400, content=b'{"detail":"boom"}')) + mock_nb_api.dcim.module_types.update = MagicMock(side_effect=request_error) + + nb.device_types.update_components = MagicMock() + nb.device_types.remove_components = MagicMock() + + result = nb._process_single_module_type( + curr_mt, + "test.yaml", + all_module_types, + {}, + only_new=False, + remove_components=True, + ) + + # The failed PATCH should NOT prevent component reconciliation. + # Verify the failing PATCH was actually attempted (regression: scalar + # diff detection silently skipping update() would still pass without + # this assertion). 
+ mock_nb_api.dcim.module_types.update.assert_called_once() + assert result is True + nb.device_types.remove_components.assert_called_once() + assert nb.counter["module_updated"] == 0 + assert nb.counter["module_update_failed"] == 1 + failures = nb.outcomes.failures() + assert len(failures) == 1 + assert "CM-Fail-Patch" in failures[0].identity diff --git a/tests/test_outcomes.py b/tests/test_outcomes.py new file mode 100644 index 00000000..7f9527d0 --- /dev/null +++ b/tests/test_outcomes.py @@ -0,0 +1,106 @@ +"""Tests for ``core.outcomes.OutcomeRegistry``.""" + +from __future__ import annotations + +from core.outcomes import EntityKind, Outcome, OutcomeRegistry + + +def test_registry_starts_empty(): + reg = OutcomeRegistry() + assert reg.records == [] + assert reg.failures() == [] + assert reg.partials() == [] + assert reg.summary_by_kind() == {} + assert reg.render_failure_report() == [] + + +def test_registry_records_failures_and_partials(): + reg = OutcomeRegistry() + reg.record( + EntityKind.DEVICE_TYPE, + "Supermicro/SuperServer", + Outcome.FAILED, + reason="subdevice_role flip blocked", + blocking_objects=["bay-1", "bay-2"], + hint="re-run with --force-resolve-conflicts", + ) + reg.record( + EntityKind.MODULE_TYPE, + "Cisco/SFP-X", + Outcome.PARTIAL, + reason="component PATCH failed", + ) + reg.record(EntityKind.DEVICE_TYPE, "ok/ok", Outcome.UPDATED) + + failures = reg.failures() + assert len(failures) == 1 + assert failures[0].identity == "Supermicro/SuperServer" + assert failures[0].blocking_objects == ["bay-1", "bay-2"] + + partials = reg.partials() + assert len(partials) == 1 + assert partials[0].identity == "Cisco/SFP-X" + + summary = reg.summary_by_kind() + assert summary[EntityKind.DEVICE_TYPE] == {Outcome.FAILED: 1, Outcome.UPDATED: 1} + assert summary[EntityKind.MODULE_TYPE] == {Outcome.PARTIAL: 1} + + +def test_render_failure_report_includes_identity_and_reason(): + reg = OutcomeRegistry() + reg.record( + EntityKind.DEVICE_TYPE, + 
"Supermicro/SYS-6028TR-HTR", + Outcome.FAILED, + reason="subdevice_role parent->child blocked", + blocking_objects=["bay-A"], + hint="re-run with --force-resolve-conflicts", + ) + lines = reg.render_failure_report() + text = "\n".join(lines) + assert "Failed entities: 1" in text + assert "Supermicro/SYS-6028TR-HTR" in text + assert "subdevice_role parent->child blocked" in text + assert "bay-A" in text + assert "--force-resolve-conflicts" in text + + +def test_render_failure_report_truncates_long_blocking_lists(): + reg = OutcomeRegistry() + blockers = [f"bay-{i}" for i in range(12)] + reg.record( + EntityKind.DEVICE_TYPE, + "Vendor/Model", + Outcome.FAILED, + reason="constraint", + blocking_objects=blockers, + ) + text = "\n".join(reg.render_failure_report()) + assert "bay-0" in text + assert "bay-4" in text + # Truncation marker appears for >5 items. + assert "+7 more" in text + + +def test_render_failure_report_empty_when_only_successes(): + reg = OutcomeRegistry() + reg.record(EntityKind.DEVICE_TYPE, "a/b", Outcome.UPDATED) + reg.record(EntityKind.MODULE_TYPE, "c/d", Outcome.CREATED) + assert reg.render_failure_report() == [] + + +def test_render_failure_report_includes_partials_section(): + """Partial outcomes get their own section in the rendered report.""" + reg = OutcomeRegistry() + reg.record( + EntityKind.MODULE_TYPE, + "Nokia/IOM-s-3.0T", + Outcome.PARTIAL, + reason="image upload failed but properties applied", + ) + reg.record(EntityKind.DEVICE_TYPE, "v/no-reason-partial", Outcome.PARTIAL) + text = "\n".join(reg.render_failure_report()) + assert "Partial updates: 2" in text + assert "Nokia/IOM-s-3.0T" in text + assert "image upload failed but properties applied" in text + assert "v/no-reason-partial" in text diff --git a/tests/test_repo.py b/tests/test_repo.py index 789be249..4a4cd993 100644 --- a/tests/test_repo.py +++ b/tests/test_repo.py @@ -881,3 +881,177 @@ def test_parse_single_file_profile_null(tmp_path): result = 
parse_single_file(str(yaml_file)) assert result["profile"] is None + + +# --------------------------------------------------------------------------- +# get_racks_path (line 312) +# --------------------------------------------------------------------------- + + +class TestGetRacksPath: + """Tests for DTLRepo.get_racks_path().""" + + def _make_repo(self): + mock_args = MagicMock() + mock_args.url = "https://github.com/org/repo.git" + mock_args.branch = "master" + mock_handle = MagicMock() + with ( + patch("os.path.isdir", return_value=True), + patch("core.repo.Repo") as MockRepo, + ): + mock_git_repo = MagicMock() + mock_git_repo.remotes.origin.url = "https://github.com/org/repo.git" + ref = MagicMock() + ref.name = "origin/master" + mock_git_repo.remotes.origin.refs = [ref] + MockRepo.return_value = mock_git_repo + repo = DTLRepo(mock_args, "/tmp/repo", mock_handle) + return repo + + def test_get_racks_path_ends_with_rack_types(self): + repo = self._make_repo() + assert repo.get_racks_path().endswith("rack-types") + + +# --------------------------------------------------------------------------- +# parse_single_file generic Exception fallback (lines 231-232) +# --------------------------------------------------------------------------- + + +class TestParseSingleFileGenericException: + """Tests for the generic Exception handler in parse_single_file.""" + + def test_normalize_error_returns_error_string(self, tmp_path): + from core.repo import parse_single_file + + yaml_file = tmp_path / "device.yaml" + yaml_file.write_text("manufacturer: Cisco\nmodel: TestSwitch\n") + + with patch("core.repo.normalize_port_mappings", side_effect=RuntimeError("unexpected")): + result = parse_single_file(str(yaml_file)) + + assert isinstance(result, str) + assert result.startswith("Error:") + assert "unexpected" in result + + +# --------------------------------------------------------------------------- +# parse_files KeyboardInterrupt re-raise (lines 444-446) +# 
--------------------------------------------------------------------------- + + +class TestParseFilesKeyboardInterrupt: + """Tests that KeyboardInterrupt during parse_files is re-raised.""" + + def _make_repo(self): + mock_args = MagicMock() + mock_args.url = "https://github.com/org/repo.git" + mock_args.branch = "master" + mock_handle = MagicMock() + with ( + patch("os.path.isdir", return_value=True), + patch("core.repo.Repo") as MockRepo, + ): + mock_git_repo = MagicMock() + mock_git_repo.remotes.origin.url = "https://github.com/org/repo.git" + ref = MagicMock() + ref.name = "origin/master" + mock_git_repo.remotes.origin.refs = [ref] + MockRepo.return_value = mock_git_repo + repo = DTLRepo(mock_args, "/tmp/repo", mock_handle) + return repo + + def test_keyboard_interrupt_is_reraised(self): + import pytest + + repo = self._make_repo() + + with patch("core.repo.parse_single_file", side_effect=KeyboardInterrupt): + with pytest.raises(KeyboardInterrupt): + repo.parse_files(["fake_file.yaml"]) + + +# --------------------------------------------------------------------------- +# parse_files dedup: KeyError/TypeError path (lines 461-463) +# --------------------------------------------------------------------------- + + +class TestParseFilesKeyErrorDedup: + """Tests for the KeyError/TypeError dedup guard in parse_files.""" + + def _make_repo(self): + mock_args = MagicMock() + mock_args.url = "https://github.com/org/repo.git" + mock_args.branch = "master" + mock_handle = MagicMock() + with ( + patch("os.path.isdir", return_value=True), + patch("core.repo.Repo") as MockRepo, + ): + mock_git_repo = MagicMock() + mock_git_repo.remotes.origin.url = "https://github.com/org/repo.git" + ref = MagicMock() + ref.name = "origin/master" + mock_git_repo.remotes.origin.refs = [ref] + MockRepo.return_value = mock_git_repo + repo = DTLRepo(mock_args, "/tmp/repo", mock_handle) + return repo + + def test_item_without_manufacturer_is_included_without_dedup(self): + """Item missing 
'manufacturer' key skips dedup and is appended as-is.""" + repo = self._make_repo() + # Return an item with no 'manufacturer' key. + item_no_mfr = {"model": "UnknownSwitch", "src": "a.yaml"} + + with patch("core.repo.parse_single_file", return_value=item_no_mfr): + result = repo.parse_files(["fake_file.yaml"]) + + assert item_no_mfr in result + + +# --------------------------------------------------------------------------- +# parse_files duplicate logging (lines 471-476) +# --------------------------------------------------------------------------- + + +class TestParseFilesDuplicateLogging: + """Tests for duplicate definition detection and logging in parse_files.""" + + def _make_repo(self): + mock_args = MagicMock() + mock_args.url = "https://github.com/org/repo.git" + mock_args.branch = "master" + mock_handle = MagicMock() + with ( + patch("os.path.isdir", return_value=True), + patch("core.repo.Repo") as MockRepo, + ): + mock_git_repo = MagicMock() + mock_git_repo.remotes.origin.url = "https://github.com/org/repo.git" + ref = MagicMock() + ref.name = "origin/master" + mock_git_repo.remotes.origin.refs = [ref] + MockRepo.return_value = mock_git_repo + repo = DTLRepo(mock_args, "/tmp/repo", mock_handle) + return repo, mock_handle + + def test_duplicate_key_logs_warning_and_records_definition(self): + """Two items with the same (manufacturer_slug, model) trigger duplicate logging.""" + repo, mock_handle = self._make_repo() + + item_a = {"manufacturer": {"slug": "cisco"}, "model": "X", "src": "a.yaml"} + item_b = {"manufacturer": {"slug": "cisco"}, "model": "X", "src": "b.yaml"} + + with patch("core.repo.parse_single_file", side_effect=[item_a, item_b]): + result = repo.parse_files(["a.yaml", "b.yaml"]) + + # Only the first item (sorted by src) should appear in results. + assert len(result) == 1 + # Warning must have been logged. 
+ logged = [call.args[0] for call in mock_handle.log.call_args_list] + assert any("WARNING" in msg and "cisco" in msg for msg in logged) + # Duplicate definitions recorded on repo. + assert len(repo.duplicate_definitions) == 1 + assert repo.duplicate_definitions[0]["manufacturer"] == "cisco" + assert repo.duplicate_definitions[0]["model"] == "X" diff --git a/tests/test_schema_reader.py b/tests/test_schema_reader.py new file mode 100644 index 00000000..e4255fd9 --- /dev/null +++ b/tests/test_schema_reader.py @@ -0,0 +1,201 @@ +"""Tests for core/schema_reader.py.""" + +import json + +import pytest + +from core.schema_reader import load_properties_for_type, load_scalar_properties + + +class TestLoadScalarProperties: + """Tests for load_scalar_properties().""" + + def test_invalid_json_raises_value_error(self, tmp_path): + schema_file = tmp_path / "bad.json" + schema_file.write_text("not valid json {{{") + + with pytest.raises(ValueError, match="Invalid JSON"): + load_scalar_properties(str(schema_file)) + + def test_missing_properties_key_raises_value_error(self, tmp_path): + schema_file = tmp_path / "noprops.json" + schema_file.write_text('{"title": "MySchema"}') + + with pytest.raises(ValueError, match="no 'properties'"): + load_scalar_properties(str(schema_file)) + + def test_non_object_schema_root_raises_value_error(self, tmp_path): + """A JSON array or scalar at the schema root must raise ValueError.""" + schema_file = tmp_path / "array_root.json" + schema_file.write_text('[{"type": "string"}]') + + with pytest.raises(ValueError, match="root is not a JSON object"): + load_scalar_properties(str(schema_file)) + + def test_non_dict_property_entry_is_skipped(self, tmp_path): + """Malformed property entries (non-dict) must be silently skipped.""" + schema = { + "properties": { + "valid_prop": {"type": "string"}, + "broken_prop": None, + "also_broken": "shorthand", + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = 
load_scalar_properties(str(schema_file)) + + assert "valid_prop" in result + assert "broken_prop" not in result + assert "also_broken" not in result + + def test_excludes_named_properties(self, tmp_path): + schema = { + "properties": { + "name": {"type": "string"}, + "manufacturer": {"type": "string"}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file), exclude={"manufacturer"}) + + assert "manufacturer" not in result + assert "name" in result + + def test_skips_array_and_object_types(self, tmp_path): + schema = { + "properties": { + "tags": {"type": "array"}, + "custom_fields": {"type": "object"}, + "part_number": {"type": "string"}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file)) + + assert "tags" not in result + assert "custom_fields" not in result + assert "part_number" in result + + def test_includes_ref_and_scalar_properties(self, tmp_path): + schema = { + "properties": { + "device_type": {"$ref": "#/definitions/DeviceType"}, + "u_height": {"type": "integer"}, + "is_full_depth": {"type": "boolean"}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file)) + + assert "device_type" in result + assert "u_height" in result + assert "is_full_depth" in result + + def test_skips_property_with_no_type_and_no_ref(self, tmp_path): + """Properties with no recognisable type (no $ref, no 'type' key) are excluded.""" + schema = { + "properties": { + "weird_prop": {"description": "no type at all"}, + "part_number": {"type": "string"}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file)) + + assert "weird_prop" not in result + assert "part_number" in result + + def 
test_skips_anyof_and_oneof_properties(self, tmp_path): + """anyOf/oneOf properties are excluded — their resolved type is unpredictable.""" + schema = { + "properties": { + "poly_prop": {"anyOf": [{"type": "string"}, {"type": "null"}]}, + "union_prop": {"oneOf": [{"type": "integer"}, {"$ref": "#/defs/X"}]}, + "model": {"type": "string"}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file)) + + assert "poly_prop" not in result + assert "union_prop" not in result + assert "model" in result + + def test_includes_nullable_scalar_union_type(self, tmp_path): + """A 'type' list whose members are all scalar types is included.""" + schema = { + "properties": { + "weight": {"type": ["number", "null"]}, + "label": {"type": ["string", "null"]}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file)) + + assert "weight" in result + assert "label" in result + + def test_skips_type_union_containing_array_or_object(self, tmp_path): + """A 'type' list that mixes scalars with array/object is excluded.""" + schema = { + "properties": { + "mixed": {"type": ["string", "array"]}, + "obj_or_null": {"type": ["object", "null"]}, + "valid": {"type": "string"}, + } + } + schema_file = tmp_path / "schema.json" + schema_file.write_text(json.dumps(schema)) + + result = load_scalar_properties(str(schema_file)) + + assert "mixed" not in result + assert "obj_or_null" not in result + assert "valid" in result + + +class TestLoadPropertiesForType: + """Tests for load_properties_for_type().""" + + def test_returns_empty_list_on_missing_file(self): + result = load_properties_for_type("/nonexistent/path/to/schema", "devicetype") + assert result == [] + + def test_returns_empty_list_on_invalid_json(self, tmp_path): + schema_file = tmp_path / "devicetype.json" + schema_file.write_text("invalid json !!!") + + result = 
load_properties_for_type(str(tmp_path), "devicetype") + + assert result == [] + + def test_returns_scalar_properties_from_valid_schema(self, tmp_path): + schema = { + "properties": { + "part_number": {"type": "string"}, + "u_height": {"type": "integer"}, + "tags": {"type": "array"}, + } + } + schema_file = tmp_path / "devicetype.json" + schema_file.write_text(json.dumps(schema)) + + result = load_properties_for_type(str(tmp_path), "devicetype") + + assert "part_number" in result + assert "u_height" in result + assert "tags" not in result diff --git a/tests/test_update_failure_resolver.py b/tests/test_update_failure_resolver.py new file mode 100644 index 00000000..cb438ed1 --- /dev/null +++ b/tests/test_update_failure_resolver.py @@ -0,0 +1,367 @@ +"""Tests for ``core.update_failure_resolver``.""" + +from __future__ import annotations + +from unittest.mock import MagicMock + +import pytest + +from core.update_failure_resolver import ( + FailureKind, + classify_device_type_update_failure, +) + + +SUBDEVICE_ROLE_ERROR_DICT = { + "subdevice_role": [ + "Must delete all device bay templates associated with this device before declassifying it as a parent device." 
+ ] +} +SUBDEVICE_ROLE_ERROR_JSON = ( + '{"subdevice_role": ["Must delete all device bay templates associated ' + 'with this device before declassifying it as a parent device."]}' +) + + +def _make_netbox(*, devices=None, device_count=None, templates=None): + """Build a minimal mock pynetbox client with .dcim.devices and .device_bay_templates.""" + nb = MagicMock() + devices = devices or [] + nb.dcim.devices.filter.return_value = devices + nb.dcim.devices.count.return_value = device_count if device_count is not None else len(devices) + nb.dcim.device_bay_templates.filter.return_value = templates or [] + return nb + + +def test_classifier_unhandled_for_unrelated_error(): + nb = _make_netbox() + res = classify_device_type_update_failure( + {"some_other_field": ["nope"]}, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.UNHANDLED + + +def test_classifier_unhandled_for_unparseable_payload(): + nb = _make_netbox() + res = classify_device_type_update_failure( + "totally not json", + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.UNHANDLED + + +def test_classifier_recognises_subdevice_role_error_dict_safe_path(): + """Dict payload + zero dependent devices + blocking templates -> SUBDEVICE_ROLE_FLIP, actionable.""" + t1 = MagicMock() + t1.name = "bay-1" + t2 = MagicMock() + t2.name = "bay-2" + nb = _make_netbox(templates=[t1, t2], devices=[], device_count=0) + + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=42, + device_type_yaml={}, + ) + + assert res.kind == FailureKind.SUBDEVICE_ROLE_FLIP + assert res.dependent_devices_count == 0 + assert res.blocking_objects == ["bay-1", "bay-2"] + assert len(res.remediation_steps) == 2 + assert res.is_actionable is True + assert "--force-resolve-conflicts" in res.hint + + +def test_classifier_accepts_json_string_payload(): + """Pynetbox sometimes returns the body as a JSON string; classifier 
must handle it.""" + t = MagicMock() + t.name = "t" + nb = _make_netbox(templates=[t], devices=[], device_count=0) + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_JSON, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.SUBDEVICE_ROLE_FLIP + + +def test_classifier_blocks_auto_resolve_when_dependent_devices_exist(): + """Live devices reference the type -> MANUAL_REQUIRED, no remediation.""" + d1 = MagicMock() + d1.name = "router-1" + d2 = MagicMock() + d2.name = "router-2" + nb = _make_netbox(devices=[d1, d2], device_count=2, templates=[MagicMock()]) + + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + + assert res.kind == FailureKind.MANUAL_REQUIRED + assert res.dependent_devices_count == 2 + assert res.dependent_devices_sample == ["router-1", "router-2"] + assert res.is_actionable is False + assert res.remediation_steps == [] + assert "router-1" in res.hint + + +def test_classifier_returns_inspection_hint_when_no_blocking_templates(): + """If device-bay templates list is empty, the force-resolve hint must NOT be shown. + + The PATCH error fired but there are no templates to delete (race condition + or prior run already cleaned them up). Advertising --force-resolve-conflicts + in that case would be a guaranteed no-op that confuses the operator. 
+ """ + nb = _make_netbox(templates=[], devices=[], device_count=0) + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.SUBDEVICE_ROLE_FLIP + assert res.is_actionable is False + assert res.remediation_steps == [] + assert "--force-resolve-conflicts" not in res.hint + + +def test_classifier_blocks_auto_resolve_when_yaml_still_lists_device_bays(): + """YAML still declares device-bays -> auto-deleting them would loop; MANUAL_REQUIRED.""" + nb = _make_netbox(templates=[MagicMock()], devices=[], device_count=0) + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={"device-bays": [{"name": "bay-1"}]}, + ) + assert res.kind == FailureKind.MANUAL_REQUIRED + assert res.is_actionable is False + + +def test_classifier_dependent_count_unknown_blocks_resolve(): + """If the count query raises, classifier must treat the type as unsafe (MANUAL_REQUIRED).""" + nb = MagicMock() + nb.dcim.device_bay_templates.filter.return_value = [MagicMock()] + nb.dcim.devices.filter.side_effect = RuntimeError("API down") + + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.MANUAL_REQUIRED + assert res.dependent_devices_count == -1 + assert res.is_actionable is False + + +def test_classifier_remediation_step_calls_template_delete(): + """Each remediation_steps entry must, when invoked, delete the corresponding template.""" + t = MagicMock() + t.name = "bay-1" + nb = _make_netbox(templates=[t], devices=[], device_count=0) + + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert len(res.remediation_steps) == 1 + res.remediation_steps[0]() + t.delete.assert_called_once() + + +def 
test_classifier_handles_bytes_payload(): + """RequestError content can be bytes; classifier must decode before parsing.""" + nb = _make_netbox(templates=[MagicMock()], devices=[], device_count=0) + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_JSON.encode("utf-8"), + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.SUBDEVICE_ROLE_FLIP + + +@pytest.mark.parametrize( + ("payload", "expected_kind"), + [ + ( + {"subdevice_role": "Must delete all device bay templates declassifying it as a parent device"}, + FailureKind.SUBDEVICE_ROLE_FLIP, + ), + ( + {"subdevice_role": ["completely unrelated message"]}, + FailureKind.UNHANDLED, + ), + ], +) +def test_classifier_marker_matching_is_strict(payload, expected_kind): + """Classifier matches only when both required markers are present in the message.""" + nb = _make_netbox(templates=[MagicMock()], devices=[], device_count=0) + res = classify_device_type_update_failure( + payload, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == expected_kind + + +def test_classifier_recognises_subdevice_role_error_string_marker(): + """Plain-string payload containing both markers must classify as SUBDEVICE_ROLE_FLIP.""" + nb = _make_netbox(templates=[MagicMock()], devices=[], device_count=0) + payload = ( + "Must delete all device bay templates associated with this device before declassifying it as a parent device." 
+ ) + res = classify_device_type_update_failure( + payload, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.SUBDEVICE_ROLE_FLIP + + +def test_classifier_handles_non_string_msg_in_subdevice_role_list(): + """If subdevice_role list contains non-string objects, classifier degrades gracefully.""" + + class _BadObj: + def __iter__(self): + raise TypeError("not iterable as expected") + + nb = _make_netbox(templates=[MagicMock()], devices=[], device_count=0) + res = classify_device_type_update_failure( + {"subdevice_role": _BadObj()}, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + # Should not raise; falls through to UNHANDLED because the markers won't match. + assert res.kind == FailureKind.UNHANDLED + + +def test_classifier_handles_template_query_failure(): + """If listing device-bay templates raises, classifier returns MANUAL_REQUIRED. + + When the template-listing call fails, the result must distinguish + "lookup failed" from "no templates" so the operator gets a connectivity + hint rather than a misleading "inspect for residual templates" message. + """ + nb = MagicMock() + nb.dcim.device_bay_templates.filter.side_effect = RuntimeError("API down") + nb.dcim.devices.filter.return_value = [] + nb.dcim.devices.count.return_value = 0 + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + # Lookup failure → MANUAL_REQUIRED with connectivity hint (not a race-condition message). 
+ assert res.kind == FailureKind.MANUAL_REQUIRED + assert "lookup failed" in res.description + assert res.is_actionable is False + + +def test_classifier_count_query_used_when_filter_returns_full_page(): + """When filter returns 5 records (page cap), classifier must call .count() for the real total.""" + devs = [MagicMock() for i in range(5)] + for i, d in enumerate(devs): + d.name = f"router-{i}" + nb = _make_netbox(devices=devs, device_count=137, templates=[MagicMock()]) + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.MANUAL_REQUIRED + assert res.dependent_devices_count == 137 + nb.dcim.devices.count.assert_called_once() + + +def test_classifier_count_fallback_when_count_query_fails(): + """If .count() raises, classifier falls back to len(filter_results).""" + devs = [MagicMock() for _ in range(5)] + for i, d in enumerate(devs): + d.name = f"router-{i}" + nb = MagicMock() + nb.dcim.device_bay_templates.filter.return_value = [MagicMock()] + nb.dcim.devices.filter.return_value = devs + nb.dcim.devices.count.side_effect = RuntimeError("boom") + res = classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=1, + device_type_yaml={}, + ) + assert res.kind == FailureKind.MANUAL_REQUIRED + assert res.dependent_devices_count == 5 + + +def test_new_filters_uses_device_type_id_key(): + """new_filters=True must call filter(device_type_id=...) not devicetype_id=... + + This matters because NetBox >= 4.1 changed the query param name. + Passing the wrong key causes pynetbox to silently return ALL templates. 
+ """ + nb = _make_netbox() + classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=99, + device_type_yaml={}, + new_filters=True, + ) + nb.dcim.device_bay_templates.filter.assert_called_once_with(device_type_id=99) + + +def test_old_filters_uses_devicetype_id_key(): + """new_filters=False (default) must call filter(devicetype_id=...) for NetBox < 4.1.""" + nb = _make_netbox() + classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=99, + device_type_yaml={}, + new_filters=False, + ) + nb.dcim.device_bay_templates.filter.assert_called_once_with(devicetype_id=99) + + +def test_count_dependent_devices_uses_new_filter_key(): + """When new_filters=True, dcim.devices must be queried with device_type_id= not devicetype_id=.""" + nb = _make_netbox(devices=[], device_count=0) + classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=77, + device_type_yaml={}, + new_filters=True, + ) + nb.dcim.devices.filter.assert_called_once_with(device_type_id=77, limit=5) + + +def test_count_dependent_devices_uses_legacy_filter_key(): + """When new_filters=False (default), dcim.devices must be queried with devicetype_id=.""" + nb = _make_netbox(devices=[], device_count=0) + classify_device_type_update_failure( + SUBDEVICE_ROLE_ERROR_DICT, + netbox=nb, + device_type_id=77, + device_type_yaml={}, + new_filters=False, + ) + nb.dcim.devices.filter.assert_called_once_with(devicetype_id=77, limit=5) diff --git a/uv.lock b/uv.lock index 8af65e24..37555738 100644 --- a/uv.lock +++ b/uv.lock @@ -2,6 +2,15 @@ version = 1 revision = 3 requires-python = ">=3.12" +[[package]] +name = "attrs" +version = "26.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/8e/82a0fe20a541c03148528be8cac2408564a6c9a0cc7e9171802bc1d26985/attrs-26.1.0.tar.gz", hash = 
"sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32", size = 952055, upload-time = "2026-03-19T14:22:25.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/b4/17d4b0b2a2dc85a6df63d1157e028ed19f90d4cd97c36717afef2bc2f395/attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", size = 67548, upload-time = "2026-03-19T14:22:23.645Z" }, +] + [[package]] name = "certifi" version = "2025.11.12" @@ -77,6 +86,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, ] +[[package]] +name = "click" +version = "8.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/63/f9e1ea081ce35720d8b92acde70daaedace594dc93b693c869e0d5910718/click-8.3.3.tar.gz", hash = "sha256:398329ad4837b2ff7cbe1dd166a4c0f8900c3ca3a218de04466f38f6497f18a2", size = 328061, upload-time = "2026-04-22T15:11:27.506Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/44/c1221527f6a71a01ec6fbad7fa78f1d50dfa02217385cf0fa3eec7087d59/click-8.3.3-py3-none-any.whl", hash = "sha256:a2bf429bb3033c89fa4936ffb35d5cb471e3719e1f3c8a7c3fff0b8314305613", size = 110502, upload-time = "2026-04-22T15:11:25.044Z" }, +] + [[package]] name = "colorama" version = "0.4.6" @@ -185,6 +206,7 @@ dependencies = [ [package.dev-dependencies] dev = [ + { name = "interrogate" }, { name = "pre-commit" }, { name = "pytest" }, { name = "pytest-cov" }, @@ -205,6 +227,7 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ + { name = "interrogate", specifier = ">=1.7.0" }, { name = "pre-commit", specifier = ">=4.6.0" }, { name 
= "pytest", specifier = ">=9.0.3" }, { name = "pytest-cov", specifier = ">=6.0" }, @@ -282,6 +305,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] +[[package]] +name = "interrogate" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "click" }, + { name = "colorama" }, + { name = "py" }, + { name = "tabulate" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8b/22/74f7fcc96280eea46cf2bcbfa1354ac31de0e60a4be6f7966f12cef20893/interrogate-1.7.0.tar.gz", hash = "sha256:a320d6ec644dfd887cc58247a345054fc4d9f981100c45184470068f4b3719b0", size = 159636, upload-time = "2024-04-07T22:30:46.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/c9/6869a1dcf4aaf309b9543ec070be3ec3adebee7c9bec9af8c230494134b9/interrogate-1.7.0-py3-none-any.whl", hash = "sha256:b13ff4dd8403369670e2efe684066de9fcb868ad9d7f2b4095d8112142dc9d12", size = 46982, upload-time = "2024-04-07T22:30:44.277Z" }, +] + [[package]] name = "markdown-it-py" version = "4.0.0" @@ -355,6 +394,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/80/6e/4b28b62ecb6aae56769c34a8ff1d661473ec1e9519e2d5f8b2c150086b26/pre_commit-4.6.0-py2.py3-none-any.whl", hash = "sha256:e2cf246f7299edcabcf15f9b0571fdce06058527f0a06535068a86d38089f29b", size = 226472, upload-time = "2026-04-21T20:31:40.092Z" }, ] +[[package]] +name = "py" +version = "1.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719", size = 207796, upload-time = 
"2021-11-04T17:17:01.377Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378", size = 98708, upload-time = "2021-11-04T17:17:00.152Z" }, +] + [[package]] name = "pygments" version = "2.20.0" @@ -548,6 +596,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" }, ] +[[package]] +name = "tabulate" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/46/58/8c37dea7bbf769b20d58e7ace7e5edfe65b849442b00ffcdd56be88697c6/tabulate-0.10.0.tar.gz", hash = "sha256:e2cfde8f79420f6deeffdeda9aaec3b6bc5abce947655d17ac662b126e48a60d", size = 91754, upload-time = "2026-03-04T18:55:34.402Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/55/db07de81b5c630da5cbf5c7df646580ca26dfaefa593667fc6f2fe016d2e/tabulate-0.10.0-py3-none-any.whl", hash = "sha256:f0b0622e567335c8fabaaa659f1b33bcb6ddfe2e496071b743aa113f8774f2d3", size = 39814, upload-time = "2026-03-04T18:55:31.284Z" }, +] + [[package]] name = "urllib3" version = "2.6.3"