diff --git a/changelog/68858.fixed.md b/changelog/68858.fixed.md new file mode 100644 index 000000000000..1abf62923488 --- /dev/null +++ b/changelog/68858.fixed.md @@ -0,0 +1 @@ +Fixed source package builds (DEB/RPM) failing with ``LookupError: hatchling is already being built`` by adding ``hatchling`` to the ``--only-binary`` allow-list so pip uses its universal wheel instead of attempting a circular source build. diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 27cf2ba78076..9b0c5b5c3aab 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -2288,6 +2288,14 @@ def prepend_root_dir(opts, path_options): def insert_system_path(opts, paths): """ Inserts path into python path taking into consideration 'root_dir' option. + + Paths are appended rather than prepended so that stdlib modules are never + shadowed by extension module directories (e.g. extmods/utils/). In Python + 3.14+ the ``forkserver`` start method spawns child processes with a fresh + interpreter and passes the parent's ``sys.path`` via preparation_data. If + an extmods directory sits before the stdlib entries it can accidentally + shadow stdlib modules (e.g. ``platform``, ``functools``), triggering + circular imports that crash the child. 
""" if isinstance(paths, str): paths = [paths] @@ -2295,7 +2303,7 @@ def insert_system_path(opts, paths): path_options = {"path": path, "root_dir": opts["root_dir"]} prepend_root_dir(path_options, path_options) if os.path.isdir(path_options["path"]) and path_options["path"] not in sys.path: - sys.path.insert(0, path_options["path"]) + sys.path.append(path_options["path"]) def minion_config( diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py index 636f6d0db973..a4703978637e 100644 --- a/salt/modules/localemod.py +++ b/salt/modules/localemod.py @@ -107,6 +107,9 @@ def _localectl_set(locale=""): """ Use systemd's localectl command to set the LANG locale parameter, making sure not to trample on other params that have been set. + + Falls back to writing /etc/locale.conf directly when localectl set-locale + fails (e.g., when systemd-localed is not running in a container). """ locale_params = ( _parse_dbus_locale() @@ -115,9 +118,24 @@ def _localectl_set(locale=""): ) locale_params["LANG"] = str(locale) args = " ".join([f'{k}="{v}"' for k, v in locale_params.items() if v is not None]) - return not __salt__["cmd.retcode"]( - f"localectl set-locale {args}", python_shell=False + if not __salt__["cmd.retcode"](f"localectl set-locale {args}", python_shell=False): + return True + + # localectl set-locale failed (e.g., systemd-localed is not running in a + # container environment where D-Bus write access is unavailable). Write + # /etc/locale.conf directly; modern localectl status reads from that file + # without D-Bus, so get_locale() will see the change immediately. 
+ log.debug("localectl set-locale failed; writing /etc/locale.conf directly") + locale_conf = "/etc/locale.conf" + if not __salt__["file.file_exists"](locale_conf): + __salt__["file.touch"](locale_conf) + __salt__["file.replace"]( + locale_conf, + "^LANG=.*", + f"LANG={locale}", + append_if_not_found=True, ) + return True def list_avail(): diff --git a/salt/modules/network.py b/salt/modules/network.py index 3411144fb279..8fd4d9bbffb7 100644 --- a/salt/modules/network.py +++ b/salt/modules/network.py @@ -937,7 +937,17 @@ def traceroute(host): """ ret = [] cmd = "traceroute {}".format(__utils__["network.sanitize_host"](host)) - out = __salt__["cmd.run"](cmd) + # Bound the wall-clock time so callers aren't blocked indefinitely when + # every hop times out (30 hops × 3 probes × 5 s = 450 s by default). + # 120 s is enough for a well-routed destination and still returns partial + # results (already-seen hops) for unreachable destinations. + out = __salt__["cmd.run"](cmd, timeout=120) + + # When cmd.run hits its timeout it returns the exception message as stdout + # rather than actual traceroute output. Detect that and bail early so the + # parser below doesn't try to interpret the error string as hop data. + if "Timed out after" in out: + return ret # Parse version of traceroute if __utils__["platform.is_sunos"]() or __utils__["platform.is_aix"](): @@ -1041,7 +1051,7 @@ def traceroute(host): # Parse anything else else: comps = line.split() - if len(comps) >= 8: + if len(comps) >= 9: result = { "count": comps[0], "hostname": comps[1], diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py index be1d77d6db9c..8f5d65a07e2d 100644 --- a/salt/transport/zeromq.py +++ b/salt/transport/zeromq.py @@ -677,8 +677,13 @@ def _send_recv(self, socket, _TimeoutError=salt.ext.tornado.gen.TimeoutError): log.trace( "The request ended with an error while sending. reconnecting." ) + # Only reconnect if the client is still active. 
If close() was + # already called externally (context is None), do not create a + # new socket/context that would never be cleaned up. + _should_reconnect = self.context is not None self.close() - self.connect() + if _should_reconnect: + self.connect() send_recv_running = False break @@ -724,8 +729,13 @@ def _send_recv(self, socket, _TimeoutError=salt.ext.tornado.gen.TimeoutError): ) else: log.trace("The request ended with an error. reconnecting.") + # Only reconnect if the client is still active. If close() was + # already called externally (context is None), do not create a + # new socket/context that would never be cleaned up. + _should_reconnect = self.context is not None self.close() - self.connect() + if _should_reconnect: + self.connect() send_recv_running = False elif received: data = salt.payload.loads(recv) diff --git a/salt/utils/platform.py b/salt/utils/platform.py index 59a04b451bcd..5a39bee82ee1 100644 --- a/salt/utils/platform.py +++ b/salt/utils/platform.py @@ -3,6 +3,7 @@ """ import contextlib +import functools import multiprocessing import os import platform @@ -11,7 +12,36 @@ import distro -from salt.utils.decorators import memoize as real_memoize + +# Use a local wraps-based memoize rather than importing from salt.utils.decorators. +# This module is synced to the remote's extmods/utils/platform.py, and in +# Python 3.14+ (forkserver default start method) it can be accidentally +# imported as the stdlib ``platform`` module when extmods/utils/ sits at +# sys.path[0]. Importing from salt.utils.decorators in that context +# creates a circular import: +# salt.utils.decorators → salt.utils.versions → salt.version +# → import platform (ourselves!) → salt.utils.decorators (cycle) +# functools is part of the stdlib and has no such dependency. 
"forkserver" became the Linux default in Python 3.14 (CPython gh-84559; see the 3.14 "What's New" notes). Like
""" - return multiprocessing.get_start_method(allow_none=False) == "spawn" + return multiprocessing.get_start_method(allow_none=False) in ("spawn", "forkserver") def get_machine_identifier(): diff --git a/tests/integration/modules/test_localemod.py b/tests/integration/modules/test_localemod.py index 407a459794f8..597308236e43 100644 --- a/tests/integration/modules/test_localemod.py +++ b/tests/integration/modules/test_localemod.py @@ -11,8 +11,16 @@ def _check_systemctl(): if not salt.utils.platform.is_linux(): _check_systemctl.memo = False else: - proc = subprocess.run(["localectl"], capture_output=True, check=False) - _check_systemctl.memo = b"No such file or directory" in proc.stderr + try: + proc = subprocess.run(["localectl"], capture_output=True, check=False) + _check_systemctl.memo = ( + b"No such file or directory" in proc.stderr + or b"Connection refused" in proc.stderr + or b"Failed to connect to bus" in proc.stderr + or b"Failed to get D-Bus connection" in proc.stderr + ) + except FileNotFoundError: + _check_systemctl.memo = True return _check_systemctl.memo diff --git a/tests/pytests/functional/modules/test_network.py b/tests/pytests/functional/modules/test_network.py index a05006bccd7b..7a959d3977e1 100644 --- a/tests/pytests/functional/modules/test_network.py +++ b/tests/pytests/functional/modules/test_network.py @@ -57,6 +57,7 @@ def test_network_netstat(network): @pytest.mark.skip_if_binaries_missing("traceroute") @pytest.mark.slow_test +@pytest.mark.timeout(150) def test_network_traceroute(network, url): """ network.traceroute diff --git a/tests/pytests/unit/loader/test_grains_cleanup.py b/tests/pytests/unit/loader/test_grains_cleanup.py index 9ae318b9eb69..f6d501ac4824 100644 --- a/tests/pytests/unit/loader/test_grains_cleanup.py +++ b/tests/pytests/unit/loader/test_grains_cleanup.py @@ -266,6 +266,17 @@ def test_clean_modules_removes_from_sys_modules(minion_opts): f"{loaded_base_name}.ext.{tag}", } + # Prefixes for modules that belong 
specifically to this loader's tag. + # clean_modules() only removes modules under these prefixes, so we only + # check these prefixes — not ALL salt.loaded.* modules. Checking the + # broader namespace would make the test sensitive to modules loaded by + # other tests that ran in the same process (e.g. salt.loaded.int.modules.* + # from execution-module unit tests). + tag_prefixes = ( + f"{loaded_base_name}.int.{tag}.", + f"{loaded_base_name}.ext.{tag}.", + ) + # Load some modules for key in list(loader.keys())[:5]: try: @@ -273,34 +284,23 @@ def test_clean_modules_removes_from_sys_modules(minion_opts): except Exception: # pylint: disable=broad-except pass - # Find modules that were loaded - loaded_before = [m for m in sys.modules if m.startswith(loaded_base_name)] + # Find tag-specific modules that were loaded + loaded_before = [ + m for m in sys.modules if any(m.startswith(p) for p in tag_prefixes) + ] assert len(loaded_before) > 0, "No modules were loaded for testing" # Clean modules loader.clean_modules() - # Verify actual loaded modules are removed but base stubs remain - remaining = [m for m in sys.modules if m.startswith(loaded_base_name)] - - # All remaining modules should be base stubs or utils modules (shared infrastructure) - # Filter out both base stubs and utils modules - unexpected = [] - for m in remaining: - # Skip base stubs - if m in expected_base_stubs: - continue - # Skip utils modules (shared infrastructure) - parts = m.split(".") - # Utils modules: salt.loaded.int.utils, salt.loaded.int.utils.*, etc. 
- if len(parts) >= 4 and parts[3] == "utils": - continue - # Anything else is unexpected - unexpected.append(m) + # All tag-specific modules should have been removed + remaining_tag = [ + m for m in sys.modules if any(m.startswith(p) for p in tag_prefixes) + ] assert ( - len(unexpected) == 0 - ), f"clean_modules() failed to remove {len(unexpected)} modules: {unexpected}" + len(remaining_tag) == 0 + ), f"clean_modules() failed to remove {len(remaining_tag)} modules: {remaining_tag}" # Base stubs should still be present for stub in expected_base_stubs: diff --git a/tests/pytests/unit/utils/test_platform.py b/tests/pytests/unit/utils/test_platform.py index 2d9c74b23987..70dafc24957c 100644 --- a/tests/pytests/unit/utils/test_platform.py +++ b/tests/pytests/unit/utils/test_platform.py @@ -1,3 +1,4 @@ +import multiprocessing import subprocess import salt.utils.platform @@ -45,3 +46,33 @@ def test_linux_distribution(): distro_version, distro_codename, ) + + +def test_spawning_platform_spawn(): + """ + spawning_platform() must return True when the multiprocessing start method + is "spawn" (Windows default, macOS default on Python >= 3.8). + """ + with patch.object(multiprocessing, "get_start_method", return_value="spawn"): + assert salt.utils.platform.spawning_platform() is True + + +def test_spawning_platform_forkserver(): + """ + spawning_platform() must return True when the multiprocessing start method + is "forkserver". Like "spawn", forkserver transfers the Process object to + the child via pickle, so Salt must prepare __getstate__/__setstate__ for it. + This is the Linux default starting with Python 3.14. + """ + with patch.object(multiprocessing, "get_start_method", return_value="forkserver"): + assert salt.utils.platform.spawning_platform() is True + + +def test_spawning_platform_fork(): + """ + spawning_platform() must return False when the multiprocessing start method + is "fork" (Linux default on Python < 3.14). 
Fork inherits process state + directly, so pickling is not required. + """ + with patch.object(multiprocessing, "get_start_method", return_value="fork"): + assert salt.utils.platform.spawning_platform() is False diff --git a/tools/pkg/build.py b/tools/pkg/build.py index 3bf51e7a300f..886ea70a6667 100644 --- a/tools/pkg/build.py +++ b/tools/pkg/build.py @@ -560,7 +560,7 @@ def onedir_dependencies( "-v", "--use-pep517", "--no-cache-dir", - "--only-binary=maturin,apache-libcloud,pymssql", + "--only-binary=maturin,apache-libcloud,pymssql,hatchling", ] if platform == "windows": python_bin = env_scripts_dir / "python" @@ -568,7 +568,9 @@ def onedir_dependencies( env["RELENV_BUILDENV"] = "1" python_bin = env_scripts_dir / "python3" install_args.append("--no-binary=:all:") - install_args.append("--only-binary=maturin,apache-libcloud,pymssql") + install_args.append( + "--only-binary=maturin,apache-libcloud,pymssql,cassandra-driver,hatchling" + ) # Cryptography needs openssl dir set to link to the proper openssl libs. if platform == "macos":