diff --git a/Snakefile b/Snakefile index 172e38af..41206e34 100644 --- a/Snakefile +++ b/Snakefile @@ -260,6 +260,16 @@ def collect_prepared_input(wildcards): return prepared_inputs +# Look up a per-algorithm container image override from config for HTCondor transfer. +# When a local .sif path is configured, it will be included in htcondor_transfer_input_files +# so the HTCondor executor transfers it to the EP alongside the job. +def get_algorithm_image(wildcards): + images = container_settings.images + override = images.get(wildcards.algorithm, "") + if override.endswith('.sif'): + return override + return None + # Run the pathway reconstruction algorithm rule reconstruct: input: collect_prepared_input @@ -269,6 +279,8 @@ rule reconstruct: # same name regardless of the inputs or parameters, and these aren't renamed until after the container command # terminates output: pathway_file = SEP.join([out_dir, '{dataset}-{algorithm}-{params}', 'raw-pathway.txt']) + resources: + htcondor_transfer_input_files=get_algorithm_image run: # Create a copy so that the updates are not written to the parameters logfile params = reconstruction_params(wildcards.algorithm, wildcards.params).copy() diff --git a/config/config.yaml b/config/config.yaml index 52ffd420..8fe053b2 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -41,6 +41,29 @@ containers: # requirements = versionGE(split(Target.CondorVersion)[1], "24.8.0") && (isenforcingdiskusage =!= true) enable_profiling: false + # Override the default container image for specific algorithms. + # Keys are algorithm names (as they appear in the algorithms list below). + # Values are interpreted based on the container framework: + # + # Image reference (e.g., "pathlinker:v3"): + # Prepends the registry prefix. Works with both Docker and Apptainer. + # + # Full image reference with registry (e.g., "ghcr.io/myorg/pathlinker:v3"): + # Used as-is (prefix NOT prepended). Works with both Docker and Apptainer. 
+ # + # Local .sif file path (e.g., "images/pathlinker_v2.sif"): + # Apptainer/Singularity only. Skips pulling from registry and uses the + # pre-built .sif directly. When running via HTCondor with shared-fs-usage: none, + # .sif paths listed here are automatically included in htcondor_transfer_input_files. + # Ignored with a warning if the framework is Docker. + # + # Example (one of each type): + # images: + # omicsintegrator1: "images/omics-integrator-1_v2.sif" # local .sif (Apptainer only) + # pathlinker: "pathlinker:v1234" # image name only (base_url/owner prepended) + # omicsintegrator2: "some-other-owner/oi2:latest" # owner/image (base_url prepended) + # mincostflow: "ghcr.io/reed-compbio/mincostflow:v2" # full registry reference (used as-is) + # This list of algorithms should be generated by a script which checks the filesystem for installs. # It shouldn't be changed by mere mortals. (alternatively, we could add a path to executable for each algorithm # in the list to reduce the number of assumptions of the program at the cost of making the config a little more involved) diff --git a/spras/config/container_schema.py b/spras/config/container_schema.py index c4f29310..70276f63 100644 --- a/spras/config/container_schema.py +++ b/spras/config/container_schema.py @@ -7,7 +7,8 @@ """ import warnings -from dataclasses import dataclass +from dataclasses import dataclass, field +from typing import Optional from pydantic import BaseModel, ConfigDict @@ -21,6 +22,11 @@ class ContainerFramework(CaseInsensitiveEnum): apptainer = 'apptainer' dsub = 'dsub' + @property + def is_singularity_family(self) -> bool: + """True for both 'singularity' and 'apptainer', which are treated as synonyms.""" + return self in (ContainerFramework.singularity, ContainerFramework.apptainer) + class ContainerRegistry(BaseModel): base_url: str = "docker.io" "The domain of the registry" @@ -34,17 +40,20 @@ class ContainerSettings(BaseModel): framework: ContainerFramework = 
ContainerFramework.docker unpack_singularity: bool = False - model_config = ConfigDict(extra='forbid') enable_profiling: bool = False "A Boolean indicating whether to enable container runtime profiling (apptainer/singularity only)" registry: ContainerRegistry + images: dict[str, str] = {} + "Per-algorithm container image overrides. Keys are algorithm names; values are image references or local .sif file paths." + model_config = ConfigDict(extra='forbid', use_attribute_docstrings=True) @dataclass class ProcessedContainerSettings: framework: ContainerFramework = ContainerFramework.docker unpack_singularity: bool = False + base_url: str = "docker.io" prefix: str = DEFAULT_CONTAINER_PREFIX enable_profiling: bool = False hash_length: int = 7 @@ -57,6 +66,10 @@ class ProcessedContainerSettings: We prefer this `hash_length` in our container-running logic to avoid a (future) dependency diamond. """ + images: dict[str, str] = field(default_factory=dict) + """Per-algorithm container image overrides from config.""" + image_override: Optional[str] = None + """Resolved image override for the current algorithm. Set at runtime by runner.run().""" @staticmethod def from_container_settings(settings: ContainerSettings, hash_length: int) -> "ProcessedContainerSettings": @@ -65,18 +78,23 @@ def from_container_settings(settings: ContainerSettings, hash_length: int) -> "P container_framework = settings.framework # Unpack settings for running in singularity mode. Needed when running PRM containers if already in a container. - if settings.unpack_singularity and container_framework != "singularity": - warnings.warn("unpack_singularity is set to True, but the container framework is not singularity. This setting will have no effect.", stacklevel=2) + if settings.unpack_singularity and not container_framework.is_singularity_family: + warnings.warn("unpack_singularity is set to True, but the container framework is not singularity or apptainer. 
This setting will have no effect.", stacklevel=2) unpack_singularity = settings.unpack_singularity # Grab registry from the config, and if none is provided default to docker + container_base_url = "docker.io" container_prefix = DEFAULT_CONTAINER_PREFIX + if settings.registry and settings.registry.base_url != "": + container_base_url = settings.registry.base_url if settings.registry and settings.registry.base_url != "" and settings.registry.owner != "": container_prefix = settings.registry.base_url + "/" + settings.registry.owner return ProcessedContainerSettings( framework=container_framework, unpack_singularity=unpack_singularity, + base_url=container_base_url, prefix=container_prefix, - hash_length=hash_length + hash_length=hash_length, + images=dict(settings.images), ) diff --git a/spras/containers.py b/spras/containers.py index 124b9741..07def188 100644 --- a/spras/containers.py +++ b/spras/containers.py @@ -3,13 +3,14 @@ import re import subprocess import textwrap +import warnings from pathlib import Path, PurePath, PurePosixPath from typing import Iterator, List, Optional, Tuple, Union import docker import docker.errors -from spras.config.container_schema import ProcessedContainerSettings +from spras.config.container_schema import ContainerFramework, ProcessedContainerSettings from spras.logging import indent from spras.profiling import create_apptainer_container_stats, create_peer_cgroup from spras.util import hash_filename @@ -189,14 +190,48 @@ def run_container(container_suffix: str, command: List[str], volumes: List[Tuple @param network_disabled: Disables the network on the container. Only works for docker for now. This acts as a 'runtime assertion' that a container works w/o networking. 
@return: output from Singularity execute or Docker run """ - normalized_framework = container_settings.framework.casefold() + image_override = getattr(container_settings, 'image_override', None) + # Default: combine registry prefix with the algorithm's container suffix container = container_settings.prefix + "/" + container_suffix - if normalized_framework == 'docker': + + if image_override and image_override.endswith('.sif'): + # .sif overrides are only meaningful for apptainer/singularity; + # _resolve_singularity_image handles the actual .sif path. + # For other frameworks, warn and keep the default. + if not container_settings.framework.is_singularity_family: + warnings.warn( + f"Image override '{image_override}' is a .sif file, but the container framework is " + f"'{container_settings.framework}'. .sif overrides are only supported with " + f"apptainer/singularity. Falling back to default image.", + stacklevel=2 + ) + else: + print(f'Container image override (local .sif): {image_override}', flush=True) + elif image_override: + # Image reference override — determine how much of the URI is specified. + # Uses Docker's convention: if the first path component contains '.' or ':', + # it's a registry hostname and the reference is fully qualified. + if '/' in image_override: + first_component = image_override.split('/')[0] + if '.' 
in first_component or ':' in first_component: + # Full registry reference like "ghcr.io/myorg/image:tag" — use as-is + container = image_override + else: + # Owner/image like "some-other-owner/oi2:latest" — prepend base_url only + container = container_settings.base_url + "/" + image_override + else: + # Image name only like "pathlinker:v1234" — prepend full prefix (base_url/owner) + container = container_settings.prefix + "/" + image_override + print(f'Container image override: {container}', flush=True) + else: + print(f'Container image: {container}', flush=True) + + if container_settings.framework == ContainerFramework.docker: return run_container_docker(container, command, volumes, working_dir, environment, network_disabled) - elif normalized_framework == 'singularity' or normalized_framework == "apptainer": + elif container_settings.framework.is_singularity_family: return run_container_singularity(container, command, volumes, working_dir, out_dir, container_settings, environment) - elif normalized_framework == 'dsub': + elif container_settings.framework == ContainerFramework.dsub: return run_container_dsub(container, command, volumes, working_dir, environment) else: raise ValueError(f'{container_settings.framework} is not a recognized container framework. Choose "docker", "dsub", "apptainer", or "singularity".') @@ -345,6 +380,65 @@ def run_container_docker(container: str, command: List[str], volumes: List[Tuple return out + +def _resolve_singularity_image(container: str, config: ProcessedContainerSettings): + """ + Determine the image that apptainer/singularity should run. + + Returns a path or URI suitable for Client.execute() or the profiling command. + The four cases are: + 1. registry, no unpack --> return "docker://<container>" so apptainer pulls at runtime + 2. local .sif, no unpack --> return the .sif path directly + 3. unpack + local .sif --> unpack the .sif into a sandbox, return sandbox path + 4. 
unpack + registry --> pull .sif from registry, unpack into sandbox, return sandbox path + """ + from spython.main import Client + + image_override = getattr(config, 'image_override', None) + is_local_sif = image_override and image_override.endswith('.sif') + + if config.unpack_singularity: + unpacked_dir = Path("unpacked") + unpacked_dir.mkdir(exist_ok=True) + + if is_local_sif: + # Use pre-built .sif directly, skip pulling from registry + image_path = image_override + base_cont = Path(image_override).stem + else: + # The incoming image string is of the format <registry>/<owner>/<image>:<tag> e.g. + # hub.docker.com/reedcompbio/spras:latest + # Here we first produce a .sif image using the image name and tag (base_cont) + # and then expand that image into a sandbox directory. For example, + # hub.docker.com/reedcompbio/spras:latest --> spras_latest.sif --> ./spras_latest/ + path_elements = container.split("/") + base_cont = path_elements[-1] + base_cont = base_cont.replace(":", "_").split(":")[0] + sif_file = base_cont + ".sif" + + # Adding 'docker://' to the container indicates this is a Docker image Singularity must convert + image_path = Client.pull('docker://' + container, name=str(unpacked_dir / sif_file)) + + base_cont_path = unpacked_dir / Path(base_cont) + + # Check whether the directory for base_cont_path already exists. When running concurrent jobs, it's possible + # something else has already pulled/unpacked the container. 
+ # Here, we expand the sif image from `image_path` to a directory indicated by `base_cont_path` + if not base_cont_path.exists(): + Client.build(recipe=image_path, image=str(base_cont_path), sandbox=True, sudo=False) + print(f'Resolved singularity image to sandbox: {base_cont_path}', flush=True) + return base_cont_path # sandbox directory + + if is_local_sif: + # Local .sif without unpacking — use directly + print(f'Resolved singularity image to local .sif: {image_override}', flush=True) + return image_override + + # No override, no unpacking — apptainer pulls and converts the Docker image at runtime + resolved = "docker://" + container + print(f'Resolved singularity image: {resolved}', flush=True) + return resolved + + def run_container_singularity(container: str, command: List[str], volumes: List[Tuple[PurePath, PurePath]], working_dir: str, out_dir: str, config: ProcessedContainerSettings, environment: Optional[dict[str, str]] = None): """ Runs a command in the container using Singularity. @@ -384,39 +478,8 @@ def run_container_singularity(container: str, command: List[str], volumes: List[ # https://docs.sylabs.io/guides/3.7/user-guide/environment_and_metadata.html#env-option singularity_options.extend(['--env', ",".join(env_to_items(environment))]) - # Handle unpacking singularity image if needed. Potentially needed for running nested unprivileged containers - expanded_image = None - if config.unpack_singularity: - # The incoming image string is of the format <registry>/<owner>/<image>:<tag> e.g. - # hub.docker.com/reedcompbio/spras:latest - # Here we first produce a .sif image using the image name and tag (base_cont) - # and then expand that image into a sandbox directory. 
For example, - # hub.docker.com/reedcompbio/spras:latest --> spras_latest.sif --> ./spras_latest/ - path_elements = container.split("/") - base_cont = path_elements[-1] - base_cont = base_cont.replace(":", "_").split(":")[0] - sif_file = base_cont + ".sif" - - # To allow caching unpacked singularity images without polluting git on local runs, - # we move all of the unpacked image files into a `.gitignore`d folder. - unpacked_dir = Path("unpacked") - unpacked_dir.mkdir(exist_ok=True) - - # Adding 'docker://' to the container indicates this is a Docker image Singularity must convert - image_path = Client.pull('docker://' + container, name=str(unpacked_dir / sif_file)) - - base_cont_path = unpacked_dir / Path(base_cont) - - # Check whether the directory for base_cont_path already exists. When running concurrent jobs, it's possible - # something else has already pulled/unpacked the container. - # Here, we expand the sif image from `image_path` to a directory indicated by `base_cont_path` - if not base_cont_path.exists(): - Client.build(recipe=image_path, image=str(base_cont_path), sandbox=True, sudo=False) - expanded_image = base_cont_path # This is the sandbox directory + image_to_run = _resolve_singularity_image(container, config) - # If not using the expanded sandbox image, we still need to prepend the docker:// prefix - # so apptainer knows to pull and convert the image format from docker to apptainer. 
- image_to_run = expanded_image if expanded_image else "docker://" + container if config.enable_profiling: # We won't end up using the spython client if profiling is enabled because # we need to run everything manually to set up the cgroup diff --git a/spras/runner.py b/spras/runner.py index fb9391f9..cc273605 100644 --- a/spras/runner.py +++ b/spras/runner.py @@ -1,3 +1,4 @@ +import copy from typing import Any, Mapping # supported algorithm imports @@ -43,9 +44,13 @@ def run(algorithm: str, inputs, output_file, args, container_settings): A generic interface to the algorithm-specific run functions """ algorithm_runner = get_algorithm(algorithm) + # Resolve per-algorithm image override so containers.py can use it + settings = copy.copy(container_settings) + if settings.images and algorithm in settings.images: + settings.image_override = settings.images[algorithm] # We can't use config.config here else we would get a cyclic dependency. # Since args is a dict here, we use the 'run_typeless' utility PRM function. 
- algorithm_runner.run_typeless(inputs, output_file, args, container_settings) + algorithm_runner.run_typeless(inputs, output_file, args, settings) def get_required_inputs(algorithm: str): diff --git a/test/test_config.py b/test/test_config.py index 41551c38..e56ca314 100644 --- a/test/test_config.py +++ b/test/test_config.py @@ -218,6 +218,33 @@ def test_config_container_registry(self): config.init_global(test_config) assert (config.config.container_settings.prefix == DEFAULT_CONTAINER_PREFIX) + def test_config_container_images(self): + test_config = get_test_config() + + # Default: no images key --> empty dict, image_override is None + config.init_global(test_config) + assert config.config.container_settings.images == {} + assert config.config.container_settings.image_override is None + + # Local .sif paths are stored as-is and signal HTCondor transfer via get_algorithm_image + test_config["containers"]["images"] = { + "pathlinker": "images/foo.sif", + "omicsintegrator1": "images/bar.sif", + } + config.init_global(test_config) + assert config.config.container_settings.images == { + "pathlinker": "images/foo.sif", + "omicsintegrator1": "images/bar.sif", + } + # Unconfigured algorithm → no .sif override, get_algorithm_image returns None + assert not config.config.container_settings.images.get("responsenet", "").endswith(".sif") + + # Non .sif image reference override (e.g. 
pinning a different tag) is stored as-is + test_config["containers"]["images"] = {"pathlinker": "pathlinker:v1234"} + config.init_global(test_config) + images = config.config.container_settings.images + assert images.get("pathlinker") == "pathlinker:v1234" + def test_error_dataset_label(self): test_config = get_test_config() error_test_dicts = [{"label": "test$"}, {"label": "@test'"}, {"label": "[test]"}, {"label": "test-test"}, diff --git a/test/test_container_image_resolution.py b/test/test_container_image_resolution.py new file mode 100644 index 00000000..a1c6b375 --- /dev/null +++ b/test/test_container_image_resolution.py @@ -0,0 +1,201 @@ +""" +Tests for the image resolution logic in containers.run_container() +and containers._resolve_singularity_image(). +We mock the framework-specific runners so no Docker/Apptainer install is needed. +""" +from pathlib import Path +from unittest.mock import patch + +import pytest + +from spras.config.container_schema import ContainerFramework, ProcessedContainerSettings + + +def make_settings(**overrides): + """Create a ProcessedContainerSettings with sensible defaults, then apply overrides. + + Accepts either ContainerFramework enum values or plain strings for 'framework'; + strings are resolved to their enum member so the property helpers work. + """ + defaults = dict( + framework=ContainerFramework.docker, + unpack_singularity=False, + base_url="docker.io", + prefix="docker.io/reedcompbio", + hash_length=7, + ) + defaults.update(overrides) + if isinstance(defaults["framework"], str): + defaults["framework"] = ContainerFramework(defaults["framework"]) + return ProcessedContainerSettings(**defaults) + + +CONTAINER_SUFFIX = "pathlinker:v2" +DUMMY_COMMAND = ["echo", "test"] +DUMMY_VOLUMES = [] +DUMMY_WORKDIR = "/workdir" +DUMMY_OUTDIR = "/output" + + +class TestRunContainerImageResolution: + """Test the 5 branches of image resolution in run_container(). 
+ + Strategy: @patch replaces the real framework runner (run_container_docker / + run_container_singularity) with a MagicMock for the duration of the test. + The mock is injected as the last parameter of the test method (e.g. mock_docker). + After calling run_container(), we inspect mock.call_args[0][0] to read the + 'container' string that run_container() resolved and passed to the runner — + this is the value we want to assert on, without needing Docker or Apptainer. + """ + + @patch("spras.containers.run_container_docker", return_value="ok") + def test_no_override_uses_default(self, mock_docker): + """No image_override combines prefix/suffix using defaults.""" + settings = make_settings(framework="docker") + from spras.containers import run_container + + run_container(CONTAINER_SUFFIX, DUMMY_COMMAND, DUMMY_VOLUMES, DUMMY_WORKDIR, DUMMY_OUTDIR, settings) + # call_args[0] is the positional args tuple; [0] is the first arg (the container image string) + container_arg = mock_docker.call_args[0][0] + assert container_arg == "docker.io/reedcompbio/pathlinker:v2" + + @patch("spras.containers.run_container_docker", return_value="ok") + def test_suffix_override_prepends_prefix(self, mock_docker): + """Suffix-style override like 'pathlinker:v1234' --> prefix/override.""" + settings = make_settings(framework="docker", image_override="pathlinker:v1234") + from spras.containers import run_container + + run_container(CONTAINER_SUFFIX, DUMMY_COMMAND, DUMMY_VOLUMES, DUMMY_WORKDIR, DUMMY_OUTDIR, settings) + container_arg = mock_docker.call_args[0][0] + assert container_arg == "docker.io/reedcompbio/pathlinker:v1234" + + @patch("spras.containers.run_container_docker", return_value="ok") + def test_full_registry_override_used_as_is(self, mock_docker): + """Full registry ref like 'ghcr.io/myorg/image:tag' --> used verbatim.""" + settings = make_settings(framework="docker", image_override="ghcr.io/myorg/pathlinker:v1234") + from spras.containers import run_container + + 
run_container(CONTAINER_SUFFIX, DUMMY_COMMAND, DUMMY_VOLUMES, DUMMY_WORKDIR, DUMMY_OUTDIR, settings) + container_arg = mock_docker.call_args[0][0] + assert container_arg == "ghcr.io/myorg/pathlinker:v1234" + + @patch("spras.containers.run_container_docker", return_value="ok") + def test_owner_image_override_prepends_base_url(self, mock_docker): + """Owner/image override like 'someowner/oi2:latest' --> prepend base_url only.""" + settings = make_settings(framework="docker", image_override="someowner/oi2:latest") + from spras.containers import run_container + + run_container(CONTAINER_SUFFIX, DUMMY_COMMAND, DUMMY_VOLUMES, DUMMY_WORKDIR, DUMMY_OUTDIR, settings) + container_arg = mock_docker.call_args[0][0] + assert container_arg == "docker.io/someowner/oi2:latest" + + @patch("spras.containers.run_container_docker", return_value="ok") + def test_sif_override_with_docker_warns_and_falls_back(self, mock_docker): + """.sif override + docker framework --> warning + fallback to default image.""" + settings = make_settings(framework="docker", image_override="images/pathlinker_v2.sif") + from spras.containers import run_container + + with pytest.warns(UserWarning, match=r"\.sif file.*apptainer/singularity"): + run_container(CONTAINER_SUFFIX, DUMMY_COMMAND, DUMMY_VOLUMES, DUMMY_WORKDIR, DUMMY_OUTDIR, settings) + container_arg = mock_docker.call_args[0][0] + assert container_arg == "docker.io/reedcompbio/pathlinker:v2" + + @patch("spras.containers.run_container_singularity", return_value="ok") + def test_sif_override_with_singularity_passes_through(self, mock_singularity): + """.sif override + singularity --> container arg is still prefix/suffix (the .sif is + resolved inside run_container_singularity via config.image_override).""" + settings = make_settings(framework="singularity", image_override="images/pathlinker_v1234.sif") + from spras.containers import run_container + + run_container(CONTAINER_SUFFIX, DUMMY_COMMAND, DUMMY_VOLUMES, DUMMY_WORKDIR, DUMMY_OUTDIR, settings) + 
container_arg = mock_singularity.call_args[0][0] + # The actual .sif is used inside run_container_singularity; run_container itself + # passes the default container name and lets the inner function handle the override. + assert container_arg == "docker.io/reedcompbio/pathlinker:v2" + + +class TestResolveSingularityImage: + """Test _resolve_singularity_image(), the helper that determines which image + apptainer/singularity should run. + + Cases 1 and 2 (unpack_singularity=False) need no mocks at all. + Cases 3 and 4 (unpack_singularity=True) mock spython.main.Client so no + real apptainer installation is required. + """ + + def test_no_override_no_unpack_returns_docker_uri(self): + """Case 1: no override, no unpack --> 'docker://'.""" + settings = make_settings(framework="singularity", unpack_singularity=False) + from spras.containers import _resolve_singularity_image + + result = _resolve_singularity_image("docker.io/reedcompbio/pathlinker:v2", settings) + assert result == "docker://docker.io/reedcompbio/pathlinker:v2" + + def test_local_sif_no_unpack_returns_sif_path(self): + """Case 2: local .sif, no unpack --> returns the .sif path directly.""" + settings = make_settings(framework="singularity", unpack_singularity=False, + image_override="images/pathlinker_v2.sif") + from spras.containers import _resolve_singularity_image + + result = _resolve_singularity_image("docker.io/reedcompbio/pathlinker:v2", settings) + assert result == "images/pathlinker_v2.sif" + + @patch("spython.main.Client") + def test_local_sif_with_unpack_builds_sandbox(self, mock_client, tmp_path, monkeypatch): + """Case 3: local .sif + unpack --> skips pull, calls Client.build, returns sandbox path.""" + # Run in tmp_path so the 'unpacked' directory is created there + monkeypatch.chdir(tmp_path) + + settings = make_settings(framework="singularity", unpack_singularity=True, + image_override="images/pathlinker_v2.sif") + from spras.containers import _resolve_singularity_image + + result = 
_resolve_singularity_image("docker.io/reedcompbio/pathlinker:v2", settings) + + # Should NOT have called Client.pull (the .sif is already local) + mock_client.pull.assert_not_called() + # Should have called Client.build to unpack the .sif into a sandbox + mock_client.build.assert_called_once() + build_kwargs = mock_client.build.call_args + assert build_kwargs[1]["recipe"] == "images/pathlinker_v2.sif" + assert build_kwargs[1]["sandbox"] is True + # Return value is the sandbox directory: unpacked/ + assert result == Path("unpacked") / "pathlinker_v2" + + @patch("spython.main.Client") + def test_registry_with_unpack_pulls_and_builds(self, mock_client, tmp_path, monkeypatch): + """Case 4: registry + unpack --> calls Client.pull then Client.build, returns sandbox path.""" + monkeypatch.chdir(tmp_path) + mock_client.pull.return_value = str(tmp_path / "unpacked" / "pathlinker_v2.sif") + + settings = make_settings(framework="singularity", unpack_singularity=True) + from spras.containers import _resolve_singularity_image + + result = _resolve_singularity_image("docker.io/reedcompbio/pathlinker:v2", settings) + + # Should have pulled the docker image + mock_client.pull.assert_called_once() + pull_args = mock_client.pull.call_args + assert pull_args[0][0] == "docker://docker.io/reedcompbio/pathlinker:v2" + # Should have unpacked into sandbox + mock_client.build.assert_called_once() + assert mock_client.build.call_args[1]["sandbox"] is True + # Return value: unpacked/ with colon replaced by underscore + assert result == Path("unpacked") / "pathlinker_v2" + + @patch("spython.main.Client") + def test_unpack_skips_build_if_sandbox_exists(self, mock_client, tmp_path, monkeypatch): + """If the sandbox directory already exists (e.g. 
from a concurrent job), skip Client.build.""" + monkeypatch.chdir(tmp_path) + # Pre-create the sandbox directory + (tmp_path / "unpacked" / "pathlinker_v2").mkdir(parents=True) + + settings = make_settings(framework="singularity", unpack_singularity=True, + image_override="images/pathlinker_v2.sif") + from spras.containers import _resolve_singularity_image + + result = _resolve_singularity_image("docker.io/reedcompbio/pathlinker:v2", settings) + + mock_client.pull.assert_not_called() + mock_client.build.assert_not_called() + assert result == Path("unpacked") / "pathlinker_v2"