diff --git a/.gitignore b/.gitignore
index 1e972cd884..3a581e2a3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -135,5 +135,6 @@ htmlcov/
 .idea
 .vscode
 
-# cProfile output
+# profile outputs
 *.prof
+pytest_profile_stats.txt
diff --git a/docs/development/usage.rst b/docs/development/usage.rst
index 37a706fc98..2edcea4dfb 100644
--- a/docs/development/usage.rst
+++ b/docs/development/usage.rst
@@ -67,6 +67,9 @@ There are a range of handy development functions that you might want to use to s
    * - Running ``pytest`` commands inside the ``poetry`` environment.
      - Make sure you have already installed ``tidy3d`` in ``poetry`` and you are in the root directory.
      - ``poetry run pytest``
+   * - Analyze slow ``pytest`` runs with durations / cProfile / debug subset helpers.
+     - Use ``--debug`` to run only the first N collected tests or ``--profile`` to capture call stacks.
+     - ``python scripts/profile_pytest.py [options]``
    * - Run ``coverage`` testing from the ``poetry`` environment.
      -
      - ``poetry run coverage run -m pytest``
@@ -84,4 +87,3 @@ There are a range of handy development functions that you might want to use to s
      - ``poetry run tidy3d develop replace-in-files``
 
-
diff --git a/pyproject.toml b/pyproject.toml
index 6c34a48e93..8a2177151a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -309,9 +309,11 @@ banned-module-level-imports = ["scipy", "matplotlib"]
 
 [tool.pytest.ini_options]
 # TODO: remove --assert=plain when https://github.com/scipy/scipy/issues/22236 is resolved
-addopts = "--cov=tidy3d --doctest-modules -n auto --dist worksteal --assert=plain -m 'not numerical'"
+addopts = "--cov=tidy3d --doctest-modules -n auto --dist worksteal --assert=plain -m 'not numerical and not perf'"
 markers = [
     "numerical: marks numerical tests for adjoint gradients that require running simulations (deselect with '-m \"not numerical\"')",
+    "perf: marks tests which test the runtime of operations (deselect with '-m \"not perf\"')",
+    "slow: marks tests as slow (deselect with -m 'not slow')",
 ]
 env = ["MPLBACKEND=Agg", "OMP_NUM_THREADS=1", "TIDY3D_MICROWAVE__SUPPRESS_RF_LICENSE_WARNING=true"]
 doctest_optionflags = "NORMALIZE_WHITESPACE ELLIPSIS"
diff --git a/scripts/profile_pytest.py b/scripts/profile_pytest.py
new file mode 100755
index 0000000000..ddbb642018
--- /dev/null
+++ b/scripts/profile_pytest.py
@@ -0,0 +1,302 @@
+#!/usr/bin/env python3
+"""Helper utilities for profiling ``pytest`` runs inside the Poetry env.
+
+This script can:
+
+* run the full test suite (default) while surfacing the slowest tests via ``--durations``;
+* run in "debug" mode to execute only the first N collected tests; and
+* wrap ``pytest`` in ``cProfile`` to identify the most expensive function calls.
+
+Examples::
+
+    python scripts/profile_pytest.py                 # full suite with the slowest tests listed
+    python scripts/profile_pytest.py --debug --debug-limit 10
+    python scripts/profile_pytest.py --profile --profile-output results.prof
+    python scripts/profile_pytest.py -t tests/test_components/test_scene.py \
+        --pytest-args "-k basic"
+
+Forward any additional ``pytest`` CLI flags via ``--pytest-args "..."`` and provide
+explicit test targets with ``-t/--tests`` (defaults to the entire ``tests`` dir).
+""" + +from __future__ import annotations + +import argparse +import re +import shlex +import shutil +import subprocess +import sys +from collections import defaultdict +from collections.abc import Iterable +from pathlib import Path + +try: + import pstats +except ImportError as exc: # pragma: no cover - stdlib module should exist + raise SystemExit("pstats from the standard library is required") from exc + +DURATION_LINE_RE = re.compile(r"^\s*(?P\d+(?:\.\d+)?)s\s+\w+\s+(?P\S+)\s*$") + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Profile pytest executions launched via Poetry.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--debug", + action="store_true", + help="Run only a subset of collected tests (see --debug-limit).", + ) + parser.add_argument( + "--list-limit", + type=int, + default=30, + help="How many entries to show in aggregated duration summaries (set 0 for all).", + ) + parser.add_argument( + "--debug-limit", + type=int, + default=25, + help="Number of test node ids to execute when --debug is enabled.", + ) + parser.add_argument( + "--durations", + type=int, + default=0, + help="Pass-through value for pytest's --durations flag (use 0 for all tests).", + ) + parser.add_argument( + "--profile", + action="store_true", + help="Wrap pytest in cProfile and display the heaviest call sites afterward.", + ) + parser.add_argument( + "--profile-output", + default="results.prof", + help="Where to write the binary cProfile stats (used when --profile is set).", + ) + parser.add_argument( + "--profile-top", + type=int, + default=30, + help="How many rows of aggregated profile data to print.", + ) + parser.add_argument( + "--profile-sort", + choices=["cumulative", "tottime", "calls", "time"], + default="cumulative", + help="Sort order for the profile summary table.", + ) + parser.add_argument( + "-t", + "--tests", + action="append", + dest="tests", + metavar="PATH_OR_NODE", + help="Explicit pytest targets. Repeatable.", + ) + parser.add_argument( + "--pytest-args", + default="", + help="Extra pytest CLI args as a quoted string (e.g. 
+    )
+    return parser.parse_args()
+
+
+def ensure_poetry_available() -> None:
+    if shutil.which("poetry") is None:
+        raise SystemExit("'poetry' command not found in PATH.")
+
+
+def build_pytest_base(profile: bool, profile_output: Path) -> list[str]:
+    base_cmd = ["poetry", "run"]
+    if profile:
+        base_cmd += [
+            "python",
+            "-m",
+            "cProfile",
+            "-o",
+            str(profile_output.resolve()),
+            "-m",
+            "pytest",
+        ]
+    else:
+        base_cmd.append("pytest")
+    return base_cmd
+
+
+def collect_node_ids(extra_args: Iterable[str], tests: Iterable[str]) -> list[str]:
+    cmd = ["poetry", "run", "pytest", "--collect-only", "-q"]
+    cmd.extend(extra_args)
+    cmd.extend(tests)
+    print(f"Collecting tests via: {' '.join(shlex.quote(part) for part in cmd)}")
+    result = subprocess.run(cmd, capture_output=True, text=True, check=False)
+    sys.stdout.write(result.stdout)
+    sys.stderr.write(result.stderr)
+    if result.returncode != 0:
+        raise SystemExit(result.returncode)
+
+    node_ids: list[str] = []
+    for line in result.stdout.splitlines():
+        stripped = line.strip()
+        if not stripped or stripped.startswith(("<", "collected ")):
+            continue
+        node_ids.append(stripped)
+    if not node_ids:
+        raise SystemExit("No tests collected; check your --tests / --pytest-args filters.")
+    return node_ids
+
+
+def summarize_profile(stats_path: Path, sort: str, top: int) -> None:
+    if not stats_path.exists():
+        print(f"Profile file {stats_path} not found; skipping summary.")
+        return
+    stats = pstats.Stats(str(stats_path))
+    stats.sort_stats(sort)
+    print("\nTop profiled call sites (via cProfile):")
+    stats.print_stats(top)
+
+
+def extract_durations_from_output(output: str) -> list[tuple[float, str]]:
+    """Parse pytest --durations lines from stdout."""
+
+    durations: list[tuple[float, str]] = []
+    for line in output.splitlines():
+        match = DURATION_LINE_RE.match(line)
+        if not match:
+            continue
+        secs = float(match.group("secs"))
+        nodeid = match.group("nodeid")
+        durations.append((secs, nodeid))
+    return durations
+
+
+def print_aggregated_durations(
+    durations: list[tuple[float, str]],
+    list_limit: int,
+) -> None:
+    """Print durations aggregated by file and by test (collapsing parametrizations)."""
+
+    if not durations:
+        print("\n[durations] no --durations lines found in pytest output.")
+        return
+
+    by_file: dict[str, float] = defaultdict(float)
+    by_test: dict[str, float] = defaultdict(float)
+
+    for secs, nodeid in durations:
+        base = nodeid.split("[", 1)[0]
+        file_name = base.split("::", 1)[0]
+        by_file[file_name] += secs
+        by_test[base] += secs
+
+    def _print_section(title: str, mapping: dict[str, float]) -> None:
+        print(f"\nAggregated durations ({title}):")
+        items = sorted(mapping.items(), key=lambda kv: kv[1], reverse=True)
+        if list_limit > 0:
+            items = items[:list_limit]
+        for name, total in items:
+            print(f"{total:8.02f}s {name}")
+
+    _print_section("by file", by_file)
+    _print_section("by test (parametrizations combined)", by_test)
+
+
+def truncate_pytest_durations_output(output: str, limit: int) -> str:
+    """Keep pytest's duration section header, but show only the top `limit` lines."""
+    lines = output.splitlines()
+    out_lines = []
+    in_durations_section = False
+    kept = 0
+
+    for line in lines:
+        if "slowest" in line and "durations" in line:
+            in_durations_section = True
+            kept = 0
+            out_lines.append(line)
+            continue
+
+        if in_durations_section:
+            # Stop after we've shown N durations or reached next blank section
+            if not line.strip():
+                in_durations_section = False
+            elif kept >= limit:
continue + else: + kept += 1 + + out_lines.append(line) + return "\n".join(out_lines) + + +def export_to_file(result, args, filtered_stdout, durations): + sys.stdout.write(filtered_stdout) + sys.stderr.write(result.stderr) + + # Write the filtered output to a file as well + output_file = "pytest_profile_stats.txt" + results_path = Path(output_file) + results_path.write_text(filtered_stdout) + + if durations: + print_aggregated_durations(durations, args.list_limit) + + with results_path.open("a") as f: + f.write("\n\n[Aggregated Durations]\n") + for secs, nodeid in durations: + f.write(f"{secs:.2f}s {nodeid}\n") + print(f"Stats were written to {output_file}") + + +def main() -> int: + args = parse_args() + ensure_poetry_available() + + if args.debug and args.debug_limit <= 0: + raise SystemExit("--debug-limit must be a positive integer.") + + tests = args.tests or ["tests"] + extra_args = shlex.split(args.pytest_args) + + # Handle debug collection (collect-only) + if args.debug: + collected = collect_node_ids(extra_args, tests) + pytest_targets = collected[: args.debug_limit] + print(f"\nDebug mode: running the first {len(pytest_targets)} collected test(s).") + else: + pytest_targets = tests + + # Build the full pytest command + base_cmd = build_pytest_base(args.profile, Path(args.profile_output)) + pytest_cmd = base_cmd + extra_args + if args.durations is not None: + pytest_cmd.append(f"--durations={args.durations}") + pytest_cmd.extend(pytest_targets) + + print(f"\nExecuting: {' '.join(shlex.quote(part) for part in pytest_cmd)}\n") + + # Run pytest + result = subprocess.run( + pytest_cmd, + check=False, + text=True, + capture_output=True, + ) + + # Extract and truncate outputs + filtered_stdout = truncate_pytest_durations_output(result.stdout, args.list_limit) + durations = extract_durations_from_output(result.stdout) if args.durations is not None else [] + + # Print once and export + export_to_file(result, args, filtered_stdout, durations) + + # Profile summary (if enabled) + if args.profile and result.returncode == 0: + summarize_profile(Path(args.profile_output), args.profile_sort, args.profile_top) + + return result.returncode + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tests/test_components/autograd/test_autograd.py b/tests/test_components/autograd/test_autograd.py index b56a6345cb..98a7fb2dc7 100644 --- a/tests/test_components/autograd/test_autograd.py +++ b/tests/test_components/autograd/test_autograd.py @@ -49,8 +49,6 @@ TEST_CUSTOM_MEDIUM_SPEED = False TEST_POLYSLAB_SPEED = False -# whether to run numerical gradient tests, off by default because it runs real simulations -RUN_NUMERICAL = False _NUMERICAL_COMBINATION = ("polyslab", "mode") TEST_MODES = ("pipeline", "adjoint", "speed") @@ -242,7 +240,6 @@ def emulated_run_fwd(simulation, task_name, **run_kwargs) -> td.SimulationData: def emulated_run_bwd(simulation, task_name, **run_kwargs) -> td.SimulationData: """What gets called instead of ``web/api/autograd/autograd.py::_run_tidy3d_bwd``.""" - task_name_fwd = "".join(task_name.partition("_adjoint")[:-2]) # run the adjoint sim @@ -616,8 +613,8 @@ def field_vol_postprocess_fn(sim_data, mnt_data): for _, val in mnt_data.field_components.items(): value = value + abs(anp.sum(val.values)) intensity = anp.nan_to_num(anp.sum(sim_data.get_intensity(mnt_data.monitor.name).values)) - value += intensity - value += anp.sum(mnt_data.flux.values) + value = value + intensity + value = value + anp.sum(mnt_data.flux.values) return value field_point = td.FieldMonitor( 
@@ -630,8 +627,8 @@ def field_vol_postprocess_fn(sim_data, mnt_data):
     def field_point_postprocess_fn(sim_data, mnt_data):
         value = 0.0
         for _, val in mnt_data.field_components.items():
-            value += abs(anp.sum(abs(val.values)))
-            value += anp.sum(sim_data.get_intensity(mnt_data.monitor.name).values)
+            value = value + abs(anp.sum(abs(val.values)))
+            value = value + anp.sum(sim_data.get_intensity(mnt_data.monitor.name).values)
         return value
 
     return {
@@ -691,7 +688,7 @@ def plot_sim(sim: td.Simulation, plot_eps: bool = True) -> None:
 
 args = [("polyslab", "mode")]
 
-# args = [("polyslab", "mode")]
+ASYNC_TEST_ARGS = args[:2]
 
 
 def get_functions(structure_key: str, monitor_key: str) -> dict[str, typing.Callable]:
@@ -760,7 +757,7 @@ def test_polyslab_axis_ops(axis):
     basis_vecs = p.edge_basis_vectors(edges=edges)
 
 
-@pytest.mark.skipif(not RUN_NUMERICAL, reason="Numerical gradient tests runs through web API.")
+@pytest.mark.numerical
 @pytest.mark.parametrize("structure_key, monitor_key", (_NUMERICAL_COMBINATION,))
 def test_autograd_numerical(structure_key, monitor_key):
     """Test an objective function through tidy3d autograd."""
@@ -859,6 +856,7 @@ def objective(*args):
 
 
 @pytest.mark.parametrize("structure_key, monitor_key", args)
+@pytest.mark.slow
 def test_autograd_objective(use_emulated_run, structure_key, monitor_key):
     """Test an objective function through tidy3d autograd."""
 
@@ -891,31 +889,58 @@ def objective(*args):
     assert anp.all(grad != 0.0), "some gradients are 0"
 
 
-@pytest.mark.parametrize("structure_key, monitor_key", args)
-@pytest.mark.parametrize("use_task_names", [True, False])
-def test_autograd_async(use_emulated_run, structure_key, monitor_key, use_task_names):
-    """Test an objective function through tidy3d autograd."""
+def _compare_async_vs_sync(fn_dicts, local_gradient) -> None:
+    """Compare async vs non-async autograd for a subset of structure/monitor pairs."""
 
-    fn_dict = get_functions(structure_key, monitor_key)
-    make_sim = fn_dict["sim"]
-    postprocess = fn_dict["postprocess"]
+    # synchronous objective: run() one sim after another
+    def objective_sync(*params):
+        total = 0.0
+        for i, fn_dict in enumerate(fn_dicts):
+            sim = fn_dict["sim"](*params)
+            data = run(
+                sim, task_name=f"autograd_sync_{i}", verbose=False, local_gradient=local_gradient
+            )
+            total = total + fn_dict["postprocess"](data)
+        return total
 
-    task_names = {"test_a", "adjoint", "_test"}
+    def objective_async(*params):
+        sims = {}
+        for i, fn_dict in enumerate(fn_dicts):
+            sim = fn_dict["sim"](*params)
+            key = f"autograd_{i}"
+            sims[key] = sim
 
-    def objective(*args):
-        if use_task_names:
-            sims = {task_name: make_sim(*args) for task_name in task_names}
-        else:
-            sims = [make_sim(*args)] * len(task_names)
-        batch_data = run_async(sims, verbose=False)
-        value = 0.0
-        for _, sim_data in batch_data.items():
-            value += postprocess(sim_data)
-        return value
+        batch_data = run_async(sims, verbose=False, local_gradient=local_gradient)
 
-    val, grad = ag.value_and_grad(objective)(params0)
-    print(val, grad)
-    assert anp.all(grad != 0.0), "some gradients are 0"
+        total = 0.0
+        for i, fn_dict in enumerate(fn_dicts):
+            key = f"autograd_{i}"
+            total = total + fn_dict["postprocess"](batch_data[key])
+        return total
+
+    val_sync, grad_sync = ag.value_and_grad(objective_sync)(params0)
+    val_async, grad_async = ag.value_and_grad(objective_async)(params0)
+
+    val_sync = float(val_sync)
+    val_async = float(val_async)
+    grad_sync = np.asarray(grad_sync)
+    grad_async = np.asarray(grad_async)
+
+    np.testing.assert_allclose(val_async, val_sync, rtol=1e-8, atol=1e-10)
+    np.testing.assert_allclose(grad_async, grad_sync, rtol=1e-6, atol=1e-8)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("local_gradient", [True, False])
+def test_autograd_async(use_emulated_run, local_gradient):
+    """Async autograd for a small subset; must match non-async autograd."""
+
+    # only use a small subset of structure/monitor combinations to keep this test cheap
+    fn_dicts = [
+        get_functions(structure_key, monitor_key) for structure_key, monitor_key in ASYNC_TEST_ARGS
+    ]
+
+    _compare_async_vs_sync(fn_dicts, local_gradient)
 
 
 class TestTupleGrads:
@@ -995,11 +1020,9 @@ def obj(center: tuple, size: tuple) -> float:
     assert not np.allclose(dp_dsize, 0)
 
 
-@pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_autograd_async_some_zero_grad(use_emulated_run, structure_key, monitor_key):
+def test_autograd_async_some_zero_grad(use_emulated_run):
     """Test objective where only some simulations in batch have adjoint sources."""
-
-    fn_dict = get_functions(structure_key, monitor_key)
+    fn_dict = get_functions(args[0][0], args[0][1])
     make_sim = fn_dict["sim"]
     postprocess = fn_dict["postprocess"]
@@ -1039,6 +1062,7 @@ def objective(*args):
     grad = ag.grad(objective)(params0)
 
 
+@pytest.mark.perf
 def test_autograd_speed_num_structures(use_emulated_run):
     """Test an objective function through tidy3d autograd."""
 
@@ -1141,8 +1165,9 @@ def objective_cylinder(params):
 
 
 @pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_autograd_server(use_emulated_run, structure_key, monitor_key):
-    """Test an objective function through tidy3d autograd."""
+@pytest.mark.slow
+def test_autograd_local(use_emulated_run, structure_key, monitor_key):
+    """Test an objective function through tidy3d autograd with local gradients."""
 
     fn_dict = get_functions(structure_key, monitor_key)
     make_sim = fn_dict["sim"]
@@ -1151,7 +1176,7 @@ def test_autograd_server(use_emulated_run, structure_key, monitor_key):
     def objective(*args):
         """Objective function."""
         sim = make_sim(*args)
-        data = run(sim, task_name="autograd_test", verbose=False, local_gradient=False)
+        data = run(sim, task_name="autograd_test", verbose=False, local_gradient=True)
         value = postprocess(data)
         return value
 
@@ -1159,28 +1184,6 @@ def objective(*args):
     assert np.all(np.abs(grad) > 0), "some gradients are 0"
 
 
-@pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_autograd_async_server(use_emulated_run, structure_key, monitor_key):
-    """Test an async objective function through tidy3d autograd."""
-
-    fn_dict = get_functions(structure_key, monitor_key)
-    make_sim = fn_dict["sim"]
-    postprocess = fn_dict["postprocess"]
-
-    def objective(*args):
-        """Objective function."""
-        sim = make_sim(*args)
-        sims = {"autograd_test1": sim, "autograd_test2": sim}
-        batch_data = run_async(sims, verbose=False, local_gradient=False)
-        value = 0.0
-        for _, sim_data in batch_data.items():
-            value = value + postprocess(sim_data)
-        return value
-
-    val, grad = ag.value_and_grad(objective)(params0)
-    assert np.all(np.abs(grad) > 0), "some gradients are 0"
-
-
 @pytest.mark.parametrize("structure_key", ("custom_med",))
 def test_sim_full_ops(structure_key):
     """make sure the autograd operations don't error on a simulation containing everything."""
@@ -2060,6 +2063,7 @@ def f(eps_inf, poles):
     assert np.allclose(grads_computed[field_path], np.conj(grad_poles[i][j]))
 
 
+@pytest.mark.slow
 def test_custom_sellmeier(monkeypatch):
    """Test that computed CustomSellmeier derivatives match analytic mapping."""
 
@@ -2564,8 +2568,8 @@ def objective(params):
     print(g)
 
 
-@pytest.mark.parametrize("structure_key", structure_keys_)
-def test_multi_frequency_equivalence(use_emulated_run, structure_key):
+@pytest.mark.slow
+def test_multi_frequency_equivalence(use_emulated_run):
     """Test an objective function through tidy3d autograd."""
 
     def objective_indi(params, structure_key) -> float:
@@ -2595,6 +2599,7 @@ def objective_multi(params, structure_key) -> float:
         amps = get_amps(sim_data, "multi").sel(mode_index=0, direction="+")
         return power(amps)
 
+    structure_key = structure_keys_[0]
     params0_ = params0 + 1.0
 
     # J_indi = objective_indi(params0_, structure_key)
@@ -3022,10 +3027,9 @@ def objective(params):
     assert anp.all(grad != 0.0), "some gradients are 0 for conductivity-only test"
 
 
-@pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_vjp_nan(use_emulated_run, structure_key, monitor_key):
+def test_vjp_nan(use_emulated_run):
     """Test vjp data that has nan in it is flagged as an error."""
-
+    structure_key, monitor_key = args[0]
     fn_dict = get_functions(structure_key, monitor_key)
     make_sim = fn_dict["sim"]
     postprocess = fn_dict["postprocess"]
diff --git a/tests/test_components/test_IO.py b/tests/test_components/test_IO.py
index 83114f5678..fda5e6f890 100644
--- a/tests/test_components/test_IO.py
+++ b/tests/test_components/test_IO.py
@@ -176,16 +176,18 @@ def test_1a_simulation_load_export2(tmp_path):
     assert SIM2 == SIM3, "original and loaded simulations are not the same"
 
 
+@pytest.mark.perf
 def test_validation_speed(tmp_path):
     sizes_bytes = []
     times_sec = []
     path = str(tmp_path / "simulation.json")
 
     _ = SIM
 
-    N_tests = 10
+    N_tests = 2  # may be increased temporarily; larger values slow down routine test runs
+    max_structures = np.log10(2)  # may be increased temporarily; larger values slow down routine test runs
 
     # adjust as needed, keeping small to speed tests up
-    num_structures = np.logspace(0, 2, N_tests).astype(int)
+    num_structures = np.logspace(0, max_structures, N_tests).astype(int)
 
     for n in num_structures:
         new_structures = []
diff --git a/tests/test_components/test_custom.py b/tests/test_components/test_custom.py
index e52adf321e..3b13d5428e 100644
--- a/tests/test_components/test_custom.py
+++ b/tests/test_components/test_custom.py
@@ -718,6 +718,7 @@ def verify_custom_dispersive_medium_methods(mat, reduced_fields):
 
 
 @pytest.mark.parametrize("unstructured", [False, True])
+@pytest.mark.slow
 def test_custom_pole_residue(unstructured):
     """Custom pole residue medium."""
     seed = 98345
@@ -776,6 +777,7 @@ def test_custom_pole_residue(unstructured):
 
 
 @pytest.mark.parametrize("unstructured", [False, True])
+@pytest.mark.slow
 def test_custom_sellmeier(unstructured):
     """Custom Sellmeier medium."""
     seed = 897245
@@ -838,6 +840,7 @@ def test_custom_sellmeier(unstructured):
 
 
 @pytest.mark.parametrize("unstructured", [False, True])
+@pytest.mark.slow
 def test_custom_lorentz(unstructured):
     """Custom Lorentz medium."""
     seed = 31342
@@ -991,6 +994,7 @@ def test_custom_debye(unstructured):
 
 
 @pytest.mark.parametrize("unstructured", [True])
+@pytest.mark.slow
 def test_custom_anisotropic_medium(unstructured):
     """Custom anisotropic medium."""
     seed = 43243
diff --git a/tests/test_components/test_eme.py b/tests/test_components/test_eme.py
index 5400d7bd36..0491e1eaf2 100644
--- a/tests/test_components/test_eme.py
+++ b/tests/test_components/test_eme.py
@@ -1032,6 +1032,7 @@ def _get_eme_mode_solver_data(num_sweep=0):
     )
 
 
+@pytest.mark.slow
 def _get_eme_field_data(num_sweep=0):
     dataset = _get_eme_field_dataset(num_sweep=num_sweep)
     kwargs = dataset.field_components
@@ -1099,6 +1100,7 @@ def _get_eme_port_modes(num_sweep=0):
     return mode_data.updated_copy(n_complex=n_complex, **kwargs)
 
 
+@pytest.mark.slow
 def test_eme_sim_data():
     sim = make_eme_sim()
     mode_monitor_data = _get_eme_mode_solver_data()
diff --git a/tests/test_components/test_scene.py b/tests/test_components/test_scene.py
index b13dac7473..ffded2d94d 100644
--- a/tests/test_components/test_scene.py
+++ b/tests/test_components/test_scene.py
@@ -10,7 +10,7 @@
 
 import tidy3d as td
 import tidy3d.components.scene as scene_mod
-from tidy3d.components.scene import MAX_NUM_MEDIUMS
+from tidy3d.components import scene
 from tidy3d.components.viz import STRUCTURE_EPS_CMAP, STRUCTURE_EPS_CMAP_R
 from tidy3d.exceptions import SetupError
@@ -19,6 +19,7 @@
 
 SCENE = td.Scene()
 SCENE_FULL = SIM_FULL.scene
+TEST_MAX_NUM_MEDIUMS = 3
 
 
 def test_scene_init():
@@ -240,11 +241,11 @@ def test_structure_eps_color_mapping_no_matplotlib(
     assert np.allclose(params.facecolor, expected)
 
 
-def test_num_mediums():
+def test_num_mediums(monkeypatch):
     """Make sure we error if too many mediums supplied."""
-
+    monkeypatch.setattr(scene, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS)
     structures = []
-    for i in range(MAX_NUM_MEDIUMS):
+    for i in range(TEST_MAX_NUM_MEDIUMS):
         structures.append(
             td.Structure(geometry=td.Box(size=(1, 1, 1)), medium=td.Medium(permittivity=i + 1))
         )
diff --git a/tests/test_components/test_simulation.py b/tests/test_components/test_simulation.py
index 3fdc73f02f..e691f68ffe 100644
--- a/tests/test_components/test_simulation.py
+++ b/tests/test_components/test_simulation.py
@@ -12,8 +12,7 @@
 from matplotlib.testing.compare import compare_images
 
 import tidy3d as td
-from tidy3d.components import simulation
-from tidy3d.components.scene import MAX_NUM_MEDIUMS
+from tidy3d.components import scene, simulation
 from tidy3d.components.simulation import MAX_NUM_SOURCES
 from tidy3d.exceptions import SetupError, Tidy3dError, Tidy3dKeyError
 from tidy3d.plugins.mode import ModeSolver
@@ -29,6 +28,7 @@
 
 SIM = td.Simulation(size=(1, 1, 1), run_time=1e-12, grid_spec=td.GridSpec(wavelength=1.0))
 RTOL = 0.01
+TEST_MAX_NUM_MEDIUMS = 3
 
 
 def test_sim_init():
@@ -1694,12 +1694,10 @@ def test_sim_validate_structure_bounds_pml(box_length, absorb_type, log_level):
 
 def test_num_mediums(monkeypatch):
     """Make sure we error if too many mediums supplied."""
-
-    max_num_mediums = 10
-    monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", max_num_mediums)
+    monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS)
     structures = []
     grid_spec = td.GridSpec.auto(wavelength=1.0)
-    for i in range(max_num_mediums):
+    for i in range(TEST_MAX_NUM_MEDIUMS):
         structures.append(
             td.Structure(geometry=td.Box(size=(1, 1, 1)), medium=td.Medium(permittivity=i + 1))
         )
@@ -3226,9 +3224,9 @@ def test_advanced_material_intersection():
     sim = sim.updated_copy(structures=[struct1, struct2])
 
 
-def test_num_lumped_elements():
+def test_num_lumped_elements(monkeypatch):
     """Make sure we error if too many lumped elements supplied."""
-
+    monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS)
     resistor = td.LumpedResistor(
         size=(0, 1, 2), center=(0, 0, 0), name="R1", voltage_axis=2, resistance=75
     )
@@ -3238,7 +3236,7 @@ def test_num_lumped_elements():
         size=(5, 5, 5),
         grid_spec=grid_spec,
         structures=[],
-        lumped_elements=[resistor] * MAX_NUM_MEDIUMS,
+        lumped_elements=[resistor] * TEST_MAX_NUM_MEDIUMS,
         run_time=1e-12,
     )
     with pytest.raises(pydantic.ValidationError):
@@ -3246,7 +3244,7 @@ def test_num_lumped_elements():
         size=(5, 5, 5),
         grid_spec=grid_spec,
         structures=[],
-        lumped_elements=[resistor] * (MAX_NUM_MEDIUMS + 1),
+        lumped_elements=[resistor] * (TEST_MAX_NUM_MEDIUMS + 1),
         run_time=1e-12,
     )
@@ -3748,7 +3746,6 @@ def test_messages_contain_object_names():
 
 def test_structures_per_medium(monkeypatch):
     """Test if structures that share the same medium warn or error appropriately."""
-    import tidy3d.components.scene as scene
 
     # Set low thresholds to keep the test fast; ensure len(structures) > MAX to avoid early return
     monkeypatch.setattr(scene, "WARN_STRUCTURES_PER_MEDIUM", 2)
diff --git a/tests/test_package/test_parametric_variants.py b/tests/test_package/test_parametric_variants.py
index ddd0d94bbc..f514ab7c3a 100644
--- a/tests/test_package/test_parametric_variants.py
+++ b/tests/test_package/test_parametric_variants.py
@@ -32,7 +32,8 @@ def test_graphene_defaults():
     _ = graphene.numerical_conductivity(freqs)
 
 
-@pytest.mark.parametrize("rng_seed", np.arange(0, 15))
+@pytest.mark.parametrize("rng_seed", np.arange(0, 8))
+@pytest.mark.slow
 def test_graphene(rng_seed):
     """test graphene for range of physical parameters"""
     rng = default_rng(rng_seed)
diff --git a/tests/test_plugins/test_design.py b/tests/test_plugins/test_design.py
index 376c68849b..5d05f2610c 100644
--- a/tests/test_plugins/test_design.py
+++ b/tests/test_plugins/test_design.py
@@ -18,7 +18,7 @@
 SWEEP_METHODS = {
     "grid": tdd.MethodGrid(),
     "monte_carlo": tdd.MethodMonteCarlo(num_points=5, seed=1),
-    "bay_opt": tdd.MethodBayOpt(initial_iter=5, n_iter=2, seed=1),
+    "bay_opt": tdd.MethodBayOpt(initial_iter=3, n_iter=2, seed=2),
     "gen_alg": tdd.MethodGenAlg(
         solutions_per_pop=6,
         n_generations=2,
@@ -323,15 +323,15 @@ def init_design_space(sweep_method):
     radius_variable = tdd.ParameterFloat(
         name="radius",
         span=(0, 1.5),
-        num_points=5,  # note: only used for MethodGrid
+        num_points=3,  # note: only used for MethodGrid
     )
 
     num_spheres_variable = tdd.ParameterInt(
         name="num_spheres",
-        span=(0, 3),
+        span=(0, 2),
     )
 
-    tag_variable = tdd.ParameterAny(name="tag", allowed_values=("tag1", "tag2", "tag3"))
+    tag_variable = tdd.ParameterAny(name="tag", allowed_values=("tag1", "tag2"))
 
     design_space = tdd.DesignSpace(
         parameters=[radius_variable, num_spheres_variable, tag_variable],
@@ -344,6 +344,7 @@ def init_design_space(sweep_method):
 
 
 @pytest.mark.parametrize("sweep_method", SWEEP_METHODS.values())
+@pytest.mark.slow
 def test_sweep(sweep_method, monkeypatch):
     # Problem, simulate scattering cross section of sphere ensemble
     # simulation consists of `num_spheres` spheres of radius `radius`.
diff --git a/tests/test_plugins/test_invdes.py b/tests/test_plugins/test_invdes.py
index f3ba2f3b96..361019448c 100644
--- a/tests/test_plugins/test_invdes.py
+++ b/tests/test_plugins/test_invdes.py
@@ -368,6 +368,7 @@ def test_continue_run_fns(use_emulated_run):  # noqa: F811
     )
 
 
+@pytest.mark.slow
 def test_continue_run_from_file(use_emulated_run):  # noqa: F811
     """Test continuing an already run inverse design from file."""
     result_orig = make_result(use_emulated_run)
diff --git a/tests/test_web/test_webapi.py b/tests/test_web/test_webapi.py
index 4fdd280bf0..211fc2d158 100644
--- a/tests/test_web/test_webapi.py
+++ b/tests/test_web/test_webapi.py
@@ -1061,18 +1061,22 @@ def test_job_run_accepts_pathlikes(monkeypatch, tmp_path, path_builder):
     [_pathlib_builder, _posix_builder, _str_builder, _fspath_builder],
     ids=["pathlib.Path", "posixpath_str", "str", "PathLike"],
 )
+@pytest.mark.slow
 def test_batch_run_accepts_pathlike_dir(monkeypatch, tmp_path, dir_builder):
     """Batch.run(path_dir=...) accepts any PathLike directory location."""
 
-    sims = {"A": make_sim(), "B": make_sim()}
+    sims = {"A": make_sim()}
     out_dir = dir_builder(tmp_path, "batch_out")
 
     # Map task_ids to sims: upload() is patched to return task_name, which for dict input
     # corresponds to the dict keys ("A", "B"), so we map those.
-    apply_common_patches(monkeypatch, tmp_path, taskid_to_sim={"A": sims["A"], "B": sims["B"]})
+    apply_common_patches(monkeypatch, tmp_path, taskid_to_sim={"A": sims["A"]})
 
     b = Batch(simulations=sims, folder_name=PROJECT_NAME)
     b.run(path_dir=out_dir)
 
-    # Directory created and two .hdf5 outputs produced
+    # Directory created and .hdf5 output produced
     out_dir_str = os.fspath(out_dir)
     assert os.path.isdir(out_dir_str)
+
+    batch_file = Path(out_dir) / "batch.hdf5"
+    assert batch_file.is_file()
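
A minimal sketch of how the pieces added in this patch might be combined during development (the flags are the ones introduced above; the test path and ``-k`` filter are illustrative, taken from the script's own docstring example):

    # routine run: the default addopts now deselect numerical and perf tests
    poetry run pytest

    # additionally skip tests marked slow; restate the other exclusions,
    # since a -m given on the command line replaces the -m from addopts
    poetry run pytest -m "not numerical and not perf and not slow"

    # investigate a slow area with the new helper: durations summary plus cProfile call stacks
    python scripts/profile_pytest.py --profile -t tests/test_components/test_scene.py --pytest-args "-k basic"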