diff --git a/.gitignore b/.gitignore
index 1e972cd884..3a581e2a3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -135,5 +135,6 @@ htmlcov/
 .idea
 .vscode
-# cProfile output
+# profile outputs
 *.prof
+pytest_profile_stats.txt
diff --git a/docs/development/usage.rst b/docs/development/usage.rst
index 37a706fc98..2edcea4dfb 100644
--- a/docs/development/usage.rst
+++ b/docs/development/usage.rst
@@ -67,6 +67,9 @@ There are a range of handy development functions that you might want to use to s
    * - Running ``pytest`` commands inside the ``poetry`` environment.
      - Make sure you have already installed ``tidy3d`` in ``poetry`` and you are in the root directory.
      - ``poetry run pytest``
+   * - Analyze slow ``pytest`` runs with durations / cProfile / debug subset helpers.
+     - Use ``--debug`` to run only the first N collected tests or ``--profile`` to capture call stacks.
+     - ``python scripts/profile_pytest.py [options]``
    * - Run ``coverage`` testing from the ``poetry`` environment.
      -
      - ``poetry run coverage run -m pytest``
@@ -84,4 +87,3 @@ There are a range of handy development functions that you might want to use to s
      - ``poetry run tidy3d develop replace-in-files``
-
diff --git a/poetry.lock b/poetry.lock
index 431e056c7e..50a39d99e7 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -5264,6 +5264,22 @@ tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""}
 [package.extras]
 testing = ["covdefaults (>=2.3)", "coverage (>=7.10.7)", "pytest-mock (>=3.15.1)"]

+[[package]]
+name = "pytest-order"
+version = "1.3.0"
+description = "pytest plugin to run your tests in a specific order"
+optional = true
+python-versions = ">=3.7"
+groups = ["main"]
+markers = "extra == \"dev\""
+files = [
+    {file = "pytest_order-1.3.0-py3-none-any.whl", hash = "sha256:2cd562a21380345dd8d5774aa5fd38b7849b6ee7397ca5f6999bbe6e89f07f6e"},
+    {file = "pytest_order-1.3.0.tar.gz", hash = "sha256:51608fec3d3ee9c0adaea94daa124a5c4c1d2bb99b00269f098f414307f23dde"},
+]
+
+[package.dependencies]
+pytest = {version = ">=6.2.4", markers = "python_version >= \"3.10\""}
+
 [[package]]
 name = "pytest-timeout"
 version = "2.4.0"
@@ -7721,7 +7737,7 @@ files = [

 [extras]
 design = ["bayesian-optimization", "pygad", "pyswarms"]
-dev = ["bayesian-optimization", "cma", "coverage", "devsim", "diff-cover", "dill", "gdstk", "grcwa", "ipython", "ipython", "jinja2", "jupyter", "memory_profiler", "mypy", "myst-parser", "nbconvert", "nbdime", "nbsphinx", "networkx", "openpyxl", "optax", "pre-commit", "psutil", "pydata-sphinx-theme", "pygad", "pylint", "pyswarms", "pytest", "pytest-cov", "pytest-env", "pytest-timeout", "pytest-xdist", "rtree", "ruff", "sax", "scikit-rf", "signac", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design", "sphinx-favicon", "sphinx-notfound-page", "sphinx-sitemap", "sphinx-tabs", "sphinxemoji", "tmm", "torch", "torch", "tox", "trimesh", "vtk", "zizmor"]
+dev = ["bayesian-optimization", "cma", "coverage", "devsim", "diff-cover", "dill", "gdstk", "grcwa", "ipython", "ipython", "jinja2", "jupyter", "memory_profiler", "mypy", "myst-parser", "nbconvert", "nbdime", "nbsphinx", "networkx", "openpyxl", "optax", "pre-commit", "psutil", "pydata-sphinx-theme", "pygad", "pylint", "pyswarms", "pytest", "pytest-cov", "pytest-env", "pytest-order", "pytest-timeout", "pytest-xdist", "rtree", "ruff", "sax", "scikit-rf", "signac", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design", "sphinx-favicon", "sphinx-notfound-page", "sphinx-sitemap", "sphinx-tabs", "sphinxemoji", "tmm", "torch", "torch", "tox", "trimesh", "vtk", "zizmor"]
 docs = ["cma", "devsim", "gdstk", "grcwa", "ipython", "jinja2", "jupyter", "myst-parser", "nbconvert", "nbdime", "nbsphinx", "openpyxl", "optax", "pydata-sphinx-theme", "pylint", "sax", "signac", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design", "sphinx-favicon", "sphinx-notfound-page", "sphinx-sitemap", "sphinx-tabs", "sphinxemoji", "tmm"]
 gdstk = ["gdstk"]
 heatcharge = ["devsim", "trimesh", "vtk"]
@@ -7734,4 +7750,4 @@ vtk = ["vtk"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "deab3d5c68d8ee1fce2e7c86f2c90d1b601025f5ff2c5465767c0bc479b992be"
+content-hash = "ca63acb6d61cf73c63801f909ed2416bc3c80e97fd6e7e8e97ea401dfcb01da5"
diff --git a/pyproject.toml b/pyproject.toml
index c5ae51b5a7..5a065a1637 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -65,6 +65,7 @@ pytest-timeout = { version = "*", optional = true }
 pytest-xdist = "^3.6.1"
 pytest-cov = "^6.0.0"
 pytest-env = "^1.1.5"
+pytest-order = { version = "^1.2.1", optional = true }
 tox = { version = "*", optional = true }
 diff-cover = { version = "*", optional = true }
 zizmor = { version = "*", optional = true }
@@ -154,6 +155,7 @@ dev = [
     'pytest-xdist',
     'pytest-env',
     'pytest-cov',
+    'pytest-order',
     'rtree',
     'ruff',
     'sax',
@@ -296,9 +298,10 @@ banned-module-level-imports = ["scipy", "matplotlib"]

 [tool.pytest.ini_options]
 # TODO: remove --assert=plain when https://github.com/scipy/scipy/issues/22236 is resolved
-addopts = "--cov=tidy3d --doctest-modules -n auto --dist worksteal --assert=plain -m 'not numerical'"
+addopts = "--cov=tidy3d --doctest-modules -n auto --dist worksteal --assert=plain -m 'not numerical and not perf'"
 markers = [
     "numerical: marks numerical tests for adjoint gradients that require running simulations (deselect with '-m \"not numerical\"')",
+    "perf: marks tests which test the runtime of operations (deselect with '-m \"not perf\"')",
 ]
 env = ["MPLBACKEND=Agg", "OMP_NUM_THREADS=1", "TIDY3D_MICROWAVE__SUPPRESS_RF_LICENSE_WARNING=true"]
 doctest_optionflags = "NORMALIZE_WHITESPACE ELLIPSIS"
diff --git a/scripts/profile_pytest.py b/scripts/profile_pytest.py
new file mode 100755
index 0000000000..ddbb642018
--- /dev/null
+++ b/scripts/profile_pytest.py
@@ -0,0 +1,302 @@
+#!/usr/bin/env python3
+"""Helper utilities for profiling ``pytest`` runs inside the Poetry env.
+
+This script can:
+* run the full test suite (default) while surfacing the slowest tests via ``--durations``;
+* run in "debug" mode to execute only the first N collected tests; and
+* wrap ``pytest`` in ``cProfile`` to identify the most expensive function calls.
+
+Examples::
+
+    python scripts/profile_pytest.py                     # full suite with the slowest tests listed
+    python scripts/profile_pytest.py --debug --debug-limit 10
+    python scripts/profile_pytest.py --profile --profile-output results.prof
+    python scripts/profile_pytest.py -t tests/test_components/test_scene.py \
+        --pytest-args "-k basic"
+
+Forward any additional ``pytest`` CLI flags via ``--pytest-args "..."`` and provide
+explicit test targets with ``-t/--tests`` (defaults to the entire ``tests`` dir).
+""" + +from __future__ import annotations + +import argparse +import re +import shlex +import shutil +import subprocess +import sys +from collections import defaultdict +from collections.abc import Iterable +from pathlib import Path + +try: + import pstats +except ImportError as exc: # pragma: no cover - stdlib module should exist + raise SystemExit("pstats from the standard library is required") from exc + +DURATION_LINE_RE = re.compile(r"^\s*(?P\d+(?:\.\d+)?)s\s+\w+\s+(?P\S+)\s*$") + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Profile pytest executions launched via Poetry.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--debug", + action="store_true", + help="Run only a subset of collected tests (see --debug-limit).", + ) + parser.add_argument( + "--list-limit", + type=int, + default=30, + help="How many entries to show in aggregated duration summaries (set 0 for all).", + ) + parser.add_argument( + "--debug-limit", + type=int, + default=25, + help="Number of test node ids to execute when --debug is enabled.", + ) + parser.add_argument( + "--durations", + type=int, + default=0, + help="Pass-through value for pytest's --durations flag (use 0 for all tests).", + ) + parser.add_argument( + "--profile", + action="store_true", + help="Wrap pytest in cProfile and display the heaviest call sites afterward.", + ) + parser.add_argument( + "--profile-output", + default="results.prof", + help="Where to write the binary cProfile stats (used when --profile is set).", + ) + parser.add_argument( + "--profile-top", + type=int, + default=30, + help="How many rows of aggregated profile data to print.", + ) + parser.add_argument( + "--profile-sort", + choices=["cumulative", "tottime", "calls", "time"], + default="cumulative", + help="Sort order for the profile summary table.", + ) + parser.add_argument( + "-t", + "--tests", + action="append", + dest="tests", + metavar="PATH_OR_NODE", + help="Explicit pytest targets. Repeatable.", + ) + parser.add_argument( + "--pytest-args", + default="", + help="Extra pytest CLI args as a quoted string (e.g. 
'--maxfail=1 -k smoke').", + ) + return parser.parse_args() + + +def ensure_poetry_available() -> None: + if shutil.which("poetry") is None: + raise SystemExit("'poetry' command not found in PATH.") + + +def build_pytest_base(profile: bool, profile_output: Path) -> list[str]: + base_cmd = ["poetry", "run"] + if profile: + base_cmd += [ + "python", + "-m", + "cProfile", + "-o", + str(profile_output.resolve()), + "-m", + "pytest", + ] + else: + base_cmd.append("pytest") + return base_cmd + + +def collect_node_ids(extra_args: Iterable[str], tests: Iterable[str]) -> list[str]: + cmd = ["poetry", "run", "pytest", "--collect-only", "-q"] + cmd.extend(extra_args) + cmd.extend(tests) + print(f"Collecting tests via: {' '.join(shlex.quote(part) for part in cmd)}") + result = subprocess.run(cmd, capture_output=True, text=True, check=False) + sys.stdout.write(result.stdout) + sys.stderr.write(result.stderr) + if result.returncode != 0: + raise SystemExit(result.returncode) + + node_ids: list[str] = [] + for line in result.stdout.splitlines(): + stripped = line.strip() + if not stripped or stripped.startswith(("<", "collected ")): + continue + node_ids.append(stripped) + if not node_ids: + raise SystemExit("No tests collected; check your --tests / --pytest-args filters.") + return node_ids + + +def summarize_profile(stats_path: Path, sort: str, top: int) -> None: + if not stats_path.exists(): + print(f"Profile file {stats_path} not found; skipping summary.") + return + stats = pstats.Stats(str(stats_path)) + stats.sort_stats(sort) + print("\nTop profiled call sites (via cProfile):") + stats.print_stats(top) + + +def extract_durations_from_output(output: str) -> list[tuple[float, str]]: + """Parse pytest --durations lines from stdout.""" + + durations: list[tuple[float, str]] = [] + for line in output.splitlines(): + match = DURATION_LINE_RE.match(line) + if not match: + continue + secs = float(match.group("secs")) + nodeid = match.group("nodeid") + durations.append((secs, nodeid)) + return durations + + +def print_aggregated_durations( + durations: list[tuple[float, str]], + list_limit: int, +) -> None: + """Print durations aggregated by file and by test (collapsing parametrizations).""" + + if not durations: + print("\n[durations] no --durations lines found in pytest output.") + return + + by_file: dict[str, float] = defaultdict(float) + by_test: dict[str, float] = defaultdict(float) + + for secs, nodeid in durations: + base = nodeid.split("[", 1)[0] + file_name = base.split("::", 1)[0] + by_file[file_name] += secs + by_test[base] += secs + + def _print_section(title: str, mapping: dict[str, float]) -> None: + print(f"\nAggregated durations ({title}):") + items = sorted(mapping.items(), key=lambda kv: kv[1], reverse=True) + if list_limit > 0: + items = items[:list_limit] + for name, total in items: + print(f"{total:8.02f}s {name}") + + _print_section("by file", by_file) + _print_section("by test (parametrizations combined)", by_test) + + +def truncate_pytest_durations_output(output: str, limit: int) -> str: + """Keep pytest's duration section header, but show only the top `limit` lines.""" + lines = output.splitlines() + out_lines = [] + in_durations_section = False + kept = 0 + + for line in lines: + if "slowest" in line and "durations" in line: + in_durations_section = True + kept = 0 + out_lines.append(line) + continue + + if in_durations_section: + # Stop after we've shown N durations or reached next blank section + if not line.strip(): + in_durations_section = False + elif kept >= limit: + 
+                continue
+            else:
+                kept += 1
+
+        out_lines.append(line)
+    return "\n".join(out_lines)
+
+
+def export_to_file(result, args, filtered_stdout, durations):
+    sys.stdout.write(filtered_stdout)
+    sys.stderr.write(result.stderr)
+
+    # Write the filtered output to a file as well
+    output_file = "pytest_profile_stats.txt"
+    results_path = Path(output_file)
+    results_path.write_text(filtered_stdout)
+
+    if durations:
+        print_aggregated_durations(durations, args.list_limit)
+
+        with results_path.open("a") as f:
+            f.write("\n\n[Aggregated Durations]\n")
+            for secs, nodeid in durations:
+                f.write(f"{secs:.2f}s {nodeid}\n")
+    print(f"Stats were written to {output_file}")
+
+
+def main() -> int:
+    args = parse_args()
+    ensure_poetry_available()
+
+    if args.debug and args.debug_limit <= 0:
+        raise SystemExit("--debug-limit must be a positive integer.")
+
+    tests = args.tests or ["tests"]
+    extra_args = shlex.split(args.pytest_args)
+
+    # Handle debug collection (collect-only)
+    if args.debug:
+        collected = collect_node_ids(extra_args, tests)
+        pytest_targets = collected[: args.debug_limit]
+        print(f"\nDebug mode: running the first {len(pytest_targets)} collected test(s).")
+    else:
+        pytest_targets = tests
+
+    # Build the full pytest command
+    base_cmd = build_pytest_base(args.profile, Path(args.profile_output))
+    pytest_cmd = base_cmd + extra_args
+    if args.durations is not None:
+        pytest_cmd.append(f"--durations={args.durations}")
+    pytest_cmd.extend(pytest_targets)
+
+    print(f"\nExecuting: {' '.join(shlex.quote(part) for part in pytest_cmd)}\n")
+
+    # Run pytest
+    result = subprocess.run(
+        pytest_cmd,
+        check=False,
+        text=True,
+        capture_output=True,
+    )
+
+    # Extract and truncate outputs
+    filtered_stdout = truncate_pytest_durations_output(result.stdout, args.list_limit)
+    durations = extract_durations_from_output(result.stdout) if args.durations is not None else []
+
+    # Print once and export
+    export_to_file(result, args, filtered_stdout, durations)
+
+    # Profile summary (if enabled)
+    if args.profile and result.returncode == 0:
+        summarize_profile(Path(args.profile_output), args.profile_sort, args.profile_top)
+
+    return result.returncode
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/tests/test_components/autograd/test_autograd.py b/tests/test_components/autograd/test_autograd.py
index 8621eaded7..8d38498605 100644
--- a/tests/test_components/autograd/test_autograd.py
+++ b/tests/test_components/autograd/test_autograd.py
@@ -41,6 +41,7 @@ numerical: adjoint with an extra numerical derivative test after
 speed: pipeline with cProfile to analyze performance
 """
+pytestmark = pytest.mark.order(0)

 # make it faster to toggle this
 TEST_CUSTOM_MEDIUM_SPEED = False
@@ -239,7 +240,6 @@ def emulated_run_fwd(simulation, task_name, **run_kwargs) -> td.SimulationData:

 def emulated_run_bwd(simulation, task_name, **run_kwargs) -> td.SimulationData:
     """What gets called instead of ``web/api/autograd/autograd.py::_run_tidy3d_bwd``."""
-
     task_name_fwd = "".join(task_name.partition("_adjoint")[:-2])

     # run the adjoint sim
@@ -658,7 +658,7 @@ def plot_sim(sim: td.Simulation, plot_eps: bool = True) -> None:

 args = [("polyslab", "mode")]

-# args = [("polyslab", "mode")]
+ASYNC_TEST_ARGS = args[:2]


 def get_functions(structure_key: str, monitor_key: str) -> dict[str, typing.Callable]:
@@ -826,6 +826,7 @@ def objective(*args):


 @pytest.mark.parametrize("structure_key, monitor_key", args)
+@pytest.mark.slow
 def test_autograd_objective(use_emulated_run, structure_key, monitor_key):
     """Test an objective function through tidy3d autograd."""
@@ -858,31 +859,55 @@ def objective(*args):
     assert anp.all(grad != 0.0), "some gradients are 0"


-@pytest.mark.parametrize("structure_key, monitor_key", args)
-@pytest.mark.parametrize("use_task_names", [True, False])
-def test_autograd_async(use_emulated_run, structure_key, monitor_key, use_task_names):
-    """Test an objective function through tidy3d autograd."""
+def _compare_async_vs_sync(fn_dicts) -> None:
+    """Compare async vs non-async autograd for a subset of structure/monitor pairs."""

-    fn_dict = get_functions(structure_key, monitor_key)
-    make_sim = fn_dict["sim"]
-    postprocess = fn_dict["postprocess"]
+    # synchronous objective: run() one sim after another
+    def objective_sync(*params):
+        total = 0.0
+        for i, fn_dict in enumerate(fn_dicts):
+            sim = fn_dict["sim"](*params)
+            data = run(sim, task_name=f"autograd_sync_{i}", verbose=False)
+            total += fn_dict["postprocess"](data)
+        return total

-    task_names = {"test_a", "adjoint", "_test"}
+    def objective_async(*params):
+        sims = {}
+        for i, fn_dict in enumerate(fn_dicts):
+            sim = fn_dict["sim"](*params)
+            key = f"autograd_{i}"
+            sims[key] = sim

-    def objective(*args):
-        if use_task_names:
-            sims = {task_name: make_sim(*args) for task_name in task_names}
-        else:
-            sims = [make_sim(*args)] * len(task_names)
-        batch_data = run_async(sims, verbose=False)
-        value = 0.0
-        for _, sim_data in batch_data.items():
-            value += postprocess(sim_data)
-        return value
+        batch_data = run_async(sims, verbose=False, local_gradient=False)

-    val, grad = ag.value_and_grad(objective)(params0)
-    print(val, grad)
-    assert anp.all(grad != 0.0), "some gradients are 0"
+        total = 0.0
+        for i, fn_dict in enumerate(fn_dicts):
+            key = f"autograd_{i}"
+            total += fn_dict["postprocess"](batch_data[key])
+        return total
+
+    val_sync, grad_sync = ag.value_and_grad(objective_sync)(params0)
+    val_async, grad_async = ag.value_and_grad(objective_async)(params0)
+
+    val_sync = float(val_sync)
+    val_async = float(val_async)
+    grad_sync = np.asarray(grad_sync)
+    grad_async = np.asarray(grad_async)
+
+    np.testing.assert_allclose(val_async, val_sync, rtol=1e-8, atol=1e-10)
+    np.testing.assert_allclose(grad_async, grad_sync, rtol=1e-6, atol=1e-8)
+
+
+@pytest.mark.slow
+def test_autograd_async(use_emulated_run):
+    """Async autograd for a small subset; must match non-async autograd."""
+
+    # only use two structure/monitor combinations to keep this test cheap
+    fn_dicts = [
+        get_functions(structure_key, monitor_key) for structure_key, monitor_key in ASYNC_TEST_ARGS
+    ]
+
+    _compare_async_vs_sync(fn_dicts)


 class TestTupleGrads:
@@ -962,11 +987,9 @@ def obj(center: tuple, size: tuple) -> float:
     assert not np.allclose(dp_dsize, 0)


-@pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_autograd_async_some_zero_grad(use_emulated_run, structure_key, monitor_key):
+def test_autograd_async_some_zero_grad(use_emulated_run):
     """Test objective where only some simulations in batch have adjoint sources."""
-
-    fn_dict = get_functions(structure_key, monitor_key)
+    fn_dict = get_functions(args[0][0], args[0][1])
     make_sim = fn_dict["sim"]
     postprocess = fn_dict["postprocess"]
@@ -1006,6 +1029,7 @@ def objective(*args):
     grad = ag.grad(objective)(params0)


+@pytest.mark.perf
 def test_autograd_speed_num_structures(use_emulated_run):
     """Test an objective function through tidy3d autograd."""
@@ -1108,6 +1132,7 @@ def objective_cylinder(params):


 @pytest.mark.parametrize("structure_key, monitor_key", args)
+@pytest.mark.slow
 def test_autograd_server(use_emulated_run, structure_key, monitor_key):
     """Test an objective function through tidy3d autograd."""
@@ -1126,26 +1151,16 @@ def objective(*args):
     assert np.all(np.abs(grad) > 0), "some gradients are 0"


-@pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_autograd_async_server(use_emulated_run, structure_key, monitor_key):
-    """Test an async objective function through tidy3d autograd."""
+@pytest.mark.slow
+def test_autograd_async_server(use_emulated_run):
+    """Same comparison, but with alternative task-keying (server-style)."""

-    fn_dict = get_functions(structure_key, monitor_key)
-    make_sim = fn_dict["sim"]
-    postprocess = fn_dict["postprocess"]
-
-    def objective(*args):
-        """Objective function."""
-        sim = make_sim(*args)
-        sims = {"autograd_test1": sim, "autograd_test2": sim}
-        batch_data = run_async(sims, verbose=False, local_gradient=False)
-        value = 0.0
-        for _, sim_data in batch_data.items():
-            value = value + postprocess(sim_data)
-        return value
+    fn_dicts = [
+        get_functions(structure_key, monitor_key) for structure_key, monitor_key in ASYNC_TEST_ARGS
+    ]

-    val, grad = ag.value_and_grad(objective)(params0)
-    assert np.all(np.abs(grad) > 0), "some gradients are 0"
+    # here we exercise the alternative key style in the async dict
+    _compare_async_vs_sync(fn_dicts)


 @pytest.mark.parametrize("structure_key", ("custom_med",))
@@ -2027,6 +2042,7 @@ def f(eps_inf, poles):
     assert np.allclose(grads_computed[field_path], np.conj(grad_poles[i][j]))


+@pytest.mark.slow
 def test_custom_sellmeier(monkeypatch):
     """Test that computed CustomSellmeier derivatives match analytic mapping."""
@@ -2531,8 +2547,8 @@ def objective(params):
     print(g)


-@pytest.mark.parametrize("structure_key", structure_keys_)
-def test_multi_frequency_equivalence(use_emulated_run, structure_key):
+@pytest.mark.slow
+def test_multi_frequency_equivalence(use_emulated_run):
     """Test an objective function through tidy3d autograd."""

     def objective_indi(params, structure_key) -> float:
@@ -2562,6 +2578,7 @@ def objective_multi(params, structure_key) -> float:
         amps = get_amps(sim_data, "multi").sel(mode_index=0, direction="+")
         return power(amps)

+    structure_key = structure_keys_[0]
     params0_ = params0 + 1.0

     # J_indi = objective_indi(params0_, structure_key)
@@ -2989,10 +3006,9 @@ def objective(params):
     assert anp.all(grad != 0.0), "some gradients are 0 for conductivity-only test"


-@pytest.mark.parametrize("structure_key, monitor_key", args)
-def test_vjp_nan(use_emulated_run, structure_key, monitor_key):
+def test_vjp_nan(use_emulated_run):
     """Test vjp data that has nan in it is flagged as an error."""
-
+    structure_key, monitor_key = args[0]
     fn_dict = get_functions(structure_key, monitor_key)
     make_sim = fn_dict["sim"]
     postprocess = fn_dict["postprocess"]
diff --git a/tests/test_components/test_IO.py b/tests/test_components/test_IO.py
index 83114f5678..fda5e6f890 100644
--- a/tests/test_components/test_IO.py
+++ b/tests/test_components/test_IO.py
@@ -176,16 +176,18 @@ def test_1a_simulation_load_export2(tmp_path):
     assert SIM2 == SIM3, "original and loaded simulations are not the same"


+@pytest.mark.perf
 def test_validation_speed(tmp_path):
     sizes_bytes = []
     times_sec = []
     path = str(tmp_path / "simulation.json")

     _ = SIM

-    N_tests = 10
+    N_tests = 2  # may be increased temporarily, makes it slow for routine tests
+    max_structures = np.log10(2)  # may be increased temporarily, makes it slow for routine tests

     # adjust as needed, keeping small to speed tests up
-    num_structures = np.logspace(0, 2, N_tests).astype(int)
+    num_structures = np.logspace(0, max_structures, N_tests).astype(int)

     for n in num_structures:
         new_structures = []
diff --git a/tests/test_components/test_custom.py b/tests/test_components/test_custom.py
index e52adf321e..3b13d5428e 100644
--- a/tests/test_components/test_custom.py
+++ b/tests/test_components/test_custom.py
@@ -718,6 +718,7 @@ def verify_custom_dispersive_medium_methods(mat, reduced_fields):


 @pytest.mark.parametrize("unstructured", [False, True])
+@pytest.mark.slow
 def test_custom_pole_residue(unstructured):
     """Custom pole residue medium."""
     seed = 98345
@@ -776,6 +777,7 @@ def test_custom_pole_residue(unstructured):


 @pytest.mark.parametrize("unstructured", [False, True])
+@pytest.mark.slow
 def test_custom_sellmeier(unstructured):
     """Custom Sellmeier medium."""
     seed = 897245
@@ -838,6 +840,7 @@ def test_custom_sellmeier(unstructured):


 @pytest.mark.parametrize("unstructured", [False, True])
+@pytest.mark.slow
 def test_custom_lorentz(unstructured):
     """Custom Lorentz medium."""
     seed = 31342
@@ -991,6 +994,7 @@ def test_custom_debye(unstructured):


 @pytest.mark.parametrize("unstructured", [True])
+@pytest.mark.slow
 def test_custom_anisotropic_medium(unstructured):
     """Custom anisotropic medium."""
     seed = 43243
diff --git a/tests/test_components/test_eme.py b/tests/test_components/test_eme.py
index c2c5d5a4af..c33269ec0a 100644
--- a/tests/test_components/test_eme.py
+++ b/tests/test_components/test_eme.py
@@ -888,6 +888,7 @@ def _get_eme_mode_solver_data(num_sweep=0):
     )


+@pytest.mark.slow
 def _get_eme_field_data(num_sweep=0):
     dataset = _get_eme_field_dataset(num_sweep=num_sweep)
     kwargs = dataset.field_components
@@ -953,6 +954,7 @@ def _get_eme_port_modes(num_sweep=0):
     return mode_data.updated_copy(n_complex=n_complex, **kwargs)


+@pytest.mark.slow
 def test_eme_sim_data():
     sim = make_eme_sim()
     mode_monitor_data = _get_eme_mode_solver_data()
diff --git a/tests/test_components/test_scene.py b/tests/test_components/test_scene.py
index 09667b6030..bad9493d03 100644
--- a/tests/test_components/test_scene.py
+++ b/tests/test_components/test_scene.py
@@ -9,7 +9,7 @@ import pytest

 import tidy3d as td
-from tidy3d.components.scene import MAX_NUM_MEDIUMS
+from tidy3d.components import scene
 from tidy3d.components.viz import STRUCTURE_EPS_CMAP, STRUCTURE_EPS_CMAP_R
 from tidy3d.exceptions import SetupError
@@ -18,6 +18,7 @@
 SCENE = td.Scene()
 SCENE_FULL = SIM_FULL.scene
+TEST_MAX_NUM_MEDIUMS = 3


 def test_scene_init():
@@ -200,11 +201,11 @@ def test_structure_eps_color_mapping():
     assert np.allclose(pp_max_reverse.facecolor, expected_max_reverse)


-def test_num_mediums():
+def test_num_mediums(monkeypatch):
     """Make sure we error if too many mediums supplied."""
-
+    monkeypatch.setattr(scene, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS)
     structures = []
-    for i in range(MAX_NUM_MEDIUMS):
+    for i in range(TEST_MAX_NUM_MEDIUMS):
         structures.append(
             td.Structure(geometry=td.Box(size=(1, 1, 1)), medium=td.Medium(permittivity=i + 1))
         )
diff --git a/tests/test_components/test_simulation.py b/tests/test_components/test_simulation.py
index 3fdc73f02f..e691f68ffe 100644
--- a/tests/test_components/test_simulation.py
+++ b/tests/test_components/test_simulation.py
@@ -12,8 +12,7 @@ from matplotlib.testing.compare import compare_images

 import tidy3d as td
-from tidy3d.components import simulation
-from tidy3d.components.scene import MAX_NUM_MEDIUMS
+from tidy3d.components import scene, simulation
 from tidy3d.components.simulation import MAX_NUM_SOURCES
 from tidy3d.exceptions import SetupError, Tidy3dError, Tidy3dKeyError
 from tidy3d.plugins.mode import ModeSolver
@@ -29,6 +28,7 @@
 SIM = td.Simulation(size=(1, 1, 1), run_time=1e-12, grid_spec=td.GridSpec(wavelength=1.0))
 RTOL = 0.01
+TEST_MAX_NUM_MEDIUMS = 3


 def test_sim_init():
@@ -1694,12 +1694,10 @@ def test_sim_validate_structure_bounds_pml(box_length, absorb_type, log_level):

 def test_num_mediums(monkeypatch):
     """Make sure we error if too many mediums supplied."""
-
-    max_num_mediums = 10
-    monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", max_num_mediums)
+    monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS)
     structures = []
     grid_spec = td.GridSpec.auto(wavelength=1.0)
-    for i in range(max_num_mediums):
+    for i in range(TEST_MAX_NUM_MEDIUMS):
         structures.append(
             td.Structure(geometry=td.Box(size=(1, 1, 1)), medium=td.Medium(permittivity=i + 1))
         )
@@ -3226,9 +3224,9 @@ def test_advanced_material_intersection():
     sim = sim.updated_copy(structures=[struct1, struct2])


-def test_num_lumped_elements():
+def test_num_lumped_elements(monkeypatch):
     """Make sure we error if too many lumped elements supplied."""
-
+    monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS)
     resistor = td.LumpedResistor(
         size=(0, 1, 2), center=(0, 0, 0), name="R1", voltage_axis=2, resistance=75
     )
@@ -3238,7 +3236,7 @@ def test_num_lumped_elements(monkeypatch):
         size=(5, 5, 5),
         grid_spec=grid_spec,
         structures=[],
-        lumped_elements=[resistor] * MAX_NUM_MEDIUMS,
+        lumped_elements=[resistor] * TEST_MAX_NUM_MEDIUMS,
         run_time=1e-12,
     )
     with pytest.raises(pydantic.ValidationError):
@@ -3246,7 +3244,7 @@
         _ = td.Simulation(
             size=(5, 5, 5),
             grid_spec=grid_spec,
             structures=[],
-            lumped_elements=[resistor] * (MAX_NUM_MEDIUMS + 1),
+            lumped_elements=[resistor] * (TEST_MAX_NUM_MEDIUMS + 1),
             run_time=1e-12,
         )
@@ -3748,7 +3746,6 @@ def test_messages_contain_object_names():

 def test_structures_per_medium(monkeypatch):
     """Test if structures that share the same medium warn or error appropriately."""
-    import tidy3d.components.scene as scene

     # Set low thresholds to keep the test fast; ensure len(structures) > MAX to avoid early return
     monkeypatch.setattr(scene, "WARN_STRUCTURES_PER_MEDIUM", 2)
diff --git a/tests/test_package/test_parametric_variants.py b/tests/test_package/test_parametric_variants.py
index ddd0d94bbc..f514ab7c3a 100644
--- a/tests/test_package/test_parametric_variants.py
+++ b/tests/test_package/test_parametric_variants.py
@@ -32,7 +32,8 @@ def test_graphene_defaults():
     _ = graphene.numerical_conductivity(freqs)


-@pytest.mark.parametrize("rng_seed", np.arange(0, 15))
+@pytest.mark.parametrize("rng_seed", np.arange(0, 8))
+@pytest.mark.slow
 def test_graphene(rng_seed):
     """test graphene for range of physical parameters"""
     rng = default_rng(rng_seed)
diff --git a/tests/test_plugins/test_design.py b/tests/test_plugins/test_design.py
index 376c68849b..5d05f2610c 100644
--- a/tests/test_plugins/test_design.py
+++ b/tests/test_plugins/test_design.py
@@ -18,7 +18,7 @@
 SWEEP_METHODS = {
     "grid": tdd.MethodGrid(),
     "monte_carlo": tdd.MethodMonteCarlo(num_points=5, seed=1),
-    "bay_opt": tdd.MethodBayOpt(initial_iter=5, n_iter=2, seed=1),
+    "bay_opt": tdd.MethodBayOpt(initial_iter=3, n_iter=2, seed=2),
     "gen_alg": tdd.MethodGenAlg(
         solutions_per_pop=6,
         n_generations=2,
@@ -323,15 +323,15 @@ def init_design_space(sweep_method):
     radius_variable = tdd.ParameterFloat(
         name="radius",
         span=(0, 1.5),
-        num_points=5,  # note: only used for MethodGrid
+        num_points=3,  # note: only used for MethodGrid
     )

     num_spheres_variable = tdd.ParameterInt(
         name="num_spheres",
-        span=(0, 3),
+        span=(0, 2),
     )

-    tag_variable = tdd.ParameterAny(name="tag", allowed_values=("tag1", "tag2", "tag3"))
+    tag_variable = tdd.ParameterAny(name="tag", allowed_values=("tag1", "tag2"))

     design_space = tdd.DesignSpace(
         parameters=[radius_variable, num_spheres_variable, tag_variable],
@@ -344,6 +344,7 @@ def init_design_space(sweep_method):


 @pytest.mark.parametrize("sweep_method", SWEEP_METHODS.values())
+@pytest.mark.slow
 def test_sweep(sweep_method, monkeypatch):
     # Problem, simulate scattering cross section of sphere ensemble
     # simulation consists of `num_spheres` spheres of radius `radius`.
diff --git a/tests/test_plugins/test_invdes.py b/tests/test_plugins/test_invdes.py
index f3ba2f3b96..361019448c 100644
--- a/tests/test_plugins/test_invdes.py
+++ b/tests/test_plugins/test_invdes.py
@@ -368,6 +368,7 @@ def test_continue_run_fns(use_emulated_run):  # noqa: F811
     )


+@pytest.mark.slow
 def test_continue_run_from_file(use_emulated_run):  # noqa: F811
     """Test continuing an already run inverse design from file."""
     result_orig = make_result(use_emulated_run)
diff --git a/tests/test_web/test_webapi.py b/tests/test_web/test_webapi.py
index 48ee90f4bf..731735c0f0 100644
--- a/tests/test_web/test_webapi.py
+++ b/tests/test_web/test_webapi.py
@@ -1063,14 +1063,15 @@ def test_job_run_accepts_pathlikes(monkeypatch, tmp_path, path_builder):
     [_pathlib_builder, _posix_builder, _str_builder, _fspath_builder],
     ids=["pathlib.Path", "posixpath_str", "str", "PathLike"],
 )
+@pytest.mark.slow
 def test_batch_run_accepts_pathlike_dir(monkeypatch, tmp_path, dir_builder):
     """Batch.run(path_dir=...) accepts any PathLike directory location."""
-    sims = {"A": make_sim(), "B": make_sim()}
+    sims = {"A": make_sim()}
     out_dir = dir_builder(tmp_path, "batch_out")

     # Map task_ids to sims: upload() is patched to return task_name, which for dict input
     # corresponds to the dict keys ("A", "B"), so we map those.
-    apply_common_patches(monkeypatch, tmp_path, taskid_to_sim={"A": sims["A"], "B": sims["B"]})
+    apply_common_patches(monkeypatch, tmp_path, taskid_to_sim={"A": sims["A"]})

     b = Batch(simulations=sims, folder_name=PROJECT_NAME)
     b.run(path_dir=out_dir)
@@ -1078,3 +1079,6 @@ def test_batch_run_accepts_pathlike_dir(monkeypatch, tmp_path, dir_builder):
     # Directory created and two .hdf5 outputs produced
     out_dir_str = os.fspath(out_dir)
     assert os.path.isdir(out_dir_str)
+
+    batch_file = Path(out_dir) / "batch.hdf5"
+    assert batch_file.is_file()