55
66import json
77import os
8- import re
9- import subprocess
108
119import pytest
1210
1311from isaaclab_arena .tests .utils .constants import TestConstants
12+ from isaaclab_arena .tests .utils .subprocess import run_subprocess
1413
1514HEADLESS = True
1615NUM_STEPS = 2
@@ -23,41 +22,24 @@ def write_jobs_config_to_file(jobs: list[dict], tmp_file_path: str):
2322 json .dump (jobs_config , f , indent = 4 )
2423
2524
def run_eval_runner(jobs_config_path: str, headless: bool = HEADLESS):
    """Launch eval_runner.py in a subprocess and fail loudly on any job error.

    --continue_on_error is NOT passed, so the eval_runner re-raises on the
    first job failure, exiting non-zero. run_subprocess() detects that and
    raises CalledProcessError, which surfaces as a test failure.

    Args:
        jobs_config_path: Path to the jobs config JSON file.
        headless: Whether to run in headless mode.
    """
    cmd = [
        TestConstants.python_path,
        f"{TestConstants.evaluation_dir}/eval_runner.py",
        "--eval_jobs_config",
        jobs_config_path,
    ]
    if headless:
        cmd.append("--headless")

    run_subprocess(cmd)
6143
6244
6345@pytest .mark .with_subprocess
@@ -90,7 +72,7 @@ def test_eval_runner_two_jobs_zero_action(tmp_path):
9072
9173 temp_config_path = str (tmp_path / "test_eval_runner_two_jobs_zero_action.json" )
9274 write_jobs_config_to_file (jobs , temp_config_path )
93- run_eval_runner_and_check_no_failures (temp_config_path )
75+ run_eval_runner (temp_config_path )
9476
9577
9678@pytest .mark .with_subprocess
@@ -123,7 +105,7 @@ def test_eval_runner_multiple_environments(tmp_path):
123105
124106 temp_config_path = str (tmp_path / "test_eval_runner_multiple_environments.json" )
125107 write_jobs_config_to_file (jobs , temp_config_path )
126- run_eval_runner_and_check_no_failures (temp_config_path )
108+ run_eval_runner (temp_config_path )
127109
128110
129111@pytest .mark .with_subprocess
@@ -156,18 +138,18 @@ def test_eval_runner_different_embodiments(tmp_path):
156138
157139 temp_config_path = str (tmp_path / "test_eval_runner_different_embodiments.json" )
158140 write_jobs_config_to_file (jobs , temp_config_path )
159- run_eval_runner_and_check_no_failures (temp_config_path )
141+ run_eval_runner (temp_config_path )
160142
161143
162144@pytest .mark .with_subprocess
163145def test_eval_runner_from_existing_config ():
164146 """Test eval_runner using the zero_action_jobs_config.json and verify no jobs failed."""
165147 config_path = f"{ TestConstants .arena_environments_dir } /eval_jobs_configs/zero_action_jobs_config.json"
166148 assert os .path .exists (config_path ), f"Config file not found: { config_path } "
167- run_eval_runner_and_check_no_failures (config_path )
149+ run_eval_runner (config_path )
168150
169151
170- @pytest .mark .with_subprocess
152+ @pytest .mark .skip ( reason = "CI takes 1000s to cold-start camera rendering." )
171153def test_eval_runner_enable_cameras (tmp_path ):
172154 """Test eval_runner with enable_cameras set to true."""
173155 jobs = [
@@ -198,4 +180,4 @@ def test_eval_runner_enable_cameras(tmp_path):
198180
199181 temp_config_path = str (tmp_path / "test_eval_runner_enable_cameras.json" )
200182 write_jobs_config_to_file (jobs , temp_config_path )
201- run_eval_runner_and_check_no_failures (temp_config_path )
183+ run_eval_runner (temp_config_path )
0 commit comments