|
1 | 1 | import asyncio |
| 2 | +import os |
2 | 3 | from collections.abc import Awaitable, Callable |
3 | | -from typing import cast |
4 | | -from eval_protocol.models import EvaluationRow |
| 4 | +from typing import Any, cast |
| 5 | +from eval_protocol.models import EvaluationRow, EvaluateResult, Status |
5 | 6 | from eval_protocol.pytest.types import Dataset, EvaluationInputParam, TestFunction |
6 | 7 |
|
7 | 8 |
|
@@ -41,3 +42,70 @@ async def execute_pytest( |
41 | 42 | return test_func(processed_dataset, **evaluation_test_kwargs) |
42 | 43 | test_func = cast(Callable[[], EvaluationRow], test_func) |
43 | 44 | return test_func(**evaluation_test_kwargs) |
| 45 | + |
| 46 | + |
async def execute_pytest_with_exception_handling(
    test_func: TestFunction,
    evaluation_test_kwargs: dict[str, Any],
    processed_row: EvaluationRow | None = None,
    processed_dataset: list[EvaluationRow] | None = None,
) -> EvaluationRow | list[EvaluationRow]:
    """Execute a pytest evaluation, optionally converting exceptions into error results.

    Exactly one of ``processed_row`` / ``processed_dataset`` must be provided.
    If the evaluation raises and the ``EP_RAISE_EVAL_EXCEPTIONS`` environment
    variable is set to ``"false"`` (compared case-insensitively after
    stripping whitespace), the exception is recorded on the affected row(s)
    as a zero-score error result instead of propagating.

    Args:
        test_func: The test function to execute.
        evaluation_test_kwargs: Kwargs for the evaluation function.
        processed_row: Single row for pointwise evaluation (mutually exclusive
            with ``processed_dataset``).
        processed_dataset: Dataset for groupwise/all evaluation (mutually
            exclusive with ``processed_row``).

    Returns:
        The result of ``execute_pytest``; on a suppressed exception, the input
        row (or dataset) annotated with error results.

    Raises:
        ValueError: If neither ``processed_row`` nor ``processed_dataset``
            is provided.
        Exception: Re-raises the evaluation exception unless suppression is
            enabled via ``EP_RAISE_EVAL_EXCEPTIONS=false``.
    """
    # Validate up front so a missing-argument bug surfaces as a clear
    # ValueError, instead of an obscure failure inside execute_pytest
    # followed by a ValueError chained onto that unrelated exception
    # (which is what happened when this check lived in the except block).
    if processed_row is None and processed_dataset is None:
        raise ValueError("Neither processed_row nor processed_dataset was provided")

    try:
        if processed_row is not None:
            return await execute_pytest(
                test_func,
                processed_row=processed_row,
                evaluation_test_kwargs=evaluation_test_kwargs,
            )
        return await execute_pytest(
            test_func,
            processed_dataset=processed_dataset,
            evaluation_test_kwargs=evaluation_test_kwargs,
        )
    except Exception as e:
        # Default is to raise; suppression must be explicitly requested.
        # Normalize with .lower() so "False"/"FALSE" also disable raising.
        if os.getenv("EP_RAISE_EVAL_EXCEPTIONS", "true").strip().lower() != "false":
            raise

        # Build the message once instead of four separate f-strings.
        error_msg = f"Error during evaluation: {type(e).__name__}: {e}"

        # Annotate every affected row with a zero-score error result so the
        # failure stays visible in downstream reporting rather than crashing.
        rows = [processed_row] if processed_row is not None else processed_dataset
        for row in rows:
            row.evaluation_result = EvaluateResult(
                score=0.0,
                is_score_valid=False,
                reason=error_msg,
            )
            if row.eval_metadata is not None:
                row.eval_metadata.status = Status.error(error_msg)

        # Preserve the original return shape: single row in, single row out.
        return rows[0] if processed_row is not None else rows
0 commit comments