import asyncio
import json
import logging
import os
import re
import sys
from collections.abc import Sequence
from dataclasses import replace
from typing import Any, Literal

import pandas as pd
from litellm.cost_calculator import cost_per_token
from tqdm import tqdm

from eval_protocol.dataset_logger.dataset_logger import DatasetLogger
from eval_protocol.models import (
    CompletionParams,
    CostMetrics,
    EvalMetadata,
    EvaluationRow,
    EvaluationThreshold,
    EvaluationThresholdDict,
    Status,
)
from eval_protocol.pytest.exception_config import get_default_exception_handler_config
from eval_protocol.pytest.rollout_processor import RolloutProcessor
from eval_protocol.pytest.types import RolloutProcessorConfig

AggregationMethod = Literal["mean", "max", "min", "bootstrap"]


async def run_tasks_with_eval_progress(pointwise_tasks: list, run_idx: int):
    """
    Run evaluation tasks with a progress bar and proper cancellation handling.

    Args:
        pointwise_tasks: List of asyncio tasks to execute
        run_idx: Run index for progress bar positioning and naming

    Returns:
        Results from all tasks
    """
    eval_position = run_idx + 2  # Position after rollout progress bar
    with tqdm(
        total=len(pointwise_tasks),
        desc=f" Eval {run_idx + 1}",
        unit="eval",
        file=sys.__stderr__,
        leave=False,
        position=eval_position,
        dynamic_ncols=True,
        miniters=1,
        mininterval=0.1,
        bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
    ) as eval_pbar:

        async def task_with_progress(task):
            try:
                return await task
            finally:
                eval_pbar.update(1)

        wrapped_tasks = [task_with_progress(task) for task in pointwise_tasks]
        try:
            return await asyncio.gather(*wrapped_tasks)
        except Exception:
            # Propagate cancellation to the real tasks and await them to quiesce
            for task in pointwise_tasks:
                task.cancel()
            await asyncio.gather(*pointwise_tasks, return_exceptions=True)
            raise
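

# Illustrative usage (the evaluate_row coroutine and rows list are hypothetical,
# not part of this module):
#   async def main(rows):
#       tasks = [asyncio.create_task(evaluate_row(row)) for row in rows]
#       return await run_tasks_with_eval_progress(tasks, run_idx=0)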


async def run_tasks_with_run_progress(execute_run_func, num_runs, config):
    """
    Run tasks with a parallel runs progress bar, preserving original logic.

    Args:
        execute_run_func: The execute_run function to call
        num_runs: Number of runs to execute
        config: Configuration to pass to execute_run_func
    """
    with tqdm(
        total=num_runs,
        desc="Runs (Parallel)",
        unit="run",
        file=sys.__stderr__,
        position=0,
        leave=True,
        dynamic_ncols=True,
        miniters=1,
        bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
    ) as run_pbar:

        async def execute_run_with_progress(run_idx: int, config):
            result = await execute_run_func(run_idx, config)
            run_pbar.update(1)
            return result

        tasks = [asyncio.create_task(execute_run_with_progress(run_idx, config)) for run_idx in range(num_runs)]
        try:
            await asyncio.gather(*tasks)
        except Exception:
            # Propagate cancellation to tasks and await them to quiesce
            for task in tasks:
                task.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)
            raise


def calculate_bootstrap_scores(all_scores: list[float]) -> float:
    """
    Estimate the aggregate score by bootstrap resampling of the individual scores.

    Args:
        all_scores: List of individual scores from all rows

    Returns:
        Mean of the bootstrap resample means
    """
    if not all_scores:
        return 0.0
    # Single-column DataFrame of scores
    battles = pd.DataFrame({"score": all_scores})
    # Draw 100 bootstrap resamples (same size, with replacement) and average each
    bootstrap_means = [battles.sample(frac=1.0, replace=True)["score"].mean() for _ in range(100)]
    # Final score is the mean of the resample means
    bootstraps = pd.Series(bootstrap_means)
    return float(bootstraps.mean())
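

# Note on the estimator above: averaging 100 bootstrap resample means converges on
# the plain sample mean. For hypothetical scores [0.0, 0.5, 1.0, 1.0] the result
# will land near 0.625; the resampling mainly smooths run-to-run noise.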


def aggregate(scores: list[float], method: AggregationMethod) -> float:
    if not scores:
        return 0.0
    if method == "mean":
        return sum(scores) / len(scores)
    if method == "max":
        return max(scores)
    if method == "min":
        return min(scores)
    if method == "bootstrap":
        return calculate_bootstrap_scores(scores)
    raise ValueError(f"Unknown aggregation method: {method}")
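

# Deterministic cases, e.g.:
#   aggregate([0.25, 0.75, 1.0], "mean") -> 2.0 / 3
#   aggregate([0.25, 0.75, 1.0], "max")  -> 1.0
#   aggregate([], "min")                 -> 0.0 (empty input short-circuits)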


def log_eval_status_and_rows(
    eval_metadata: EvalMetadata | None,
    rows: list[EvaluationRow] | None,
    status: Status,
    passed: bool,
    logger: DatasetLogger,
) -> None:
    """Update eval status and emit rows to the given logger.

    If no rows are provided, emits a minimal placeholder row so downstream
    consumers still observe a terminal status.
    """
    if eval_metadata is None:
        return
    eval_metadata.status = status
    eval_metadata.passed = passed
    rows_to_log: list[EvaluationRow] = rows or []
    if not rows_to_log:
        error_row = EvaluationRow(messages=[], eval_metadata=eval_metadata, evaluation_result=None)
        logger.log(error_row)
    else:
        for r in rows_to_log:
            if r.eval_metadata is not None:
                r.eval_metadata.status = status
            logger.log(r)


def parse_ep_max_rows(default_value: int | None) -> int | None:
    """Read the EP_MAX_DATASET_ROWS env override as int or None.

    Assumes the environment variable was already validated by plugin.py.
    """
    raw = os.getenv("EP_MAX_DATASET_ROWS")
    if raw is None:
        return default_value
    # plugin.py stores the string "None" for the "all rows" case
    return None if raw.lower() == "none" else int(raw)
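

# Override semantics (values are written by plugin.py), e.g.:
#   EP_MAX_DATASET_ROWS unset  -> default_value
#   EP_MAX_DATASET_ROWS="None" -> None (evaluate every dataset row)
#   EP_MAX_DATASET_ROWS="50"   -> 50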


def parse_ep_num_runs(default_value: int) -> int:
    """Read the EP_NUM_RUNS env override as int.

    Assumes the environment variable was already validated by plugin.py.
    """
    raw = os.getenv("EP_NUM_RUNS")
    return int(raw) if raw is not None else default_value


def parse_ep_max_concurrent_rollouts(default_value: int) -> int:
    """Read the EP_MAX_CONCURRENT_ROLLOUTS env override as int.

    Assumes the environment variable was already validated by plugin.py.
    """
    raw = os.getenv("EP_MAX_CONCURRENT_ROLLOUTS")
    return int(raw) if raw is not None else default_value


def parse_ep_completion_params(
    completion_params: Sequence[CompletionParams | None] | None,
) -> Sequence[CompletionParams | None]:
    """Apply EP_INPUT_PARAMS_JSON overrides to completion_params.

    Reads the environment variable set by plugin.py and deep-merges it into
    each completion param.
    """
    if completion_params is None:
        return []
    try:
        _env_override = os.getenv("EP_INPUT_PARAMS_JSON")
        if _env_override:
            override_obj = json.loads(_env_override)  # pyright: ignore[reportAny]
            if isinstance(override_obj, dict):
                # Apply the override to each non-None completion_params item
                return [deep_update_dict(dict(cp), override_obj) for cp in completion_params if cp is not None]  # pyright: ignore[reportUnknownArgumentType]
    except Exception:
        # Best-effort: a malformed override falls through to the unmodified params
        pass
    return completion_params
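

# Illustrative deep-merge override (hypothetical values):
#   completion_params = [{"model": "gpt-4o", "extra_body": {"reasoning": {"effort": "low"}}}]
#   EP_INPUT_PARAMS_JSON='{"extra_body": {"reasoning": {"effort": "high"}}}'
#   -> [{"model": "gpt-4o", "extra_body": {"reasoning": {"effort": "high"}}}]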


def parse_ep_passed_threshold(
    default_value: float | EvaluationThresholdDict | EvaluationThreshold | None,
) -> EvaluationThreshold | None:
    """Read the EP_PASSED_THRESHOLD env override as float or dict.

    Assumes the environment variable was already validated by plugin.py.
    Supports both float values (e.g., "0.8") and JSON dict format (e.g., '{"success":0.8}').
    """
    raw = os.getenv("EP_PASSED_THRESHOLD")
    if raw is not None:
        try:
            return EvaluationThreshold(success=float(raw))
        except ValueError:
            # Fall back to the JSON dict format promised in the docstring
            try:
                parsed = json.loads(raw)
            except json.JSONDecodeError:
                raise ValueError(f"EP_PASSED_THRESHOLD env var exists but can't be parsed: {raw}")
            if isinstance(parsed, dict):
                return EvaluationThreshold(**parsed)
            raise ValueError(f"EP_PASSED_THRESHOLD env var exists but can't be parsed: {raw}")
    if isinstance(default_value, float):
        return EvaluationThreshold(success=default_value)
    if isinstance(default_value, dict):
        return EvaluationThreshold(**default_value)
    if isinstance(default_value, EvaluationThreshold):
        return default_value
    return None
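

# Accepted env forms (the dict form relies on the JSON fallback above), e.g.:
#   EP_PASSED_THRESHOLD="0.8"             -> EvaluationThreshold(success=0.8)
#   EP_PASSED_THRESHOLD='{"success":0.8}' -> EvaluationThreshold(success=0.8)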


def deep_update_dict(base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]:  # pyright: ignore[reportExplicitAny]
    """Recursively update nested dictionaries in place and return base."""
    for key, value in override.items():  # pyright: ignore[reportAny]
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_update_dict(base[key], value)  # pyright: ignore[reportAny, reportUnknownArgumentType]
        else:
            base[key] = value
    return base
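

# Merge semantics, e.g.:
#   deep_update_dict({"a": {"b": 1}, "x": 0}, {"a": {"c": 2}, "y": 3})
#   -> {"a": {"b": 1, "c": 2}, "x": 0, "y": 3}   (nested dicts merge; leaves overwrite)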


async def rollout_processor_with_retry(
    rollout_processor: RolloutProcessor,
    fresh_dataset: list[EvaluationRow],
    config: RolloutProcessorConfig,
    run_idx: int = 0,
):
    """
    Wrapper around rollout_processor that handles retry logic using the Python backoff library.

    Provides configurable exception handling with automatic retry for specific exception types:
    - Retryable exceptions (e.g., ConnectionError, TimeoutError) are automatically retried with backoff
    - Fail-fast exceptions (e.g., ValueError, TypeError) are not retried and return immediately
    - Unknown exceptions can be configured to either re-raise or return as failed rows

    The backoff behavior (exponential/constant, delays, max attempts) is fully configurable
    through the ExceptionHandlerConfig in the RolloutProcessorConfig.

    Yields results as they complete, allowing for concurrent processing while handling
    retries transparently in the background.
    """
    # Use the provided exception handler config or fall back to the default.
    # Environment variable overrides are automatically applied in __post_init__.
    exception_config = config.exception_handler_config or get_default_exception_handler_config()
    try:
        # Create the initial batch of tasks (preserves indexing for mock processors)
        try:
            base_tasks = rollout_processor(fresh_dataset, config)
        except Exception as e:
            print(f"❌ Rollout processor failed to initialize: {e}")
            raise

        # Create a single backoff-decorated retry function that can be reused
        @exception_config.get_backoff_decorator()  # pyright: ignore[reportUntypedFunctionDecorator]
        async def execute_row_with_backoff_retry(row: EvaluationRow):
            """Execute rollout for a single row with backoff retry."""
            retry_config = replace(config, kwargs={**(config.kwargs or {}), "start_server": False})
            retry_tasks = rollout_processor([row], retry_config)
            return await retry_tasks[0]

        async def execute_row_with_backoff(task: asyncio.Task, row: EvaluationRow) -> EvaluationRow:  # pyright: ignore[reportMissingTypeArgument, reportUnknownParameterType]
            """Execute a single row task with backoff retry."""
            try:
                # Try the original task first
                result = await task  # pyright: ignore[reportUnknownVariableType]
                result.rollout_status = Status.rollout_finished()
                return result  # pyright: ignore[reportUnknownVariableType]
            except Exception as e:
                # NOTE: these checks exist because the backoff decorator is not applied to the
                # initial batch call; we don't want to retry the whole batch when one row fails.
                # Check whether this exception should be retried
                is_retryable = any(isinstance(e, exc_type) for exc_type in exception_config.retryable_exceptions)
                giveup_func = exception_config.backoff_config.giveup_func
                should_giveup = giveup_func and giveup_func(e)
                if is_retryable and not should_giveup:
                    # Use the shared backoff function for retryable exceptions
                    try:
                        result = await execute_row_with_backoff_retry(row)
                        result.rollout_status = Status.rollout_finished()
                        return result
                    except Exception as retry_error:
                        # Backoff gave up
                        logging.error(
                            f"❌ Rollout failed (retried {exception_config.backoff_config.max_tries} times): {repr(retry_error)}"
                        )
                        row.rollout_status = Status.rollout_error(str(retry_error))
                        return row
                else:
                    # Non-retryable exception: fail immediately
                    logging.error(f"❌ Rollout failed (non-retryable error encountered): {repr(e)}")
                    row.rollout_status = Status.rollout_error(repr(e))
                    return row

        async def execute_row_with_backoff_and_log(task: asyncio.Task, row: EvaluationRow) -> EvaluationRow:  # pyright: ignore[reportMissingTypeArgument, reportUnknownParameterType]
            """Execute a single row task with backoff retry and logging."""
            result = await execute_row_with_backoff(task, row)
            # Log the row after execution completes (success or failure)
            config.logger.log(result)
            return result

        # Process all tasks concurrently with backoff retry
        retry_tasks = [
            asyncio.create_task(execute_row_with_backoff_and_log(task, fresh_dataset[i]))
            for i, task in enumerate(base_tasks)
        ]
        position = run_idx + 1  # Position 0 is reserved for the main run bar, so shift up by 1
        with tqdm(
            total=len(retry_tasks),
            desc=f" Run {run_idx + 1}",
            unit="rollout",
            file=sys.__stderr__,
            leave=False,
            position=position,
            dynamic_ncols=True,
            miniters=1,
            mininterval=0.1,
            bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
        ) as rollout_pbar:
            # Yield results as they complete
            for task in asyncio.as_completed(retry_tasks):
                result = await task
                rollout_pbar.update(1)
                yield result
    finally:
        rollout_processor.cleanup()
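

# Illustrative consumption of the async generator above (the processor, dataset,
# and config objects here are hypothetical, not part of this module):
#   async def collect_rollouts(processor, dataset, config):
#       rows = []
#       async for row in rollout_processor_with_retry(processor, dataset, config, run_idx=0):
#           rows.append(row)
#       return rows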


def sanitize_filename(text: str) -> str:
    """Sanitize text for use in filenames by replacing special characters with dashes."""
    safe = re.sub(r"[^A-Za-z0-9._-]+", "-", text.strip())
    return safe[:120]
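

# Sanitization example:
#   sanitize_filename("fireworks_ai/llama v3 (beta)") -> "fireworks_ai-llama-v3-beta-"
#   (runs of disallowed characters collapse to one dash; output is capped at 120 chars)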


def extract_effort_tag(params: dict) -> str | None:  # pyright: ignore[reportMissingTypeArgument, reportUnknownParameterType]
    """
    Extract effort tag from completion parameters for use in file naming.

    Args:
        params: Completion parameters dictionary

    Returns:
        Effort tag string if found, None otherwise
    """
    try:
        if not isinstance(params, dict):  # pyright: ignore[reportUnnecessaryIsInstance]
            return None  # pyright: ignore[reportUnreachable]
        # Common locations
        if "extra_body" in params and isinstance(params["extra_body"], dict):
            eb = params["extra_body"]  # pyright: ignore[reportUnknownVariableType]
            if isinstance(eb.get("reasoning"), dict) and "effort" in eb["reasoning"]:  # pyright: ignore[reportUnknownMemberType]
                return str(eb["reasoning"]["effort"]).lower()  # pyright: ignore[reportUnknownArgumentType]
            if "reasoning_effort" in eb:
                return str(eb["reasoning_effort"]).lower()  # pyright: ignore[reportUnknownArgumentType]
        if "reasoning" in params and isinstance(params["reasoning"], dict) and "effort" in params["reasoning"]:
            return str(params["reasoning"]["effort"]).lower()  # pyright: ignore[reportUnknownArgumentType]
    except Exception:
        return None
    return None
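

# Recognized parameter shapes, e.g.:
#   {"extra_body": {"reasoning": {"effort": "HIGH"}}} -> "high"
#   {"extra_body": {"reasoning_effort": "low"}}       -> "low"
#   {"reasoning": {"effort": "medium"}}               -> "medium"
#   {"temperature": 0.2}                              -> None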


def add_cost_metrics(row: EvaluationRow) -> None:
    """Calculate and add cost metrics for an EvaluationRow based on its usage data."""
    # Can't calculate cost without usage stats or model info
    if not row.execution_metadata.usage or not row.input_metadata.completion_params:
        row.execution_metadata.cost_metrics = CostMetrics(
            input_cost=0.0,
            output_cost=0.0,
            total_cost_dollar=0.0,
        )
        return

    model = row.input_metadata.completion_params.get("model", "unknown")
    provider = row.input_metadata.completion_params.get("provider")

    # Map Pydantic AI provider names to LiteLLM's model-prefix format.
    # TODO: make this more generic so other frameworks are covered too.
    provider_mapping = {
        "fireworks": "fireworks_ai",
        "together": "together_ai",
        "openai": "",  # No prefix needed
        "azure": "azure",
        "deepseek": "deepseek",
        "openrouter": "openrouter",
        "grok": "grok",
        "github": "github",
        "heroku": "heroku",
    }
    if provider and provider in provider_mapping:
        litellm_prefix = provider_mapping[provider]
        model_id = f"{litellm_prefix}/{model}" if litellm_prefix else model
    else:
        model_id = model

    usage = row.execution_metadata.usage
    input_tokens = usage.prompt_tokens or 0
    output_tokens = usage.completion_tokens or 0
    input_cost, output_cost = cost_per_token(
        model=model_id, prompt_tokens=input_tokens, completion_tokens=output_tokens
    )
    total_cost = input_cost + output_cost

    # Set all cost metrics on the row
    row.execution_metadata.cost_metrics = CostMetrics(
        input_cost=input_cost,
        output_cost=output_cost,
        total_cost_dollar=total_cost,
    )
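

# Model-id construction before the LiteLLM cost lookup, e.g. (hypothetical params):
#   provider="fireworks", model="accounts/fireworks/models/llama-v3p1-70b-instruct"
#     -> "fireworks_ai/accounts/fireworks/models/llama-v3p1-70b-instruct"
#   provider="openai", model="gpt-4o" -> "gpt-4o" (empty prefix, so no slash is prepended)
#   provider=None, model="gpt-4o"     -> "gpt-4o" (unmapped providers pass through unchanged)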