-
Notifications
You must be signed in to change notification settings - Fork 16
Expand file tree
/
Copy pathplugin.py
More file actions
265 lines (236 loc) · 8.98 KB
/
plugin.py
File metadata and controls
265 lines (236 loc) · 8.98 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
"""
Pytest plugin for Eval Protocol developer ergonomics.
Adds a discoverable CLI flag `--ep-max-rows` to control how many rows
evaluation_test processes. This sets the environment variable
`EP_MAX_DATASET_ROWS` so the core decorator can apply it uniformly to
both URL datasets and in-memory input_messages.
Usage:
- CLI: pytest --ep-max-rows=2 # or --ep-max-rows=all for no limit
- Defaults: If not provided, no override is applied (tests use the
max_dataset_rows value set in the decorator).
"""
import logging
import os
from typing import Optional
import json
import pathlib
def pytest_addoption(parser) -> None:
    """Register every eval-protocol CLI flag on pytest's option parser.

    All flags live in the "eval-protocol" option group. They are declared
    here as data and registered in a single loop so the full flag surface
    is visible at a glance.
    """
    group = parser.getgroup("eval-protocol")

    # (flag, addoption keyword arguments) — order matters only for --help output.
    option_specs = [
        (
            "--ep-max-rows",
            dict(
                action="store",
                default=None,
                help=(
                    "Limit number of dataset rows processed by evaluation_test. "
                    "Pass an integer (e.g., 2, 50) or 'all' for no limit."
                ),
            ),
        ),
        (
            "--ep-num-runs",
            dict(
                action="store",
                default=None,
                help="Override the number of runs for evaluation_test. Pass an integer (e.g., 1, 5, 10).",
            ),
        ),
        (
            "--ep-max-concurrent-rollouts",
            dict(
                action="store",
                default=None,
                help="Override the maximum number of concurrent rollouts. Pass an integer (e.g., 8, 50, 100).",
            ),
        ),
        (
            "--ep-print-summary",
            dict(
                action="store_true",
                default=False,
                help="Print a concise summary line (suite/model/effort/agg score) at the end of each evaluation_test.",
            ),
        ),
        (
            "--ep-summary-json",
            dict(
                action="store",
                default=None,
                help="Write a JSON summary artifact at the given path (e.g., ./outputs/aime_low.json).",
            ),
        ),
        (
            "--ep-input-param",
            dict(
                action="append",
                default=None,
                help=(
                    "Override rollout input parameters. Can be used multiple times. "
                    "Format: key=value or JSON via @path.json. Examples: "
                    "--ep-input-param temperature=0 --ep-input-param @params.json"
                ),
            ),
        ),
        (
            "--ep-reasoning-effort",
            dict(
                action="store",
                default=None,
                help=(
                    "Set reasoning.effort for providers that support it (e.g., Fireworks) via LiteLLM extra_body. "
                    "Values: low|medium|high|none"
                ),
            ),
        ),
        (
            "--ep-max-retry",
            dict(
                action="store",
                default=None,
                help="Failed rollouts (with rollout_status.code indicating error) will be retried up to this many times.",
            ),
        ),
        (
            "--ep-fail-on-max-retry",
            dict(
                action="store",
                default="true",
                choices=["true", "false"],
                help=(
                    "Whether to fail the entire rollout when permanent failures occur after max retries. "
                    "Default: true (fail on permanent failures). Set to 'false' to continue with remaining rollouts."
                ),
            ),
        ),
        (
            "--ep-success-threshold",
            dict(
                action="store",
                default=None,
                help="Override the success threshold for evaluation_test. Pass a float between 0.0 and 1.0 (e.g., 0.8).",
            ),
        ),
        (
            "--ep-se-threshold",
            dict(
                action="store",
                default=None,
                help=(
                    "Override the standard error threshold for evaluation_test. "
                    "Pass a float >= 0.0 (e.g., 0.05). If only this is set, success threshold defaults to 0.0."
                ),
            ),
        ),
    ]

    for flag, kwargs in option_specs:
        group.addoption(flag, **kwargs)
def _normalize_max_rows(val: Optional[str]) -> Optional[str]:
if val is None:
return None
s = val.strip().lower()
if s == "all":
return "None"
# Validate int; if invalid, ignore and return None (no override)
try:
int(s)
return s
except ValueError:
return None
def _normalize_number(val: Optional[str]) -> Optional[str]:
if val is None:
return None
s = val.strip()
# Validate int; if invalid, ignore and return None (no override)
try:
num = int(s)
if num <= 0:
return None # num_runs must be positive
return str(num)
except ValueError:
return None
def _normalize_success_threshold(val: Optional[str]) -> Optional[float]:
"""Normalize success threshold value as float between 0.0 and 1.0."""
if val is None:
return None
try:
threshold_float = float(val.strip())
if 0.0 <= threshold_float <= 1.0:
return threshold_float
else:
return None # threshold must be between 0 and 1
except ValueError:
return None
def _normalize_se_threshold(val: Optional[str]) -> Optional[float]:
"""Normalize standard error threshold value as float >= 0.0."""
if val is None:
return None
try:
threshold_float = float(val.strip())
if threshold_float >= 0.0:
return threshold_float
else:
return None # standard error must be >= 0
except ValueError:
return None
def _build_passed_threshold_env(success: Optional[float], se: Optional[float]) -> Optional[str]:
"""Build the EP_PASSED_THRESHOLD environment variable value from the two separate thresholds."""
if success is None and se is None:
return None
if se is None:
return str(success)
else:
success_val = success if success is not None else 0.0
threshold_dict = {"success": success_val, "standard_error": se}
return json.dumps(threshold_dict)
def _quiet_litellm_logging() -> None:
    """Silence LiteLLM INFO spam early in the session.

    Only applies when the user has not set LITELLM_LOG themselves. Best
    effort: any failure here must never break pytest startup.
    """
    try:
        if os.environ.get("LITELLM_LOG") is None:
            os.environ["LITELLM_LOG"] = "ERROR"
        llog = logging.getLogger("LiteLLM")
        llog.setLevel(logging.CRITICAL)
        llog.propagate = False
        # Drop any handlers LiteLLM pre-installed so nothing leaks through.
        for handler in list(llog.handlers):
            llog.removeHandler(handler)
    except Exception:
        pass


def _parse_input_param_overrides(config) -> dict:
    """Merge --ep-input-param and --ep-reasoning-effort into one dict.

    Each --ep-input-param is either `@path.json` (a JSON object loaded from
    disk and merged) or `key=value` (value parsed as JSON when possible,
    kept as a raw string otherwise). Later options win on key conflicts.
    """
    merged: dict = {}
    for opt in config.getoption("--ep-input-param") or []:
        if opt is None:
            continue
        opt = str(opt)
        if opt.startswith("@"):  # load a JSON object from a file
            p = pathlib.Path(opt[1:])
            if p.is_file():
                with open(p, "r", encoding="utf-8") as f:
                    obj = json.load(f)
                if isinstance(obj, dict):
                    merged.update(obj)
        elif "=" in opt:
            k, v = opt.split("=", 1)
            # Try parse JSON values (numbers, bools, null), fallback to string.
            try:
                merged[k] = json.loads(v)
            except Exception:
                merged[k] = v
    reasoning_effort = config.getoption("--ep-reasoning-effort")
    if reasoning_effort:
        # Always place under extra_body to avoid LiteLLM rejecting top-level params.
        eb = merged.setdefault("extra_body", {})
        # Convert "none" string to None value for API compatibility.
        eb["reasoning_effort"] = None if reasoning_effort.lower() == "none" else str(reasoning_effort)
    return merged


def pytest_configure(config) -> None:
    """Translate eval-protocol CLI flags into EP_* environment variables.

    Each flag value is validated/normalized first; invalid values are
    silently ignored so the decorator's own defaults stay in effect.
    """
    _quiet_litellm_logging()

    # Simple numeric overrides: exported only when the CLI value validates.
    simple_overrides = {
        "EP_MAX_DATASET_ROWS": _normalize_max_rows(config.getoption("--ep-max-rows")),
        "EP_NUM_RUNS": _normalize_number(config.getoption("--ep-num-runs")),
        "EP_MAX_CONCURRENT_ROLLOUTS": _normalize_number(config.getoption("--ep-max-concurrent-rollouts")),
        "EP_MAX_RETRY": _normalize_number(config.getoption("--ep-max-retry")),
    }
    for env_key, env_val in simple_overrides.items():
        if env_val is not None:
            os.environ[env_key] = env_val

    if config.getoption("--ep-print-summary"):
        os.environ["EP_PRINT_SUMMARY"] = "1"

    summary_json_path = config.getoption("--ep-summary-json")
    if summary_json_path:
        os.environ["EP_SUMMARY_JSON"] = summary_json_path

    fail_on_max_retry = config.getoption("--ep-fail-on-max-retry")
    if fail_on_max_retry is not None:
        os.environ["EP_FAIL_ON_MAX_RETRY"] = fail_on_max_retry

    # Combine the two threshold flags into a single env value.
    norm_success = _normalize_success_threshold(config.getoption("--ep-success-threshold"))
    norm_se = _normalize_se_threshold(config.getoption("--ep-se-threshold"))
    threshold_env = _build_passed_threshold_env(norm_success, norm_se)
    if threshold_env is not None:
        os.environ["EP_PASSED_THRESHOLD"] = threshold_env

    # Ad-hoc overrides of rollout input params via CLI flags.
    try:
        merged = _parse_input_param_overrides(config)
        if merged:
            os.environ["EP_INPUT_PARAMS_JSON"] = json.dumps(merged)
    except Exception:
        # best effort, do not crash pytest session
        pass