test_direct_run.py

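"""Tests that @evaluation_test-decorated evaluators can be invoked directly.

Covers a synchronous and an asynchronous evaluator. Each direct-call test
builds EvaluationRow objects by hand and verifies the dummy per-row scores.
"""
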
from typing import List

import pytest

from eval_protocol.models import Message, EvaluationRow, EvaluateResult
from eval_protocol.pytest import SingleTurnRolloutProcessor, evaluation_test


@evaluation_test(
    input_messages=[
        [
            Message(role="user", content="What is the capital of France?"),
        ],
        [
            Message(role="user", content="What is the capital of the moon?"),
        ],
    ],
    completion_params=[{"model": "fireworks_ai/accounts/fireworks/models/gpt-oss-120b"}],
    rollout_processor=SingleTurnRolloutProcessor(),
    mode="all",
)
def test_direct_run(rows: List[EvaluationRow]) -> List[EvaluationRow]:
    """Assign each row a dummy score equal to its index via the pytest evaluation interface."""
    for idx, row in enumerate(rows):
        row.evaluation_result = EvaluateResult(score=idx, reason="test")
    return rows
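

# Direct call: build EvaluationRow objects by hand, invoke the decorated test
# as a plain function, and check the scores it assigned to each row.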
def test_direct_run_main():
    rows = [
        EvaluationRow(
            messages=[
                Message(role="user", content="What is the capital of France?"),
            ],
        ),
        EvaluationRow(
            messages=[
                Message(role="user", content="What is the capital of the moon?"),
            ],
        ),
    ]
    res = test_direct_run(rows)
    assert res[0].evaluation_result.score == 0
    assert res[1].evaluation_result.score == 1


@evaluation_test(
    input_messages=[
        [
            Message(role="user", content="What is the capital of France?"),
        ],
        [
            Message(role="user", content="What is the capital of the moon?"),
        ],
    ],
    completion_params=[{"model": "fireworks_ai/accounts/fireworks/models/gpt-oss-120b"}],
    rollout_processor=SingleTurnRolloutProcessor(),
    mode="all",
)
async def test_direct_run_async(rows: List[EvaluationRow]) -> List[EvaluationRow]:
    """Async variant: assign each row a dummy score equal to its index."""
    for idx, row in enumerate(rows):
        row.evaluation_result = EvaluateResult(score=idx, reason="test")
    return rows
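

# Same direct-call check for the async evaluator: the original message
# contents should be untouched and each score should match the row index.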
@pytest.mark.asyncio
async def test_direct_run_async_main():
    rows = [
        EvaluationRow(
            messages=[
                Message(role="user", content="1"),
            ],
        ),
        EvaluationRow(
            messages=[
                Message(role="user", content="2"),
            ],
        ),
    ]
    res = await test_direct_run_async(rows)
    assert res[0].messages[0].content == "1"
    assert res[1].messages[0].content == "2"
    assert res[0].evaluation_result.score == 0
    assert res[1].evaluation_result.score == 1