-
Notifications
You must be signed in to change notification settings - Fork 16
Expand file tree
/
Copy pathllm_judge_langfuse.py
More file actions
60 lines (52 loc) · 1.5 KB
/
llm_judge_langfuse.py
File metadata and controls
60 lines (52 loc) · 1.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
"""
Example for using Langfuse with the aha judge.
"""
from datetime import datetime
import os
import pytest
from eval_protocol import (
evaluation_test,
aha_judge,
multi_turn_assistant_to_ground_truth,
EvaluationRow,
SingleTurnRolloutProcessor,
create_langfuse_adapter,
DynamicDataLoader,
)
from eval_protocol.quickstart import aha_judge
def langfuse_data_generator():
    """Fetch a bounded sample of evaluation rows from Langfuse.

    Builds a Langfuse adapter and pulls rows recorded up to the cutoff
    timestamp, retrying and throttling between page fetches so the
    Langfuse API is not hammered.

    Returns:
        The evaluation rows produced by the adapter (at most ``sample_size``
        rows drawn from up to ``limit`` candidates).
    """
    # NOTE(review): cutoff timestamp and limits are hard-coded for this
    # example run — adjust to your own trace window before reuse.
    return create_langfuse_adapter().get_evaluation_rows(
        to_timestamp=datetime(2025, 9, 12, 0, 11, 18),
        limit=711,
        sample_size=50,
        sleep_between_gets=3.0,
        max_retries=5,
    )
# Skip in CI: this test calls external services (Langfuse + model providers),
# which are unavailable/undesirable in automated pipelines.
@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Skip in CI")
@pytest.mark.parametrize(
    "completion_params",
    [
        # Each entry is one model configuration the rollout is run against.
        {"model": "gpt-4.1"},
        {
            "max_tokens": 131000,
            "extra_body": {"reasoning_effort": "medium"},
            "model": "fireworks_ai/accounts/fireworks/models/gpt-oss-120b",
        },
        {
            "max_tokens": 131000,
            "extra_body": {"reasoning_effort": "low"},
            "model": "fireworks_ai/accounts/fireworks/models/gpt-oss-20b",
        },
    ],
)
@evaluation_test(
    # Rows are sourced from Langfuse via langfuse_data_generator and
    # preprocessed so the last assistant turn becomes the ground truth.
    data_loaders=DynamicDataLoader(
        generators=[langfuse_data_generator],
        preprocess_fn=multi_turn_assistant_to_ground_truth,
    ),
    rollout_processor=SingleTurnRolloutProcessor(),
    # Throttle parallelism — presumably to stay within provider rate
    # limits; TODO confirm.
    max_concurrent_evaluations=2,
)
async def test_llm_judge(row: EvaluationRow) -> EvaluationRow:
    """Score a single evaluation row with the aha judge and return it."""
    return await aha_judge(row)