Skip to content

Commit 3adfc10

Browse files
committed
update
1 parent c952929 commit 3adfc10

File tree

4 files changed

+4
-3
lines changed

4 files changed

+4
-3
lines changed

eval_protocol/quickstart/llm_judge_braintrust.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@
5151
input_rows=input_rows,
5252
rollout_processor=SingleTurnRolloutProcessor(),
5353
preprocess_fn=multi_turn_assistant_to_ground_truth,
54-
max_concurrent_rollouts=64,
54+
max_concurrent_evaluations=2,
5555
)
5656
async def test_llm_judge(row: EvaluationRow) -> EvaluationRow:
5757
return await aha_judge(row)

eval_protocol/quickstart/llm_judge_langfuse.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,7 @@
5151
input_rows=[input_rows],
5252
rollout_processor=SingleTurnRolloutProcessor(),
5353
preprocess_fn=multi_turn_assistant_to_ground_truth,
54-
max_concurrent_rollouts=64,
55-
max_concurrent_evaluations=16,
54+
max_concurrent_evaluations=2,
5655
)
5756
async def test_llm_judge(row: EvaluationRow) -> EvaluationRow:
5857
return await aha_judge(row)

eval_protocol/quickstart/llm_judge_langsmith.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ def fetch_langsmith_traces_as_evaluation_rows(
7676
input_rows=[input_rows],
7777
rollout_processor=SingleTurnRolloutProcessor(),
7878
preprocess_fn=multi_turn_assistant_to_ground_truth,
79+
max_concurrent_evaluations=2,
7980
)
8081
async def test_llm_judge_langsmith(row: EvaluationRow) -> EvaluationRow:
8182
"""LLM Judge evaluation over LangSmith-sourced rows, persisted locally by Eval Protocol.

eval_protocol/quickstart/llm_judge_openai_responses.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@
5757
input_rows=[input_rows],
5858
rollout_processor=SingleTurnRolloutProcessor(),
5959
preprocess_fn=multi_turn_assistant_to_ground_truth,
60+
max_concurrent_evaluations=2,
6061
)
6162
async def test_llm_judge_openai_responses(row: EvaluationRow) -> EvaluationRow:
6263
return await aha_judge(row)

0 commit comments

Comments (0)