diff --git a/eval_protocol/__init__.py b/eval_protocol/__init__.py index 751610a6..768c7e9b 100644 --- a/eval_protocol/__init__.py +++ b/eval_protocol/__init__.py @@ -29,7 +29,12 @@ from .resources import create_llm_resource from .reward_function import RewardFunction from .typed_interface import reward_function -from .quickstart import aha_judge, multi_turn_assistant_to_ground_truth, assistant_to_ground_truth +from .quickstart.aha_judge import aha_judge +from .utils.evaluation_row_utils import ( + multi_turn_assistant_to_ground_truth, + assistant_to_ground_truth, + filter_longest_conversation, +) from .pytest import evaluation_test, SingleTurnRolloutProcessor, RemoteRolloutProcessor, GithubActionRolloutProcessor from .pytest.remote_rollout_processor import create_elasticsearch_config_from_env from .pytest.parameterize import DefaultParameterIdGenerator @@ -102,6 +107,7 @@ "aha_judge", "multi_turn_assistant_to_ground_truth", "assistant_to_ground_truth", + "filter_longest_conversation", "evaluation_test", "SingleTurnRolloutProcessor", "OpenAIResponsesAdapter", diff --git a/eval_protocol/pytest/tracing_utils.py b/eval_protocol/pytest/tracing_utils.py index 0c729d14..14ac03cf 100644 --- a/eval_protocol/pytest/tracing_utils.py +++ b/eval_protocol/pytest/tracing_utils.py @@ -8,7 +8,7 @@ from eval_protocol.adapters.fireworks_tracing import FireworksTracingAdapter from eval_protocol.data_loader.dynamic_data_loader import DynamicDataLoader from eval_protocol.models import EvaluationRow, Status -from eval_protocol.quickstart.utils import filter_longest_conversation +from eval_protocol.utils.evaluation_row_utils import filter_longest_conversation from eval_protocol.types.remote_rollout_processor import DataLoaderConfig, RolloutMetadata, InitRequest from eval_protocol.pytest.types import RolloutProcessorConfig diff --git a/eval_protocol/quickstart/__init__.py b/eval_protocol/quickstart/__init__.py index eed4fd21..85232248 100644 --- a/eval_protocol/quickstart/__init__.py +++ 
b/eval_protocol/quickstart/__init__.py @@ -1,4 +1,8 @@ -from .llm_judge import aha_judge -from .utils import multi_turn_assistant_to_ground_truth, assistant_to_ground_truth +""" +Quickstart modules for various evaluation scenarios. +""" + +from eval_protocol.quickstart.aha_judge.llm_judge import aha_judge +from eval_protocol.utils.evaluation_row_utils import multi_turn_assistant_to_ground_truth, assistant_to_ground_truth __all__ = ["aha_judge", "multi_turn_assistant_to_ground_truth", "assistant_to_ground_truth"] diff --git a/eval_protocol/quickstart/aha_judge/__init__.py b/eval_protocol/quickstart/aha_judge/__init__.py new file mode 100644 index 00000000..d3d98068 --- /dev/null +++ b/eval_protocol/quickstart/aha_judge/__init__.py @@ -0,0 +1,4 @@ +from eval_protocol.quickstart.aha_judge.llm_judge import aha_judge +from eval_protocol.utils.evaluation_row_utils import multi_turn_assistant_to_ground_truth, assistant_to_ground_truth + +__all__ = ["aha_judge", "multi_turn_assistant_to_ground_truth", "assistant_to_ground_truth"] diff --git a/eval_protocol/quickstart/aha_judge/llm_judge.py b/eval_protocol/quickstart/aha_judge/llm_judge.py new file mode 100644 index 00000000..b8e0a212 --- /dev/null +++ b/eval_protocol/quickstart/aha_judge/llm_judge.py @@ -0,0 +1,90 @@ +""" +Default LLM judge for Eval Protocol. Inspired by Arena-Hard-Auto. +""" + +from typing import Optional + +from eval_protocol.models import EvaluationRow, EvaluateResult, MetricResult +from eval_protocol.adapters.base import BaseAdapter +from eval_protocol.quickstart.aha_judge.utils import ( + JUDGE_CONFIGS, + LABEL_TO_SCORE, + run_single_judgment, +) +from eval_protocol.utils.evaluation_row_utils import serialize_message + +from openai import AsyncOpenAI + + +async def aha_judge( + row: EvaluationRow, judge_name: str = "kimi-k2-instruct-0905", adapter: Optional[BaseAdapter] = None +) -> EvaluationRow: + """ + LLM Judge evaluation using Arena-Hard-Auto style pairwise comparisons for a single row. 
+ + Compares model response against ground truth using an LLM judge: + 1. Extracts the question from messages[:-1] + 2. Compares messages[-1] (new model response) vs ground_truth (baseline response) + 3. Runs two judgment rounds (A vs B, B vs A) to reduce position bias + 4. Returns individual scores for bootstrap aggregation + + Args: + row: Single EvaluationRow object with messages, ground_truth, and tools + judge_name: Name of the judge configuration to use + adapter: Optional adapter to push scores back to (if provided) + + Returns: + Same row with updated evaluation_result containing individual judgment scores + """ + + if not row.messages: + return row + + judge_config = JUDGE_CONFIGS[judge_name] + + # Extract question and answers + question_text = "\n".join([serialize_message(msg) for msg in row.messages[:-1]]) + model_a_answer = str(row.ground_truth) + model_b_answer = serialize_message(row.messages[-1]) + + async with AsyncOpenAI(api_key=judge_config.get("api_key"), base_url=judge_config.get("base_url")) as client: + # Run two judgment rounds in sequence (A vs B, then B vs A) + result1 = await run_single_judgment( + question_text, model_a_answer, model_b_answer, row.tools, judge_config, client + ) + result2 = await run_single_judgment( + question_text, model_b_answer, model_a_answer, row.tools, judge_config, client + ) + + if not result1 or not result2 or not result1.get("score") or not result2.get("score"): + # If either judgment failed, mark as invalid (don't include in distribution) + final_score = 0.0 + reason = "Failed to get judgment scores" + metrics = {} + is_score_valid = False + else: + # Convert judgment scores to numerical scores + game1_score = 1 - LABEL_TO_SCORE[result1["score"]] + game2_score = LABEL_TO_SCORE[result2["score"]] + final_score = (game1_score + game2_score) / 2 + + reason = f"LLM Judge comparison: Round 1: {result1['score']}, Round 2: {result2['score']}" + metrics = { + "round1_judgment": MetricResult(score=game1_score, 
reason=result1["judgment"]), + "round2_judgment": MetricResult(score=game2_score, reason=result2["judgment"]), + } + is_score_valid = True + + row.evaluation_result = EvaluateResult( + score=final_score, + reason=reason, + metrics=metrics, + is_score_valid=is_score_valid, + ) + + # Upload score to adapter if provided + if adapter and row.evaluation_result and row.evaluation_result.is_score_valid: + model_name = row.input_metadata.completion_params.get("model", "unknown_model") + adapter.upload_score(row, model_name) + + return row diff --git a/eval_protocol/quickstart/aha_judge/llm_judge_braintrust.py b/eval_protocol/quickstart/aha_judge/llm_judge_braintrust.py new file mode 100644 index 00000000..1eb360f8 --- /dev/null +++ b/eval_protocol/quickstart/aha_judge/llm_judge_braintrust.py @@ -0,0 +1,63 @@ +""" +Example for using Braintrust with the aha judge. +""" + +import os + +import pytest + +# Skip entire module in CI to prevent import-time side effects +if os.environ.get("CI") == "true": + pytest.skip("Skip quickstart in CI", allow_module_level=True) + +from eval_protocol import ( + evaluation_test, + aha_judge, + EvaluationRow, + SingleTurnRolloutProcessor, + DynamicDataLoader, + create_braintrust_adapter, + multi_turn_assistant_to_ground_truth, +) + + +# uncomment when dataloader is fixed +def braintrust_data_generator(): + adapter = create_braintrust_adapter() + return adapter.get_evaluation_rows( + btql_query=f""" + select: * + from: project_logs('{os.getenv("BRAINTRUST_PROJECT_ID")}') traces + filter: is_root = true + limit: 10 + """ + ) + + +@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Skip in CI") +@pytest.mark.parametrize( + "completion_params", + [ + {"model": "gpt-4.1"}, + { + "max_tokens": 131000, + "extra_body": {"reasoning_effort": "medium"}, + "model": "fireworks_ai/accounts/fireworks/models/gpt-oss-120b", + }, + { + "max_tokens": 131000, + "extra_body": {"reasoning_effort": "low"}, + "model": 
"fireworks_ai/accounts/fireworks/models/gpt-oss-20b", + }, + ], +) +@evaluation_test( + data_loaders=DynamicDataLoader( + generators=[braintrust_data_generator], + preprocess_fn=multi_turn_assistant_to_ground_truth, + ), + rollout_processor=SingleTurnRolloutProcessor(), + max_concurrent_evaluations=2, +) +async def test_llm_judge(row: EvaluationRow) -> EvaluationRow: + return await aha_judge(row) diff --git a/eval_protocol/quickstart/llm_judge_langfuse.py b/eval_protocol/quickstart/aha_judge/llm_judge_langfuse.py similarity index 96% rename from eval_protocol/quickstart/llm_judge_langfuse.py rename to eval_protocol/quickstart/aha_judge/llm_judge_langfuse.py index 21459494..1ad5fb62 100644 --- a/eval_protocol/quickstart/llm_judge_langfuse.py +++ b/eval_protocol/quickstart/aha_judge/llm_judge_langfuse.py @@ -10,15 +10,13 @@ from eval_protocol import ( evaluation_test, aha_judge, - multi_turn_assistant_to_ground_truth, EvaluationRow, SingleTurnRolloutProcessor, create_langfuse_adapter, DynamicDataLoader, + multi_turn_assistant_to_ground_truth, ) -from eval_protocol.quickstart import aha_judge - def langfuse_data_generator(): adapter = create_langfuse_adapter() diff --git a/eval_protocol/quickstart/llm_judge_langsmith.py b/eval_protocol/quickstart/aha_judge/llm_judge_langsmith.py similarity index 100% rename from eval_protocol/quickstart/llm_judge_langsmith.py rename to eval_protocol/quickstart/aha_judge/llm_judge_langsmith.py index 20d12730..30528a98 100644 --- a/eval_protocol/quickstart/llm_judge_langsmith.py +++ b/eval_protocol/quickstart/aha_judge/llm_judge_langsmith.py @@ -27,11 +27,11 @@ from eval_protocol import ( evaluation_test, aha_judge, - multi_turn_assistant_to_ground_truth, EvaluationRow, SingleTurnRolloutProcessor, LangSmithAdapter, DynamicDataLoader, + multi_turn_assistant_to_ground_truth, ) diff --git a/eval_protocol/quickstart/llm_judge_openai_responses.py b/eval_protocol/quickstart/aha_judge/llm_judge_openai_responses.py similarity index 100% rename 
from eval_protocol/quickstart/llm_judge_openai_responses.py rename to eval_protocol/quickstart/aha_judge/llm_judge_openai_responses.py index aafc8fb7..b7aa5110 100644 --- a/eval_protocol/quickstart/llm_judge_openai_responses.py +++ b/eval_protocol/quickstart/aha_judge/llm_judge_openai_responses.py @@ -22,11 +22,11 @@ from eval_protocol import ( evaluation_test, aha_judge, - multi_turn_assistant_to_ground_truth, EvaluationRow, SingleTurnRolloutProcessor, OpenAIResponsesAdapter, DynamicDataLoader, + multi_turn_assistant_to_ground_truth, ) diff --git a/eval_protocol/quickstart/aha_judge/utils.py b/eval_protocol/quickstart/aha_judge/utils.py new file mode 100644 index 00000000..0ebc8432 --- /dev/null +++ b/eval_protocol/quickstart/aha_judge/utils.py @@ -0,0 +1,133 @@ +""" +Arena-Hard-Auto utility functions adapted for Eval Protocol. +""" + +import os +import re +from typing import Dict, Any, Optional + +OG_ARENA_HARD_PROMPT = """Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better. + +Begin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers. + +When evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information. + +Then consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. 
Concise means the response is clear and not verbose or excessive. + +Then consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt. + +After providing your explanation, you must output only one of the following choices as your final verdict with a label: + +1. Assistant A is significantly better: [[A>>B]] +2. Assistant A is slightly better: [[A>B]] +3. Tie, relatively the same: [[A=B]] +4. Assistant B is slightly better: [[B>A]] +5. Assistant B is significantly better: [[B>>A]] + +Example output: "My final verdict is tie: [[A=B]]".""" + + +# Judge model configurations for Arena-Hard-Auto style evaluation +# Each config specifies the model, parameters, and concurrency limits for LLM judges +JUDGE_CONFIGS = { + "gpt-4.1": { + "model": "gpt-4.1", + "temperature": 0.0, + "max_tokens": 16000, + }, + "gemini-2.5-pro": { + "model": "gemini-2.5-pro", + "temperature": 1.0, + "max_tokens": 32000, + "api_key": os.getenv("GEMINI_API_KEY"), + "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/", + }, + "gemini-2.5-flash": { + "model": "gemini-2.5-flash", + "temperature": 1.0, + "max_tokens": 32000, + "api_key": os.getenv("GEMINI_API_KEY"), + "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/", + }, + "kimi-k2-instruct-0905": { + "model": "accounts/fireworks/models/kimi-k2-instruct-0905", + "temperature": 0.6, # Kimi recommended temperature + "max_tokens": 131000, + "api_key": os.getenv("FIREWORKS_API_KEY"), + "base_url": "https://api.fireworks.ai/inference/v1", + }, +} + +# Maps judge verdict labels (the five labels listed in OG_ARENA_HARD_PROMPT) to +# "degree assistant A is better" scores; aha_judge converts these to the new +# model's score via `1 - score` (round 1) or directly (round 2). +LABEL_TO_SCORE = { + "A>>B": 1.0, + "A>B": 6 / 7, + "A=B": 0.5, + "B>A": 1 / 7, + "B>>A": 0.0, +} + + +def get_score(judgment, patterns): + """Extract judgment score from text.
From arena-hard-auto/gen_judgment.py""" + for pattern in patterns: + pattern = re.compile(pattern) + + matches = pattern.findall(judgment.upper()) + matches = [m for m in matches if m != ""] + + if len(set(matches)) > 0: + return matches[-1].strip("\n") + return None + + +async def run_single_judgment( + question_text: str, answer_a: str, answer_b: str, tools, judge_config, client +) -> Optional[Dict[str, Any]]: + """Run a single pairwise judgment between two answers.""" + user_prompt = f"""<|User Prompt|> +{question_text} + +<|The Start of Assistant A's Answer|> +{answer_a} +<|The End of Assistant A's Answer|> + +<|The Start of Assistant B's Answer|> +{answer_b} +<|The End of Assistant B's Answer|> + +<|Available Tools|> +{tools} +<|End of Available Tools|> + +{OG_ARENA_HARD_PROMPT}""" + + messages = [{"role": "user", "content": user_prompt}] + + try: + api_params = { + "model": judge_config["model"], + "messages": messages, + "temperature": judge_config["temperature"], + "max_tokens": judge_config["max_tokens"], + } + + if tools: + api_params["tools"] = tools + api_params["tool_choice"] = "none" + + response = await client.chat.completions.create(**api_params) + judgment_text = response.choices[0].message.content + if not judgment_text: + return None + + except Exception as e: + print(f"Error getting judgment from OpenAI: {e}") + return None + + score = get_score(judgment_text, [r"\[\[([AB<>=]+)\]\]", r"\[([AB<>=]+)\]"]) + return {"score": score, "judgment": judgment_text, "prompt": messages} diff --git a/eval_protocol/utils/evaluation_row_utils.py b/eval_protocol/utils/evaluation_row_utils.py new file mode 100644 index 00000000..d89f0c55 --- /dev/null +++ b/eval_protocol/utils/evaluation_row_utils.py @@ -0,0 +1,136 @@ +""" +Utility functions for processing and transforming EvaluationRow objects. 
+ +This module contains functions that work with EvaluationRow objects for various +preprocessing, filtering, and transformation tasks commonly used across the +evaluation pipeline. +""" + +from typing import List + +from eval_protocol.models import EvaluationRow, Message + + +def serialize_message(msg: Message) -> str: + """ + Convert a Message object to a string representation. + + Args: + msg: Message object to serialize + + Returns: + String representation of the message including role, content, and tool calls + """ + parts = [f"{msg.role}: {msg.content}"] + + if msg.tool_calls: + for tool_call in msg.tool_calls: + tool_name = tool_call.function.name + tool_args = tool_call.function.arguments + parts.append(f"[Tool Call: {tool_name}({tool_args})]") + + return "\n".join(parts) + + +def filter_longest_conversation(data: List[EvaluationRow]) -> List[EvaluationRow]: + """ + Filter out the longest conversation from a list of evaluation rows that share the same rollout_id. + + Args: + data: List of EvaluationRow objects that share the same rollout_id + + Returns: + List containing only the EvaluationRow with the most messages (longest conversation) + """ + if not data: + return data + + if len(data) == 1: + return data + + # Find the row with the most messages (longest conversation) + longest_row = max(data, key=lambda row: len(row.messages)) + + return [longest_row] + + +def multi_turn_assistant_to_ground_truth(data: List[EvaluationRow]) -> List[EvaluationRow]: + """ + Split multi-turn conversations into rows, with each assistant message as ground truth. 
+ + Args: + data: List of EvaluationRow objects + + Returns: + List of expanded EvaluationRow objects, one for each assistant message + """ + expanded_rows = [] + seen_traces: set[str] = set() + + for row in data: + messages = row.messages + tools = row.tools + input_metadata = row.input_metadata + + assistant_positions = [] + for i, message in enumerate(messages): + if message.role == "assistant": + assistant_positions.append(i) + + # Create separate evaluation rows on each assistant message (where the comparison model will respond) + for pos in assistant_positions: + messages_before_assistant = messages[:pos] + assistant_message = messages[pos] + + # In this case, we trace every request, so we need to filter out duplicates + curr_trace = "\n".join(serialize_message(m) for m in messages_before_assistant) + if curr_trace in seen_traces: + continue + seen_traces.add(curr_trace) + + ground_truth_message = serialize_message(assistant_message) + + expanded_rows.append( + EvaluationRow( + messages=messages_before_assistant, + tools=tools, + input_metadata=input_metadata, + ground_truth=ground_truth_message, + ) + ) + + return expanded_rows + + +def assistant_to_ground_truth(data: List[EvaluationRow]) -> List[EvaluationRow]: + """ + Extract the last assistant message as ground truth and remove it from the conversation. 
+ + Args: + data: List of EvaluationRow objects + + Returns: + List of EvaluationRow objects with last assistant message moved to ground_truth + """ + processed_rows = [] + + for row in data: + messages = row.messages.copy() # Don't modify original + + if messages[-1].role == "assistant": + assistant_message = messages[-1] + messages = messages[:-1] + ground_truth_message = serialize_message(assistant_message) + else: + raise ValueError("Last message is not from assistant") + + processed_rows.append( + EvaluationRow( + messages=messages, + tools=row.tools, + input_metadata=row.input_metadata, + ground_truth=ground_truth_message, + ) + ) + + return processed_rows diff --git a/tests/github_actions/test_github_actions_rollout.py b/tests/github_actions/test_github_actions_rollout.py index a4c80dcf..48dd46d6 100644 --- a/tests/github_actions/test_github_actions_rollout.py +++ b/tests/github_actions/test_github_actions_rollout.py @@ -14,7 +14,7 @@ from eval_protocol.pytest.github_action_rollout_processor import GithubActionRolloutProcessor from eval_protocol.types.remote_rollout_processor import DataLoaderConfig from eval_protocol.adapters.fireworks_tracing import FireworksTracingAdapter -from eval_protocol.quickstart.utils import filter_longest_conversation +from eval_protocol.utils.evaluation_row_utils import filter_longest_conversation ROLLOUT_IDS = set() diff --git a/tests/remote_server/test_remote_fireworks.py b/tests/remote_server/test_remote_fireworks.py index d27ace02..ea133ccf 100644 --- a/tests/remote_server/test_remote_fireworks.py +++ b/tests/remote_server/test_remote_fireworks.py @@ -21,7 +21,7 @@ from eval_protocol.pytest import evaluation_test from eval_protocol.pytest.remote_rollout_processor import RemoteRolloutProcessor from eval_protocol.adapters.fireworks_tracing import FireworksTracingAdapter -from eval_protocol.quickstart.utils import filter_longest_conversation +from eval_protocol.utils.evaluation_row_utils import filter_longest_conversation from 
eval_protocol.types.remote_rollout_processor import DataLoaderConfig ROLLOUT_IDS = set() diff --git a/tests/remote_server/test_remote_langfuse.py b/tests/remote_server/test_remote_langfuse.py index 35828570..8c66b136 100644 --- a/tests/remote_server/test_remote_langfuse.py +++ b/tests/remote_server/test_remote_langfuse.py @@ -21,7 +21,7 @@ from eval_protocol.pytest import evaluation_test from eval_protocol.pytest.remote_rollout_processor import RemoteRolloutProcessor from eval_protocol.adapters.langfuse import create_langfuse_adapter -from eval_protocol.quickstart.utils import filter_longest_conversation +from eval_protocol.utils.evaluation_row_utils import filter_longest_conversation from eval_protocol.types.remote_rollout_processor import DataLoaderConfig ROLLOUT_IDS = set() diff --git a/tests/test_quickstart_utils.py b/tests/test_quickstart_utils.py index de185c84..1b7fb18f 100644 --- a/tests/test_quickstart_utils.py +++ b/tests/test_quickstart_utils.py @@ -3,7 +3,7 @@ import pytest from eval_protocol.models import EvaluationRow, InputMetadata, Message -from eval_protocol.quickstart.utils import ( +from eval_protocol.utils.evaluation_row_utils import ( multi_turn_assistant_to_ground_truth, serialize_message, assistant_to_ground_truth,