"""
Default LLM judge for Eval Protocol. Inspired by Arena-Hard-Auto.
"""

import os
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

import pandas as pd
import pytest
from tqdm import tqdm

from eval_protocol.models import EvaluateResult, EvaluationRow, MetricResult
from eval_protocol.pytest import evaluation_test
from eval_protocol.pytest.default_single_turn_rollout_process import SingleTurnRolloutProcessor
from eval_protocol.quickstart.utils import pairwise_judgment

# Langfuse client setup
try:
    from langfuse import get_client  # pyright: ignore[reportPrivateImportUsage]

    LANGFUSE_AVAILABLE = True
    langfuse = get_client()
except ImportError:
    LANGFUSE_AVAILABLE = False
    langfuse = None


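# Illustrative local setup for this eval (values are placeholders, not real keys):
#
#   export LANGFUSE_PUBLIC_KEY="..."
#   export LANGFUSE_SECRET_KEY="..."
#   export LANGFUSE_HOST="https://cloud.langfuse.com"  # optional; this default is used below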
def fetch_langfuse_traces_as_evaluation_rows(
    hours_back: int = 168, tags: Optional[List[str]] = None
) -> List[EvaluationRow]:
    """Fetch recent Langfuse traces and convert them into EvaluationRow objects.

    Returns an empty list if the adapter is unavailable, credentials are missing,
    or the fetch fails for any other reason.
    """
    try:
        from eval_protocol.adapters.langfuse import create_langfuse_adapter

        if not os.getenv("LANGFUSE_PUBLIC_KEY") or not os.getenv("LANGFUSE_SECRET_KEY"):
            raise ValueError("LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY must be set")

        adapter = create_langfuse_adapter(
            public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),  # pyright: ignore[reportArgumentType]
            secret_key=os.getenv("LANGFUSE_SECRET_KEY"),  # pyright: ignore[reportArgumentType]
            host=os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com"),
        )

        now = datetime.now()
        from_timestamp = now - timedelta(hours=hours_back)

        return adapter.get_evaluation_rows(
            limit=20, from_timestamp=from_timestamp, to_timestamp=now, include_tool_calls=True, tags=tags
        )

    except Exception as e:
        print(f"❌ LangfuseAdapter failed: {e}")
        return []


@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Skip in CI")
@pytest.mark.asyncio
@evaluation_test(
    input_rows=[fetch_langfuse_traces_as_evaluation_rows()],
    completion_params=[{"model": "gpt-4o"}],
    rollout_processor=SingleTurnRolloutProcessor(),
    split_multi_turn=True,
    mode="all",
)
async def test_llm_judge(rows: list[EvaluationRow]) -> list[EvaluationRow]:
    """
    Simplified LLM judge for Arena-Hard-Auto style pairwise comparisons.

    Each row contains:
    - messages[:-1]: question/prompt (conversation context)
    - messages[-1]: Model B's answer (comparison model response)
    - ground_truth: Model A's answer (original assistant response)
    """

    if not rows:
        print("❌ No evaluation rows provided")
        return rows

    print(f"🔄 Processing {len(rows)} evaluation rows for LLM judging...")

    model_name = rows[0].input_metadata.completion_params.get("model", "unknown_model")

    # Generate judgments directly from rows
    import concurrent.futures
    from concurrent.futures import ThreadPoolExecutor

    def run_judgment(row: EvaluationRow) -> Optional[Dict[str, Any]]:
        """Run pairwise judgment for a single evaluation row."""
        if not row.messages:
            return None

        # Extract question and answers
        question_text = "\n".join([f"{msg.role}: {msg.content}" for msg in row.messages[:-1]])
        model_a_answer = row.ground_truth  # Original response
        model_b_answer = row.messages[-1].content  # Comparison model response

        games = []

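        # Each pair is judged twice with the answer order swapped; averaging both
        # orderings (as Arena-Hard-Auto does) reduces the judge's position bias.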
        # Round 1: A vs B (original vs comparison)
        result1 = pairwise_judgment(
            question_text=question_text,
            answer_a=model_a_answer,
            answer_b=model_b_answer,
        )
        games.append(result1)

        # Round 2: B vs A (comparison vs original)
        result2 = pairwise_judgment(
            question_text=question_text,
            answer_a=model_b_answer,
            answer_b=model_a_answer,
        )
        games.append(result2)

        row.evaluation_result = EvaluateResult(
            score=0.0,
            reason=(
                f"LLM Judge comparison: Round 1: {result1['score']}, Round 2: {result2['score']}"
                if result1 and result2
                else "Failed to get judgment scores"
            ),
            metrics={
                "round1_judgment": MetricResult(
                    score=0.0, reason=result1["judgment"] if result1 else "Failed to get judgment reason"
                ),
                "round2_judgment": MetricResult(
                    score=0.0, reason=result2["judgment"] if result2 else "Failed to get judgment reason"
                ),
            },
        )

        return {"model": model_name, "games": games}

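    # pairwise_judgment is assumed to be a blocking LLM call under the hood, so a
    # thread pool is used below to run the network-bound judgments concurrently.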
    judgments = []
    max_workers = 64

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(run_judgment, row) for row in rows]

        for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures), desc="Generating judgments"):
            result = future.result()
            if result and result["games"][0] and result["games"][1]:
                judgments.append(result)

    if not judgments:
        print("❌ No valid judgments generated")
        return rows

    print(f"✅ Generated {len(judgments)} valid judgments")

    # Convert to scores for leaderboard
    label_to_score = {
        "A>B": [1],
        "A>>B": [1] * 3,
        "A=B": [0.5],
        "A<<B": [0] * 3,
        "A<B": [0],
        "B>A": [0],
        "B>>A": [0] * 3,
        "B=A": [0.5],
        "B<<A": [1] * 3,
        "B<A": [1],
    }

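    # In the mapping above, a score of 1 means "model A won that game", and decisive
    # verdicts ("A>>B", "B<<A", ...) are weighted three times, mirroring Arena-Hard-Auto.
    # Worked example: if game 1 (A = original, B = candidate) returns "A>>B" and game 2
    # (A = candidate, B = original) returns "B>A", the candidate collects
    # [0] + [1 - 1, 1 - 1, 1 - 1] = [0, 0, 0, 0], i.e. a clear loss.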
    # Extract scores from judgments
    scores_data = []
    for judgment in judgments:
        game1, game2 = judgment["games"]
        if game1 and game2 and game1.get("score") and game2.get("score"):
            # Game 2 scores the candidate directly (it plays position A); game 1
            # scores are flipped (1 - s) because the candidate plays position B there.
            scores = label_to_score[game2["score"]] + [1 - s for s in label_to_score[game1["score"]]]
            scores_data.extend(scores)

    if not scores_data:
        print("❌ No valid scores extracted")
        return rows

    # Create DataFrame (single column of per-game scores)
    battles = pd.DataFrame({"score": scores_data})

    # Bootstrap sampling to estimate performance relative to the original model,
    # whose win rate is fixed at 50% by construction
    bootstrap_means = [
        battles.sample(frac=1.0, replace=True)["score"].mean() for _ in tqdm(range(100), desc="Bootstrap sampling")
    ]
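    # Each bootstrap round resamples the scores with replacement and records the mean
    # win rate; the 5th and 95th percentiles of those means form a 90% confidence interval.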

    # Calculate final scores
    bootstraps = pd.Series(bootstrap_means)
    mean_score = bootstraps.mean()
    lower_score = bootstraps.quantile(0.05)
    upper_score = bootstraps.quantile(0.95)

    # Print leaderboard
    print("\n##### LLM Judge Results (90% CI) #####")

    clean_model_name = model_name.split("/")[-1]  # Strip any provider prefix from the model name

    print(f"{clean_model_name}: {mean_score:.1%} (CI: {lower_score:.1%} - {upper_score:.1%})")
    print("original: 50.0% (CI: 50.0% - 50.0%)")

    for row in rows:
        # This is hacky, but it is the only way to surface the aggregate score through
        # the per-row evaluation result in our current pattern
        if row.evaluation_result:
            row.evaluation_result.score = mean_score
            # Standard error approximation from the 90% CI: SE ≈ (upper - lower) / (2 × 1.645).
            # This is only approximate because it assumes the bootstrap distribution is normal.
            row.evaluation_result.standard_error = (upper_score - lower_score) / (2 * 1.645)

    return rows
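

# A minimal local invocation might look like this (the path is illustrative; point
# pytest at wherever this module lives, with the Langfuse variables above exported):
#
#   pytest path/to/this_module.py -s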