import os

import pytest
from pydantic import BaseModel, Field
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel

from eval_protocol.models import EvaluateResult, EvaluationRow
from eval_protocol.pytest import evaluation_test
from eval_protocol.pytest.types import RolloutProcessorConfig
from tests.chinook.dataset import collect_dataset
from tests.chinook.pydantic.agent import setup_agent
from tests.pytest.test_pydantic_agent import PydanticAgentRolloutProcessor

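# Instructions for the LLM judge that compares the agent's narrative answer
# with the expected answer and returns a binary score.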
LLM_JUDGE_PROMPT = (
    "Your job is to compare the response to the expected answer.\n"
    "The response will be a narrative report of the query results.\n"
    "If the response contains the same or well summarized information as the expected answer, return 1.0.\n"
    "If the response does not contain the same information or is missing information, return 0.0."
)

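# Builds the PydanticAI agent under test from the completion params supplied by
# the @evaluation_test config below.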
def agent_factory(config: RolloutProcessorConfig) -> Agent:
    model_name = config.completion_params["model"]
    provider = config.completion_params["provider"]
    model = OpenAIModel(model_name, provider=provider)
    return setup_agent(model)

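# Runs only locally: the rollout and the LLM judge both call the Fireworks API.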
@pytest.mark.skipif(
    os.environ.get("CI") == "true",
    reason="Only run this test locally (skipped in CI)",
)
@pytest.mark.asyncio
@evaluation_test(
    input_rows=[collect_dataset()],
    completion_params=[
        {
            "model": "accounts/fireworks/models/kimi-k2-instruct",
            "provider": "fireworks",
        },
    ],
    rollout_processor=PydanticAgentRolloutProcessor(agent_factory),
    mode="pointwise",
)
async def test_pydantic_complex_queries(row: EvaluationRow) -> EvaluationRow:
    """
    Evaluate complex queries against the Chinook database using a PydanticAI agent.
    """
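    # Score 0.0 when there is no usable assistant output; otherwise delegate
    # grading to an LLM judge.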
    last_assistant_message = row.last_assistant_message()
    if last_assistant_message is None:
        row.evaluation_result = EvaluateResult(
            score=0.0,
            reason="No assistant message found",
        )
    elif not last_assistant_message.content:
        row.evaluation_result = EvaluateResult(
            score=0.0,
            reason="Assistant message has no content",
        )
    else:
        model = OpenAIModel(
            "accounts/fireworks/models/kimi-k2-instruct",
            provider="fireworks",
        )

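        # Structured verdict the judge must return; PydanticAI enforces this
        # schema via output_type.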
        class Response(BaseModel):
            """Verdict returned by the LLM judge."""

            score: float = Field(
                description="A score between 0.0 and 1.0 indicating whether the response is correct."
            )
            reason: str = Field(
                description="A short explanation of why the response is correct or incorrect."
            )

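        # Judge agent; output_retries allows re-prompting when the model fails
        # to produce a valid Response.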
        comparison_agent = Agent(
            model=model,
            system_prompt=LLM_JUDGE_PROMPT,
            output_type=Response,
            output_retries=5,
        )
        result = await comparison_agent.run(
            f"Expected answer: {row.ground_truth}\nResponse: {last_assistant_message.content}"
        )
        row.evaluation_result = EvaluateResult(
            score=result.output.score,
            reason=result.output.reason,
        )
    return row