"""OpenAI Responses adapter for Eval Protocol.

This adapter pulls data from the OpenAI Responses API and converts it
to EvaluationRow format for use in evaluation pipelines.
"""

import logging
from collections.abc import Iterable, Sequence
from typing import List

from openai import OpenAI
from openai.pagination import SyncCursorPage
from openai.types.chat.chat_completion_function_tool_param import ChatCompletionFunctionToolParam
from openai.types.chat.chat_completion_message_function_tool_call import (
    ChatCompletionMessageFunctionToolCall,
    Function,
)
from openai.types.responses import Response
from openai.types.responses.response_item import ResponseItem
from openai.types.responses.tool import Tool

from eval_protocol.models import EvaluationRow, InputMetadata, Message

logger = logging.getLogger(__name__)


class OpenAIResponsesAdapter:
    """Adapter to pull data from the OpenAI Responses API and convert it to EvaluationRow format.

    This adapter can pull both chat conversations and tool-calling traces from
    the OpenAI Responses API and convert them into the EvaluationRow format
    expected by the evaluation protocol.

    Examples:
        Basic usage:
        >>> adapter = OpenAIResponsesAdapter(
        ...     api_key="your_api_key",
        ... )
        >>> rows = adapter.get_evaluation_rows(response_ids=["response_id_1", "response_id_2"])
    """

    def __init__(self, api_key: str | None = None, base_url: str | None = None):
        """Initialize the OpenAI Responses adapter."""
        self.openai = OpenAI(api_key=api_key, base_url=base_url)

    def get_evaluation_rows(
        self,
        response_ids: List[str],
    ) -> List[EvaluationRow]:
        """Pull responses from the OpenAI Responses API and convert them to EvaluationRow format.

        Args:
            response_ids: List of response IDs to fetch.

        Returns:
            List[EvaluationRow]: Converted evaluation rows.
        """
        eval_rows: list[EvaluationRow] = []

        for response_id in response_ids:
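            # Fetch both the input items (the prompt side of the conversation)
            # and the response object itself. The code assumes input items are
            # returned newest-first, which _create_messages undoes with its
            # final reversal.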
            input_items = self.openai.responses.input_items.list(response_id=response_id)
            response = self.openai.responses.retrieve(response_id=response_id)
            eval_rows.append(self._create_evaluation_row(input_items, response))

        logger.info(
            "Successfully processed %d responses into %d evaluation rows", len(response_ids), len(eval_rows)
        )
        return eval_rows

    def _create_evaluation_row(self, input_items: SyncCursorPage[ResponseItem], response: Response) -> EvaluationRow:
        """Convert a response to an evaluation row."""
        messages: list[Message] = []
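        # response.instructions may be a plain string or a list of input items;
        # only the string form is supported here and is mapped to a system message.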
        if response.instructions:
            if isinstance(response.instructions, list):
                raise NotImplementedError("List of instructions is not supported")
            else:
                messages.append(Message(role="system", content=response.instructions))
        messages.extend(self._create_messages(input_items))
        if response.output_text:
            messages.append(Message(role="assistant", content=response.output_text))
        tools = self._responses_tools_to_chat_completion_tools(response.tools)
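        # ChatCompletionFunctionToolParam is a TypedDict, so dict() yields the
        # plain dicts passed to EvaluationRow's tools field.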
        tool_dicts = [dict(tool) for tool in tools]
        return EvaluationRow(
            messages=messages,
            tools=tool_dicts,
            input_metadata=InputMetadata(
                completion_params={
                    "model": response.model,
                    "temperature": response.temperature,
                    "max_output_tokens": response.max_output_tokens,
                    "max_tool_calls": response.max_tool_calls,
                    "parallel_tool_calls": response.parallel_tool_calls,
                    # We have to manually extract the reasoning effort and summary
                    # from the response.reasoning object because the openai-python
                    # object causes an issue with model_dump(), which is used for
                    # testing:
                    # https://github.com/openai/openai-python/issues/1306#issuecomment-2966267356
                    "reasoning": (
                        {
                            "effort": response.reasoning.effort,
                            "summary": response.reasoning.summary,
                        }
                        if response.reasoning
                        else None
                    ),
                    "top_logprobs": response.top_logprobs,
                    "truncation": response.truncation,
                    "top_p": response.top_p,
                }
            ),
        )

    def _responses_tools_to_chat_completion_tools(
        self, tools: List[Tool]
    ) -> Sequence[ChatCompletionFunctionToolParam]:
        """Convert OpenAI Responses API tools to chat completion function tool params."""
        chat_completion_tools: List[ChatCompletionFunctionToolParam] = []
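        # Responses API function tools carry name/parameters/strict/description
        # at the top level, while chat-completions tools nest them under a
        # "function" key; this loop performs that re-nesting.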
        for tool in tools:
            if tool.type == "function":
                chat_completion_tools.append(
                    {
                        "type": "function",
                        "function": {
                            "name": tool.name,
                            "parameters": tool.parameters or {},
                            "strict": tool.strict,
                            "description": tool.description or "",
                        },
                    }
                )
            else:
                raise NotImplementedError("Only function tools are supported")
        return chat_completion_tools

    def _create_messages(self, input_items: SyncCursorPage[ResponseItem]) -> Iterable[Message]:
        """Create messages from input items.

        Converts OpenAI Responses API input items to chat completion message format.
        Handles different types of response items, including messages and tool calls,
        and groups parallel tool calls under a single assistant message. Since we
        iterate backwards and reverse at the end, tool call outputs are added
        before the assistant message that issued the tool calls.
        """
        messages: list[Message] = []
        current_tool_calls: list[ChatCompletionMessageFunctionToolCall] = []
        tool_call_outputs: list[Message] = []

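        # Walk the newest-first items, buffering tool calls and their outputs so
        # that, after the final reversal, each assistant tool-call message ends
        # up immediately before its corresponding tool outputs.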
        for item in input_items:
            if item.type == "message":
                # If we have accumulated tool calls, create an assistant message with them
                if current_tool_calls:
                    # Add tool call outputs first (since we reverse at the end)
                    messages.extend(tool_call_outputs)
                    tool_call_outputs = []
                    # Then add the assistant message with tool calls
                    messages.append(Message(role="assistant", tool_calls=current_tool_calls))
                    current_tool_calls = []

                # This is a message item (input or output)
                content = item.content
                for content_item in content:
                    if content_item.type == "input_text":
                        text_content = content_item.text
                        # Create new message
                        messages.append(Message(role=item.role, content=text_content))
                    else:
                        raise NotImplementedError(f"Unsupported content type: {content_item.type}")
            elif item.type == "function_call_output":
                # Collect tool call outputs to add before assistant message
                tool_call_outputs.append(Message(role="tool", content=item.output, tool_call_id=item.call_id))
            elif item.type == "function_call":
                tool_call = ChatCompletionMessageFunctionToolCall(
                    id=item.call_id, type="function", function=Function(name=item.name, arguments=item.arguments)
                )
                current_tool_calls.append(tool_call)
            else:
                raise NotImplementedError(f"Unsupported item type: {item.type}")

        # If we have remaining tool calls, create an assistant message with them
        if current_tool_calls:
            # Add tool call outputs first (since we reverse at the end)
            messages.extend(tool_call_outputs)
            # Then add the assistant message with tool calls
            messages.append(Message(role="assistant", tool_calls=current_tool_calls))

        return reversed(messages)
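

if __name__ == "__main__":
    # Minimal usage sketch, assuming OPENAI_API_KEY is set in the environment
    # and the placeholder ID below is replaced with a real response ID.
    adapter = OpenAIResponsesAdapter()
    for row in adapter.get_evaluation_rows(response_ids=["resp_replace_me"]):
        print([message.role for message in row.messages])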