diff --git a/docs/agents.md b/docs/agents.md index adbbbefc98..5c07433edf 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -322,6 +322,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), @@ -386,6 +387,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), @@ -1050,6 +1052,7 @@ with capture_run_messages() as messages: # (2)! timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -1074,6 +1077,7 @@ with capture_run_messages() as messages: # (2)! timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/docs/api/models/function.md b/docs/api/models/function.md index 4cdceb449f..bfbe35c469 100644 --- a/docs/api/models/function.md +++ b/docs/api/models/function.md @@ -30,6 +30,7 @@ async def model_function( timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ] diff --git a/docs/deferred-tools.md b/docs/deferred-tools.md index 31e14149c0..fc5dc2eaa4 100644 --- a/docs/deferred-tools.md +++ b/docs/deferred-tools.md @@ -118,6 +118,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -152,6 +153,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelRequest( @@ -173,6 +175,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -197,6 +200,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -324,6 +328,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -350,6 +355,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/docs/message-history.md b/docs/message-history.md index 3363312fed..7045c87c9a 100644 --- a/docs/message-history.md +++ b/docs/message-history.md @@ -51,6 +51,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -95,6 +96,7 @@ async def main(): timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ) ] @@ -122,6 +124,7 @@ async def main(): timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -178,6 +181,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -198,6 +202,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -303,6 +308,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -323,6 +329,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/docs/testing.md b/docs/testing.md index 3089585ab0..7178039af0 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -128,6 +128,7 @@ async def test_forecast(): timestamp=IsNow(tz=timezone.utc), # 
(7)! ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -158,6 +159,7 @@ async def test_forecast(): timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/docs/tools.md b/docs/tools.md index 40dcf5c810..2b8d9247b8 100644 --- a/docs/tools.md +++ b/docs/tools.md @@ -88,6 +88,7 @@ print(dice_result.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -110,6 +111,7 @@ print(dice_result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -132,6 +134,7 @@ print(dice_result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 92c45a0c52..5a82570759 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -19,7 +19,7 @@ from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx # type: ignore from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION from pydantic_ai._tool_manager import ToolManager -from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, run_in_executor +from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, now_utc, run_in_executor from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_graph import BaseNode, GraphRunContext from pydantic_graph.beta import Graph, GraphBuilder @@ -487,6 +487,7 @@ async def _make_request( async def _prepare_request( self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]] ) -> tuple[ModelSettings | None, models.ModelRequestParameters, list[_messages.ModelMessage], RunContext[DepsT]]: + self.request.timestamp = now_utc() self.request.run_id = self.request.run_id or ctx.state.run_id ctx.state.message_history.append(self.request) @@ -504,6 +505,11 @@ async def _prepare_request( # Update the new message index to ensure `result.new_messages()` returns the correct messages ctx.deps.new_message_index -= len(original_history) - len(message_history) + # Ensure the last request has a timestamp (history processors may create new ModelRequest objects without one) + last_request = message_history[-1] + if isinstance(last_request, _messages.ModelRequest) and last_request.timestamp is None: + last_request.timestamp = self.request.timestamp + # Merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts, # but don't store it in the message history on state. This is just for the benefit of model classes that want clear user/assistant boundaries. 
# See `tests/test_tools.py::test_parallel_tool_return_with_deferred` for an example where this is necessary @@ -780,7 +786,7 @@ def _handle_final_result( # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: - messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id)) + messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id, timestamp=now_utc())) return End(final_result) @@ -1359,6 +1365,7 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess merged_message = _messages.ModelRequest( parts=parts, instructions=last_message.instructions or message.instructions, + timestamp=message.timestamp or last_message.timestamp, ) clean_messages[-1] = merged_message else: diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 85ed332d0b..baa7ac841a 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -524,6 +524,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index cc99f80e74..1211fc8d6d 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -585,7 +585,11 @@ async def on_complete() -> None: # For backwards compatibility, append a new ModelRequest using the tool returns and retries if parts: - messages.append(_messages.ModelRequest(parts, run_id=graph_ctx.state.run_id)) + messages.append( + _messages.ModelRequest( + parts, run_id=graph_ctx.state.run_id, timestamp=_utils.now_utc() + ) + ) await agent_run.next(_agent_graph.SetFinalResult(final_result)) @@ -1030,6 +1034,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index f363b5d990..e27e6e054c 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -169,6 +169,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index c5adf5221d..79465fe8b0 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -824,6 +824,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 60c8122686..ba38aa9304 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -769,6 +769,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 42fc2a872e..0016d76084 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -843,6 
+843,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 826cf754b2..e0a1da744e 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1001,6 +1001,11 @@ class ModelRequest: _: KW_ONLY + # Default is None for backwards compatibility with old serialized messages that don't have this field. + # Using a default_factory would incorrectly fill in the current time for deserialized historical messages. + timestamp: datetime | None = None + """The timestamp when the request was sent to the model.""" + instructions: str | None = None """The instructions for the model.""" @@ -1016,7 +1021,7 @@ class ModelRequest: @classmethod def user_text_prompt(cls, user_prompt: str, *, instructions: str | None = None) -> ModelRequest: """Create a `ModelRequest` with a single user prompt as text.""" - return cls(parts=[UserPromptPart(user_prompt)], instructions=instructions) + return cls(parts=[UserPromptPart(user_prompt)], instructions=instructions, timestamp=_now_utc()) __repr__ = _utils.dataclasses_no_defaults_repr @@ -1242,9 +1247,10 @@ class ModelResponse: """The name of the model that generated the response.""" timestamp: datetime = field(default_factory=_now_utc) - """The timestamp of the response. + """The timestamp when the response was received locally. - If the model provides a timestamp in the response (as OpenAI does) that will be used. + This is always a high-precision local datetime. Provider-specific timestamps + (if available) are stored in `provider_details['timestamp']`. """ kind: Literal['response'] = 'response' diff --git a/pydantic_ai_slim/pydantic_ai/models/anthropic.py b/pydantic_ai_slim/pydantic_ai/models/anthropic.py index 9bc04f7619..808a2f00f9 100644 --- a/pydantic_ai_slim/pydantic_ai/models/anthropic.py +++ b/pydantic_ai_slim/pydantic_ai/models/anthropic.py @@ -580,7 +580,6 @@ async def _process_streamed_response( model_request_parameters=model_request_parameters, _model_name=first_chunk.message.model, _response=peekable_response, - _timestamp=_utils.now_utc(), _provider_name=self._provider.name, _provider_url=self._provider.base_url, ) @@ -1142,9 +1141,9 @@ class AnthropicStreamedResponse(StreamedResponse): _model_name: AnthropicModelName _response: AsyncIterable[BetaRawMessageStreamEvent] - _timestamp: datetime _provider_name: str _provider_url: str + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 current_block: BetaContentBlock | None = None diff --git a/pydantic_ai_slim/pydantic_ai/models/google.py b/pydantic_ai_slim/pydantic_ai/models/google.py index c6f5459f08..d05fc5f1ef 100644 --- a/pydantic_ai_slim/pydantic_ai/models/google.py +++ b/pydantic_ai_slim/pydantic_ai/models/google.py @@ -495,13 +495,17 @@ def _process_response(self, response: GenerateContentResponse) -> ModelResponse: candidate = response.candidates[0] vendor_id = response.response_id - vendor_details: dict[str, Any] | None = None finish_reason: FinishReason | None = None + vendor_details: dict[str, Any] = {} + raw_finish_reason = candidate.finish_reason if raw_finish_reason: # pragma: no branch - vendor_details = {'finish_reason': raw_finish_reason.value} + vendor_details['finish_reason'] = raw_finish_reason.value finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if response.create_time 
is not None: # pragma: no branch + vendor_details['timestamp'] = response.create_time + if candidate.content is None or candidate.content.parts is None: if finish_reason == 'content_filter' and raw_finish_reason: raise UnexpectedModelBehavior( @@ -520,7 +524,7 @@ def _process_response(self, response: GenerateContentResponse) -> ModelResponse: self._provider.base_url, usage, vendor_id=vendor_id, - vendor_details=vendor_details, + vendor_details=vendor_details or None, finish_reason=finish_reason, url_context_metadata=candidate.url_context_metadata, ) @@ -538,9 +542,9 @@ async def _process_streamed_response( model_request_parameters=model_request_parameters, _model_name=first_chunk.model_version or self._model_name, _response=peekable_response, - _timestamp=first_chunk.create_time or _utils.now_utc(), _provider_name=self._provider.name, _provider_url=self._provider.base_url, + _provider_timestamp=first_chunk.create_time, ) async def _map_messages( @@ -662,9 +666,10 @@ class GeminiStreamedResponse(StreamedResponse): _model_name: GoogleModelName _response: AsyncIterator[GenerateContentResponse] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 code_execution_tool_call_id: str | None = None @@ -680,9 +685,15 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: self.provider_response_id = chunk.response_id raw_finish_reason = candidate.finish_reason + provider_details_dict: dict[str, Any] = {} if raw_finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason.value} + provider_details_dict['finish_reason'] = raw_finish_reason.value self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: + # _provider_timestamp is always None in Google streaming cassettes + provider_details_dict['timestamp'] = self._provider_timestamp # pragma: no cover + if provider_details_dict: + self.provider_details = provider_details_dict # Google streams the grounding metadata (including the web search queries and results) # _after_ the text that was generated using it, so it would show up out of order in the stream, diff --git a/pydantic_ai_slim/pydantic_ai/models/groq.py b/pydantic_ai_slim/pydantic_ai/models/groq.py index d5f70fa451..a3d1697f4c 100644 --- a/pydantic_ai_slim/pydantic_ai/models/groq.py +++ b/pydantic_ai_slim/pydantic_ai/models/groq.py @@ -207,7 +207,6 @@ async def request( return ModelResponse( parts=[tool_call_part], model_name=e.model_name, - timestamp=_utils.now_utc(), provider_name=self._provider.name, provider_url=self.base_url, finish_reason='error', @@ -321,7 +320,6 @@ async def _completions_create( def _process_response(self, response: chat.ChatCompletion) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" - timestamp = number_to_datetime(response.created) choice = response.choices[0] items: list[ModelResponsePart] = [] if choice.message.reasoning is not None: @@ -341,13 +339,14 @@ def _process_response(self, response: chat.ChatCompletion) -> ModelResponse: items.append(ToolCallPart(tool_name=c.function.name, args=c.function.arguments, tool_call_id=c.id)) raw_finish_reason = choice.finish_reason - provider_details = {'finish_reason': raw_finish_reason} + provider_details: dict[str, Any] = {'finish_reason': raw_finish_reason} + if response.created: # 
pragma: no branch + provider_details['timestamp'] = number_to_datetime(response.created) finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) return ModelResponse( parts=items, usage=_map_usage(response), model_name=response.model, - timestamp=timestamp, provider_response_id=response.id, provider_name=self._provider.name, provider_url=self.base_url, @@ -371,9 +370,9 @@ async def _process_streamed_response( _response=peekable_response, _model_name=first_chunk.model, _model_profile=self.profile, - _timestamp=number_to_datetime(first_chunk.created), _provider_name=self._provider.name, _provider_url=self.base_url, + _provider_timestamp=number_to_datetime(first_chunk.created) if first_chunk.created else None, ) def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]: @@ -525,9 +524,10 @@ class GroqStreamedResponse(StreamedResponse): _model_name: GroqModelName _model_profile: ModelProfile _response: AsyncIterable[chat.ChatCompletionChunk] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 try: @@ -545,9 +545,14 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: except IndexError: continue + provider_details_dict: dict[str, Any] = {} if raw_finish_reason := choice.finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details_dict['finish_reason'] = raw_finish_reason self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: # pragma: no branch + provider_details_dict['timestamp'] = self._provider_timestamp + if provider_details_dict: # pragma: no branch + self.provider_details = provider_details_dict if choice.delta.reasoning is not None: if not reasoning: diff --git a/pydantic_ai_slim/pydantic_ai/models/huggingface.py b/pydantic_ai_slim/pydantic_ai/models/huggingface.py index ab6652dbb4..7c71139c88 100644 --- a/pydantic_ai_slim/pydantic_ai/models/huggingface.py +++ b/pydantic_ai_slim/pydantic_ai/models/huggingface.py @@ -11,7 +11,7 @@ from .. 
import ModelHTTPError, UnexpectedModelBehavior, _utils, usage from .._run_context import RunContext from .._thinking_part import split_content_into_text_and_thinking -from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc +from .._utils import guard_tool_call_id as _guard_tool_call_id from ..exceptions import UserError from ..messages import ( AudioUrl, @@ -272,11 +272,6 @@ async def _completions_create( def _process_response(self, response: ChatCompletionOutput) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" - if response.created: - timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc) - else: - timestamp = _now_utc() - choice = response.choices[0] content = choice.message.content tool_calls = choice.message.tool_calls @@ -290,14 +285,15 @@ def _process_response(self, response: ChatCompletionOutput) -> ModelResponse: items.append(ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)) raw_finish_reason = choice.finish_reason - provider_details = {'finish_reason': raw_finish_reason} + provider_details: dict[str, Any] = {'finish_reason': raw_finish_reason} + if response.created: # pragma: no branch + provider_details['timestamp'] = datetime.fromtimestamp(response.created, tz=timezone.utc) finish_reason = _FINISH_REASON_MAP.get(cast(TextGenerationOutputFinishReason, raw_finish_reason), None) return ModelResponse( parts=items, usage=_map_usage(response), model_name=response.model, - timestamp=timestamp, provider_response_id=response.id, provider_name=self._provider.name, provider_url=self.base_url, @@ -321,9 +317,11 @@ async def _process_streamed_response( _model_name=first_chunk.model, _model_profile=self.profile, _response=peekable_response, - _timestamp=datetime.fromtimestamp(first_chunk.created, tz=timezone.utc), _provider_name=self._provider.name, _provider_url=self.base_url, + _provider_timestamp=datetime.fromtimestamp(first_chunk.created, tz=timezone.utc) + if first_chunk.created + else None, ) def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ChatCompletionInputTool]: @@ -470,9 +468,10 @@ class HuggingFaceStreamedResponse(StreamedResponse): _model_name: str _model_profile: ModelProfile _response: AsyncIterable[ChatCompletionStreamOutput] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: async for chunk in self._response: @@ -486,11 +485,16 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: except IndexError: continue + provider_details_dict: dict[str, Any] = {} if raw_finish_reason := choice.finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details_dict['finish_reason'] = raw_finish_reason self.finish_reason = _FINISH_REASON_MAP.get( cast(TextGenerationOutputFinishReason, raw_finish_reason), None ) + if self._provider_timestamp is not None: # pragma: no branch + provider_details_dict['timestamp'] = self._provider_timestamp + if provider_details_dict: # pragma: no branch + self.provider_details = provider_details_dict # Handle the text part of the response content = choice.delta.content diff --git a/pydantic_ai_slim/pydantic_ai/models/mistral.py b/pydantic_ai_slim/pydantic_ai/models/mistral.py index 01fee32a25..6f6c196048 100644 --- a/pydantic_ai_slim/pydantic_ai/models/mistral.py +++ 
b/pydantic_ai_slim/pydantic_ai/models/mistral.py @@ -348,11 +348,6 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes """Process a non-streamed response, and prepare a message to return.""" assert response.choices, 'Unexpected empty response choice.' - if response.created: - timestamp = number_to_datetime(response.created) - else: - timestamp = _now_utc() - choice = response.choices[0] content = choice.message.content tool_calls = choice.message.tool_calls @@ -370,14 +365,15 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes parts.append(tool) raw_finish_reason = choice.finish_reason - provider_details = {'finish_reason': raw_finish_reason} + provider_details: dict[str, Any] = {'finish_reason': raw_finish_reason} + if response.created: # pragma: no branch + provider_details['timestamp'] = number_to_datetime(response.created) finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) return ModelResponse( parts=parts, usage=_map_usage(response), model_name=response.model, - timestamp=timestamp, provider_response_id=response.id, provider_name=self._provider.name, provider_url=self._provider.base_url, @@ -398,18 +394,13 @@ async def _process_streamed_response( 'Streamed response ended without content or tool calls' ) - if first_chunk.data.created: - timestamp = number_to_datetime(first_chunk.data.created) - else: - timestamp = _now_utc() - return MistralStreamedResponse( model_request_parameters=model_request_parameters, _response=peekable_response, _model_name=first_chunk.data.model, - _timestamp=timestamp, _provider_name=self._provider.name, _provider_url=self._provider.base_url, + _provider_timestamp=number_to_datetime(first_chunk.data.created) if first_chunk.data.created else None, ) @staticmethod @@ -615,9 +606,10 @@ class MistralStreamedResponse(StreamedResponse): _model_name: MistralModelName _response: AsyncIterable[MistralCompletionEvent] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_now_utc) _delta_content: str = field(default='', init=False) @@ -634,9 +626,14 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: except IndexError: continue + provider_details_dict: dict[str, Any] = {} if raw_finish_reason := choice.finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details_dict['finish_reason'] = raw_finish_reason self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: # pragma: no branch + provider_details_dict['timestamp'] = self._provider_timestamp + if provider_details_dict: # pragma: no branch + self.provider_details = provider_details_dict # Handle the text part of the response content = choice.delta.content diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py index efe9629c3a..d62c18eb33 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openai.py +++ b/pydantic_ai_slim/pydantic_ai/models/openai.py @@ -597,7 +597,7 @@ def _validate_completion(self, response: chat.ChatCompletion) -> chat.ChatComple """ return chat.ChatCompletion.model_validate(response.model_dump()) - def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any]: + def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any] | None: """Hook that maps response content to provider details.
This method may be overridden by subclasses of `OpenAIChatModel` to apply custom mappings. @@ -615,10 +615,8 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons f'Invalid response from {self.system} chat completions endpoint, expected JSON data' ) - if response.created: - timestamp = number_to_datetime(response.created) - else: - timestamp = _now_utc() + timestamp = _now_utc() + if not response.created: response.created = int(timestamp.timestamp()) # Workaround for local Ollama which sometimes returns a `None` finish reason. @@ -654,12 +652,18 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons part.tool_call_id = _guard_tool_call_id(part) items.append(part) + provider_details = self._process_provider_details(response) + if response.created: # pragma: no branch + if provider_details is None: + provider_details = {} + provider_details['timestamp'] = number_to_datetime(response.created) + return ModelResponse( parts=items, usage=self._map_usage(response), model_name=response.model, timestamp=timestamp, - provider_details=self._process_provider_details(response), + provider_details=provider_details or None, provider_response_id=response.id, provider_name=self._provider.name, provider_url=self._provider.base_url, @@ -714,9 +718,9 @@ async def _process_streamed_response( _model_name=model_name, _model_profile=self.profile, _response=peekable_response, - _timestamp=number_to_datetime(first_chunk.created), _provider_name=self._provider.name, _provider_url=self._provider.base_url, + _provider_timestamp=number_to_datetime(first_chunk.created) if first_chunk.created else None, ) @property @@ -1180,14 +1184,16 @@ def _process_response( # noqa: C901 self, response: responses.Response, model_request_parameters: ModelRequestParameters ) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" - timestamp = number_to_datetime(response.created_at) + timestamp = _now_utc() items: list[ModelResponsePart] = [] for item in response.output: if isinstance(item, responses.ResponseReasoningItem): signature = item.encrypted_content # Handle raw CoT content from gpt-oss models + provider_details: dict[str, Any] = {} raw_content: list[str] | None = [c.text for c in item.content] if item.content else None - provider_details: dict[str, Any] | None = {'raw_content': raw_content} if raw_content else None + if raw_content: + provider_details['raw_content'] = raw_content if item.summary: for summary in item.summary: @@ -1198,7 +1204,7 @@ def _process_response( # noqa: C901 id=item.id, signature=signature, provider_name=self.system if (signature or provider_details) else None, - provider_details=provider_details, + provider_details=provider_details or None, ) ) # We only need to store the signature and raw_content once. 
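The net effect of the `_process_response` changes above is worth spelling out: `ModelResponse.timestamp` is now always the local receipt time, and any provider-reported creation time moves into `provider_details['timestamp']`. A minimal sketch of how a consumer might pick between the two after this change; the helper name is hypothetical, not part of the library:

```python
# Sketch under the conventions introduced in this diff; `provider_or_local_timestamp`
# is a hypothetical helper, not library API.
from datetime import datetime

from pydantic_ai.messages import ModelResponse


def provider_or_local_timestamp(response: ModelResponse) -> datetime:
    # `response.timestamp` is always the high-precision local (UTC) receipt time;
    # a provider-reported timestamp, when present, lives in provider_details.
    details = response.provider_details or {}
    return details.get('timestamp', response.timestamp)
```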
@@ -1211,7 +1217,7 @@ def _process_response( # noqa: C901 id=item.id, signature=signature, provider_name=self.system if (signature or provider_details) else None, - provider_details=provider_details, + provider_details=provider_details or None, ) ) elif isinstance(item, responses.ResponseOutputMessage): @@ -1271,11 +1277,13 @@ def _process_response( # noqa: C901 pass finish_reason: FinishReason | None = None - provider_details: dict[str, Any] | None = None + provider_details: dict[str, Any] = {} raw_finish_reason = details.reason if (details := response.incomplete_details) else response.status if raw_finish_reason: - provider_details = {'finish_reason': raw_finish_reason} + provider_details['finish_reason'] = raw_finish_reason finish_reason = _RESPONSES_FINISH_REASON_MAP.get(raw_finish_reason) + if response.created_at: # pragma: no branch + provider_details['timestamp'] = number_to_datetime(response.created_at) return ModelResponse( parts=items, @@ -1286,7 +1294,7 @@ def _process_response( # noqa: C901 provider_name=self._provider.name, provider_url=self._provider.base_url, finish_reason=finish_reason, - provider_details=provider_details, + provider_details=provider_details or None, ) async def _process_streamed_response( @@ -1305,9 +1313,11 @@ async def _process_streamed_response( model_request_parameters=model_request_parameters, _model_name=first_chunk.response.model, _response=peekable_response, - _timestamp=number_to_datetime(first_chunk.response.created_at), _provider_name=self._provider.name, _provider_url=self._provider.base_url, + _provider_timestamp=number_to_datetime(first_chunk.response.created_at) + if first_chunk.response.created_at + else None, ) @overload @@ -1916,9 +1926,10 @@ class OpenAIStreamedResponse(StreamedResponse): _model_name: OpenAIModelName _model_profile: ModelProfile _response: AsyncIterable[ChatCompletionChunk] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: async for chunk in self._validate_response(): @@ -1942,7 +1953,7 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: if raw_finish_reason := choice.finish_reason: self.finish_reason = self._map_finish_reason(raw_finish_reason) - if provider_details := self._map_provider_details(chunk): + if provider_details := self._map_provider_details(chunk): # pragma: no branch self.provider_details = provider_details for event in self._map_part_delta(choice): @@ -2035,7 +2046,12 @@ def _map_provider_details(self, chunk: ChatCompletionChunk) -> dict[str, Any] | This method may be overridden by subclasses of `OpenAIStreamResponse` to customize the provider details. 
""" - return _map_provider_details(chunk.choices[0]) + provider_details = _map_provider_details(chunk.choices[0]) + if self._provider_timestamp is not None: # pragma: no branch + if provider_details is None: + provider_details = {} + provider_details['timestamp'] = self._provider_timestamp + return provider_details or None def _map_usage(self, response: ChatCompletionChunk) -> usage.RequestUsage: return _map_usage(response, self._provider_name, self._provider_url, self.model_name) @@ -2076,9 +2092,10 @@ class OpenAIResponsesStreamedResponse(StreamedResponse): _model_name: OpenAIModelName _response: AsyncIterable[responses.ResponseStreamEvent] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 async for chunk in self._response: @@ -2089,9 +2106,13 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: raw_finish_reason = ( details.reason if (details := chunk.response.incomplete_details) else chunk.response.status ) + provider_details: dict[str, Any] = {} if raw_finish_reason: # pragma: no branch - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details['finish_reason'] = raw_finish_reason self.finish_reason = _RESPONSES_FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: # pragma: no branch + provider_details['timestamp'] = self._provider_timestamp + self.provider_details = provider_details or None elif isinstance(chunk, responses.ResponseContentPartAddedEvent): pass # there's nothing we need to do here @@ -2486,7 +2507,7 @@ def _map_usage( def _map_provider_details( choice: chat_completion_chunk.Choice | chat_completion.Choice, -) -> dict[str, Any]: +) -> dict[str, Any] | None: provider_details: dict[str, Any] = {} # Add logprobs to vendor_details if available @@ -2495,7 +2516,7 @@ def _map_provider_details( if raw_finish_reason := choice.finish_reason: provider_details['finish_reason'] = raw_finish_reason - return provider_details + return provider_details or None def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]: diff --git a/pydantic_ai_slim/pydantic_ai/models/openrouter.py b/pydantic_ai_slim/pydantic_ai/models/openrouter.py index 7ac019d43c..190b788cab 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openrouter.py +++ b/pydantic_ai_slim/pydantic_ai/models/openrouter.py @@ -471,7 +471,8 @@ def _map_openrouter_provider_details( provider_details: dict[str, Any] = {} provider_details['downstream_provider'] = response.provider - provider_details['finish_reason'] = response.choices[0].native_finish_reason + if native_finish_reason := response.choices[0].native_finish_reason: + provider_details['finish_reason'] = native_finish_reason if usage := response.usage: if cost := usage.cost: @@ -565,12 +566,13 @@ def _process_thinking(self, message: chat.ChatCompletionMessage) -> list[Thinkin return super()._process_thinking(message) @override - def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any]: + def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any] | None: assert isinstance(response, _OpenRouterChatCompletion) provider_details = super()._process_provider_details(response) - provider_details.update(_map_openrouter_provider_details(response)) - return provider_details + if openrouter_details := 
_map_openrouter_provider_details(response): + provider_details = {**(provider_details or {}), **openrouter_details} + return provider_details or None @dataclass class _MapModelResponseContext(OpenAIChatModel._MapModelResponseContext): # type: ignore[reportPrivateUsage] @@ -686,8 +688,16 @@ def _map_thinking_delta(self, choice: chat_completion_chunk.Choice) -> Iterable[ def _map_provider_details(self, chunk: chat.ChatCompletionChunk) -> dict[str, Any] | None: assert isinstance(chunk, _OpenRouterChatCompletionChunk) - if provider_details := super()._map_provider_details(chunk): + if provider_details := super()._map_provider_details(chunk): # pragma: no branch provider_details.update(_map_openrouter_provider_details(chunk)) + # Preserve finish_reason from previous chunk if the current chunk doesn't have one. + # After the chunk with native_finish_reason 'completed', OpenRouter sends one more + # chunk with usage data (see cassette test_openrouter_stream_with_native_options.yaml) + # which has native_finish_reason: null. Since provider_details is replaced on each + # chunk, we need to carry forward the finish_reason from the previous chunk. + if 'finish_reason' not in provider_details and self.provider_details: # pragma: no branch + if previous_finish_reason := self.provider_details.get('finish_reason'): + provider_details['finish_reason'] = previous_finish_reason return provider_details @override diff --git a/pydantic_ai_slim/pydantic_ai/models/outlines.py b/pydantic_ai_slim/pydantic_ai/models/outlines.py index d8dc6b2241..c071426d0e 100644 --- a/pydantic_ai_slim/pydantic_ai/models/outlines.py +++ b/pydantic_ai_slim/pydantic_ai/models/outlines.py @@ -8,8 +8,8 @@ import io from collections.abc import AsyncIterable, AsyncIterator, Sequence from contextlib import asynccontextmanager -from dataclasses import dataclass, replace -from datetime import datetime, timezone +from dataclasses import dataclass, field, replace +from datetime import datetime from typing import TYPE_CHECKING, Any, Literal, cast from typing_extensions import assert_never @@ -518,13 +518,11 @@ async def _process_streamed_response( if isinstance(first_chunk, _utils.Unset): # pragma: no cover raise UnexpectedModelBehavior('Streamed response ended without content or tool calls') - timestamp = datetime.now(tz=timezone.utc) return OutlinesStreamedResponse( model_request_parameters=model_request_parameters, _model_name=self._model_name, _model_profile=self.profile, _response=peekable_response, - _timestamp=timestamp, _provider_name='outlines', ) @@ -544,9 +542,9 @@ class OutlinesStreamedResponse(StreamedResponse): _model_name: str _model_profile: ModelProfile _response: AsyncIterable[str] - _timestamp: datetime _provider_name: str _provider_url: str | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: async for content in self._response: diff --git a/pydantic_ai_slim/pydantic_ai/run.py b/pydantic_ai_slim/pydantic_ai/run.py index 0ed3e2455d..81529856f8 100644 --- a/pydantic_ai_slim/pydantic_ai/run.py +++ b/pydantic_ai_slim/pydantic_ai/run.py @@ -64,6 +64,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), @@ -243,6 +244,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 0e1230a7a5..62b4bc13ad 100644 --- 
a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -5,7 +5,7 @@ import re from collections.abc import Callable, Sequence from dataclasses import dataclass, field -from datetime import timezone +from datetime import datetime, timezone from decimal import Decimal from functools import cached_property from typing import Annotated, Any, TypeVar, cast @@ -240,6 +240,7 @@ async def test_sync_request_text_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -256,6 +257,7 @@ async def test_sync_request_text_response(allow_model_requests: None): ), ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1057,6 +1059,7 @@ async def test_request_structured_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1086,6 +1089,7 @@ async def test_request_structured_response(allow_model_requests: None): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1128,6 +1132,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1157,6 +1162,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1186,6 +1192,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1542,6 +1549,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1584,6 +1592,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1744,6 +1753,7 @@ def simple_instructions(): [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -1782,6 +1792,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1840,6 +1851,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1903,6 +1915,7 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1950,6 +1963,7 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2010,6 +2024,7 @@ async def test_anthropic_model_thinking_part_redacted_stream(allow_model_request 
timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2154,6 +2169,7 @@ async def test_anthropic_model_thinking_part_from_other_model( timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2164,26 +2180,11 @@ async def test_anthropic_model_thinking_part_from_other_model( signature=IsStr(), provider_name='openai', ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), TextPart(content=IsStr(), id='msg_68c1fdbecbf081a18085a084257a9aef06da9901a3d98ab7'), ], usage=RequestUsage(input_tokens=23, output_tokens=2211, details={'reasoning_tokens': 1920}), @@ -2191,7 +2192,10 @@ async def test_anthropic_model_thinking_part_from_other_model( timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 9, 10, 22, 37, 27, tzinfo=timezone.utc), + }, provider_response_id='resp_68c1fda6f11081a1b9fa80ae9122743506da9901a3d98ab7', finish_reason='stop', run_id=IsStr(), @@ -2217,6 +2221,7 @@ async def test_anthropic_model_thinking_part_from_other_model( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2274,6 +2279,7 @@ async def test_anthropic_model_thinking_part_stream(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2659,7 +2665,7 @@ async def test_anthropic_model_empty_message_on_history(allow_model_requests: No result = await agent.run( 'I need a potato!', message_history=[ - ModelRequest(parts=[], instructions='You are a helpful assistant.', kind='request'), + ModelRequest(parts=[], instructions='You are a helpful assistant.', kind='request', timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='Hello, how can I help you?')], kind='response'), ], ) @@ -2685,6 +2691,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a [ ModelRequest( parts=[UserPromptPart(content='What is the weather in San Francisco today?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2894,6 +2901,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3108,6 +3116,7 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), 
ModelResponse( @@ -4155,6 +4164,7 @@ async def test_anthropic_web_fetch_tool(allow_model_requests: None, anthropic_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4237,6 +4247,7 @@ async def test_anthropic_web_fetch_tool(allow_model_requests: None, anthropic_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4307,6 +4318,7 @@ async def test_anthropic_web_fetch_tool(allow_model_requests: None, anthropic_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4404,6 +4416,7 @@ async def test_anthropic_web_fetch_tool_stream( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4865,7 +4878,7 @@ async def test_anthropic_web_fetch_tool_message_replay(): # Create message history with BuiltinToolCallPart and BuiltinToolReturnPart messages = [ - ModelRequest(parts=[UserPromptPart(content='Test')]), + ModelRequest(parts=[UserPromptPart(content='Test')], timestamp=IsDatetime()), ModelResponse( parts=[ BuiltinToolCallPart( @@ -5019,6 +5032,7 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5106,6 +5120,7 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5272,6 +5287,7 @@ async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropi timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5535,6 +5551,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop [ ModelRequest( parts=[UserPromptPart(content='How much is 3 * 12390?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='Always use the code execution tool for math.', run_id=IsStr(), ), @@ -5603,6 +5620,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), instructions='Always use the code execution tool for math.', run_id=IsStr(), ), @@ -5685,6 +5703,7 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6270,6 +6289,7 @@ async def test_anthropic_server_tool_pass_history_to_another_provider( [ ModelRequest( parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6284,7 +6304,10 @@ async def test_anthropic_server_tool_pass_history_to_another_provider( timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 11, 19, 23, 41, 8, tzinfo=timezone.utc), + }, provider_response_id='resp_0dcd74f01910b54500691e5594957481a0ac36dde76eca939f', finish_reason='stop', run_id=IsStr(), @@ -6328,14 +6351,16 @@ async def test_anthropic_empty_content_filtering(env: TestEnv): # Test _map_message with empty string in user prompt messages_empty_string: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content='')], kind='request'), + 
ModelRequest(parts=[UserPromptPart(content='')], kind='request', timestamp=IsDatetime()), ] _, anthropic_messages = await model._map_message(messages_empty_string, ModelRequestParameters(), {}) # type: ignore[attr-defined] assert anthropic_messages == snapshot([]) # Empty content should be filtered out # Test _map_message with list containing empty strings in user prompt messages_mixed_content: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content=['', 'Hello', '', 'World'])], kind='request'), + ModelRequest( + parts=[UserPromptPart(content=['', 'Hello', '', 'World'])], kind='request', timestamp=IsDatetime() + ), ] _, anthropic_messages = await model._map_message(messages_mixed_content, ModelRequestParameters(), {}) # type: ignore[attr-defined] assert anthropic_messages == snapshot( @@ -6344,9 +6369,9 @@ async def test_anthropic_empty_content_filtering(env: TestEnv): # Test _map_message with empty assistant response messages: list[ModelMessage] = [ - ModelRequest(parts=[SystemPromptPart(content='You are helpful')], kind='request'), + ModelRequest(parts=[SystemPromptPart(content='You are helpful')], kind='request', timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='')], kind='response'), # Empty response - ModelRequest(parts=[UserPromptPart(content='Hello')], kind='request'), + ModelRequest(parts=[UserPromptPart(content='Hello')], kind='request', timestamp=IsDatetime()), ] _, anthropic_messages = await model._map_message(messages, ModelRequestParameters(), {}) # type: ignore[attr-defined] # The empty assistant message should be filtered out @@ -6385,6 +6410,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6419,6 +6445,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6457,6 +6484,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -6491,6 +6519,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6528,6 +6557,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6587,6 +6617,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6621,6 +6652,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6673,6 +6705,7 @@ class CountryLanguage(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index 185468021a..190f5b3e52 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -131,6 +131,7 @@ async def test_bedrock_model(allow_model_requests: None, bedrock_provider: Bedro timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -308,6 +309,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -339,6 +341,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( 
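As a side note on the `timestamp: datetime | None = None` default added to `ModelRequest` in `messages.py` above, here is a minimal sketch of the backwards-compatibility behaviour it is designed around (the payload shape is abbreviated; `ModelMessagesTypeAdapter` is the library's existing adapter for serialized messages):

```python
# Sketch of the deserialization behaviour motivating the None default.
from pydantic_ai.messages import ModelMessagesTypeAdapter

# A message serialized before this change has no 'timestamp' key on the request.
old_payload = [{'kind': 'request', 'parts': [{'part_kind': 'user-prompt', 'content': 'hello'}]}]

messages = ModelMessagesTypeAdapter.validate_python(old_payload)
# With a plain None default (rather than a default_factory), historical messages
# keep timestamp=None instead of being backfilled with the deserialization time.
assert messages[0].timestamp is None
```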
@@ -370,6 +373,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -446,6 +450,7 @@ async def get_capital(country: str) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -477,6 +482,7 @@ async def get_capital(country: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -775,6 +781,7 @@ def instructions() -> str: [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -817,9 +824,14 @@ async def test_bedrock_multiple_documents_in_history( result = await agent.run( 'What is in the documents?', message_history=[ - ModelRequest(parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])]), + ModelRequest( + parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])], timestamp=IsDatetime() + ), ModelResponse(parts=[TextPart(content='foo bar')]), - ModelRequest(parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])]), + ModelRequest( + parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])], + timestamp=IsDatetime(), + ), ModelResponse(parts=[TextPart(content='foo bar 2')]), ], ) @@ -838,6 +850,7 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None, [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -867,6 +880,7 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -901,6 +915,7 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None, [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -937,6 +952,7 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -985,6 +1001,7 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1022,6 +1039,7 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1081,6 +1099,7 @@ async def test_bedrock_model_thinking_part_redacted_stream( timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1216,6 +1235,7 @@ async def test_bedrock_model_thinking_part_from_other_model( timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1223,25 +1243,13 @@ async def test_bedrock_model_thinking_part_from_other_model( ThinkingPart( content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - 
signature='gAAAAABowgAKxFTo-oXVZ9WpxX1o2XmQkqXqGTeqSbHjr1hsNXhe0QDBXDnKBMrBVbYympkJVMbAIsYJuZ8P3-DmXZVwYJR_F1cfpCbt97TxVSbG7WIbUp-H1vYpN3oA2-hlP-G76YzOGJzHQy1bWWluUC4GsPP194NpVANRnTUBQakfwhOgk9WE2Op7SyzfdHxYV5vpRPcrXRMrLZYZFUXM6D6ROZljjaZKNj9KaluIOdiTZydQnKVyZs0ffjIpNe6Cn9jJNAUH-cxKfOJ3fmUVN213tTr-PveUkAdlYwCRdtq_IlrFrr1gp6hiMgtdQXxSdtjPuoMfQEZTsI-FiAGFipYDrN5Gu_YXlqX1Lmzbb2famCXTYp6bWljYT14pCSMA-OZrJWsgj4tSahyZIgNq_E_cvHnQ-iJo1ACH0Jt22soOFBhAhSG8rLOG8O5ZkmF7sGUr1MbP56LLkz29NPgh98Zsyxp4tM33QH5XPrMC7MOfTvzj8TyhRH31CWHScQl3AJq1o3z2K3qgl6spkmWIwWLjbo4DBzFz6-wRPBm5Fv60hct1oFuYjXL-ntOBASLOAES7U3Cvb56VPex7JdmTyzb-XP7jNhYzWK-69HgGZaMhOJJmLGZhu8Xp9P6GPnXiQpyL5LvcX_FEiR6CzpkhhS54IryQx2UW7VadUMnpvwEUwtT2c9xoh6WEwt2kTDj65DyzRwFdcms3WG_B1cSe5iwBN1JAQm3ay04dSG-a5JNVqFyaW7r1NcVts3HWC2c-S9Z_Xjse548XftM_aD97KTqoiR5GxU95geXvrWI8szDSYSueSGCTI8L7bCDO-iKE4RQEmyS8ZbqMSWyQgClVQOR5CF3jPKb6hP7ofoQlPRuMyMY8AqyWGeY9bbWb-LjrSDpRTAR6af8Ip5JYr4rlcG1YqEWYT-MqiCPw3ZJqBXUICSpz9ZHQNTrYIzkJZqPg-hCqvFkOCUtvOYSDtGkAe9x1ekPqlV0IuWLxAmjqbkGH0QCaYAF90wVQUgWPkVWfQ6ULRz2sveQDZf0P8rVZw6ATEvZVnkml6VDbaH69lMyvzls7suvEZJxS5osyjrGfkt6L4nsvhZS7Nuxj2TcRxSEXxo5kULEqAO85Ivsm4j7R1Cxb2h8I4ZZZ_-DnkbWsgd7DELMI-CYtpAWLFl4K4VaMBT6mNAuud545BemUlWnQgmrde4aS7Q_W5GP11iQea9_JcJr6DMf4Y40NDr_fPVU5p7q1bnc1xtwkIpyx0uEeXHEZDR8k-5apBXScJtmelzpiy-25oJdSU5xtgVPrb77kVyJofPtujplZoqMh6MOqTdIhIMm_Goy_Wne4W39hVI01b2vwduBaCCaX6M8uACX96s454WPitX4MYAVc65UHF0BTFskEcbY5bFZpzcWb39VTfra-Ru2URvdo_66qmUd-03XzLKiMsqJHGclhaU6XBqaIo9qD8FjLVT9DOx56eh3GFvYA1dxvgbp6gyOg7bOBL0KDarT9Vmo40vGvwyCT_a2S_6Oki6uBU_3bf-jGvtum4tkN--wZkBrhOj7L8onItPoAZQXjYXrcXfVC1KR_xA0IOxYZD59G1rBxTDlvatIFwhvoISinkU-zPkKMpralHlxDicmJrBsKsy-mZWCF5qHeWF36pjE35dE9GxR28xw1Ed0pA_kOIgMKSKCiRWUYY8D1jAHKzimnj_4VTKR05kTp30pasr0IUMl2celsQMDv1D2atEJ_65CeRio5cnNGUR_Z73LJ-fqLkSjSxlE2YvtcKX7bdF6bSq3EqDtOdLVUjYl_pxRaUNMRmahQUJXGsDx7X-W9xUgQmAq09qT3lh1fhVUgdtUuuaoNY_M1s5V0E5ePuu_C6Duuz8WCcecbhrcbI3FDQSJn_XHK6ImLMYBowGRYVkBE_Rf7q7Hj4zdF-3bVE_QDce3syZNshCYK5kO8mvADptgdNVG7lEiZ9TIQPBd-XWRUrZ3XvIfGVJFVMjh_Laq8RTDyvvId7iQDuvq89hQ86hlfWteEl8HzuwpakWnogg3CCStX5CMGpYUWWkOCUu2LCH2H4EBaeCcAPLCmEoxcpKS182kYLm8-4ShRz-YOMIEmE9TL2za15I6BCBi9OhQGcLSl4BquhfBVHyxmkEN7_g102yI1Ocucux8q_HLMo5UZz0KALRQy4qmNpnLg9f4Yetj6msezjuU17Ji1ofIcadglOYy2J3Aswf58M9fCwCfB6hAHRYM2XkYzJ3nc0VosWA0er90zqKOeM1-erWC-skbupO-8nw9DA5OtnJTZOLnhGRjzXqna0E5R69wOHi3yvb3zzv2K9fLMKi11bCM_cnel9ItcFM-AYQ0AhBTZ3sTn-tpIf3IVNCvnCxMWvbO-MBmoexQnPorA0SL6n_nL49Y9Zb7UgwCyNGmhsFjIlSXu-YG-yCV1lVXBYoEPDwa2eCaMwph0QneXPHHMUs_i9PuFVI-nwfEiwU0b4tk8x3tWdkltvtzhjB8fxQxJNrk-ykNhuEYfQMQ0_MCqIRD097_gjO8q-eFUjnuiVqqoQ9_rH9QCxABdA8afoNt0hFxBwR6d57P81_XKOnApyrPx0DjsuKVTBFoCWccKX4DZuQT_PhmsFtPquNp6OPWQM5a8HzKntjz_HgFYnyS5p6n0hBGZVC_GDtFEm8JELcwuVoSLSXhI_XKnck2FIhHA5YQ4vLGOhCEEZoINkDdq3oNgm-NiP-DpG2LYetLl4ljlUpRBUizmWn4Fr3jhIt8rmQwqmFj6aMDSEM0Sgen9DsUH7H3uGK2NipvFv2Uxic5aXAKQ37EFjxPFqvKXlDl-hLnUXtkXLXBbmgCJJw6nBvm-SeIxU_eKnWHkhtdnkNZrmNFaq0OYZKk-moYSxEgzxasQNYGtkN89LqAhRTS6dIbb4nXa8ArvuHTJ_qpLFjGF3SSX98Y53cgtSdGTTmHQ6_v0BmeKCWhRd83vPrmFosif57AXyBVk0HJ5YdeueitsBCyXcJmeCntrT4zDlujwuMWK7wDO4vGMj3nIIyuJMJjtpD_auuDLmpYHqmKTHm8Ob8R2jJIwDhJIupkTldX5kHZmo6Nyh8tjeMgeEbp4Tp05CfyUTWWM16gaGkwW2Gto3sJtv0AiA_PzSN_dDziD5fRSH2Q2JTW4g03Uc9SBelL2fFiQifPSc3-mI4i8QHIswd_qPnSAnHxBW6SLJFqY-qIG6soLzt2VnH5hpVvakMfO27A82DQrcoFDFsqRb8KgLEoL5u-6NbgwKSNFjfIrLFg9IzrQI7oktylkFrc_EWL_smmL6iuT5WEYt4jBwtMvyDD6nVHzzx7jd8J3XQqjXfWuH_uTAX6cOHprzaPn05QRAluZgcBL-FSQJ3Qw7PjpoiLyd3DGL77nfl_m9cpAnpz3ojtajP7Gb-aq_xa_JIqxbnuBDBkeyN8pOQp--ZD7T2BOAgS7poVoqPFXRYIJOwKtOcrj6UdPN2yrx-44ZMTJYzwcGELnFRs32PKx8TiiF1pKSwo4NB5Z97_0k_WbyBwyNajMtRUP
mEuTr9VoO7CBwe1r3U3iIZbBKCfJjiG5FQToqzku31_YAs5OIIaV4B9ifLt5PwUA4mO-7XqgO1VQQjt2cUQo3Ui3EKWEJ-ov7F3wf_byGsguBwv2qMuAQiLBqs5jxrJUxyYIJAM7B_TtUjpQnNERvHEkt9TxCN8Kc6L-MejMOfu3VPdArf38naQjvBjBAZDznV639bkIRED7-soJbGMcGEyGWUqAVs9vkFleO9S4YLNvFShwo3ujBd7SMMdAyvi851CXT5uN5SDtaxmQnUGzAXmPJ9-UoJF23lSGB26eMdnIerzFoYMCgWPHyvt949IrsUKnpjuxebqQYVSrppmhIIrD8R255bJGSscVwdbrd9iA9-gHoB3UzCr5pd3gfW9Z6ynT4dQVILqtj0KgrDOHw4AIBqmwaecTBi5BeyXJx2oF1ClqS_7AanfqNToLcAwaKXnrK4RGyrX_mXHUFX9cT-o-eGqhi0lifCcJixwb3kG2AhP1USNNsCz31m40_c7cm7JcqLbzCnz4hvbivUvON5rf6kQ8PrfrjNrZA73VVIKhgZBDHxsHa3skwQvq-JH_3QulELy1-6vL5Kq84bg3ZPQxOUtxBRuyjxEJkpgG-sED2pYsKrUPqo0Ku_ggMTQjvoGGYRBt5uMlVX4pdB1zhOe1ZjcvPb8IwnL_BdLX4NvLpN97KH9Ot45bLeVTCGpv5UH8Nnm5CzQ53wqsOUD-9u5hqrSwx89sF7h8TlN9non95r7b_oHkU1R_czZ-ZjL6EubsUx4w-rWKwVU7GYde-ie62v8jcaLhkM72O4B0UvCfY2t3GtruZ4OirX44hWfOPujFr5L6bOkVSMKONJFooIJ2RIwCw64Mczkle2zQZ1P3u1DrMS5s65h-gNTwSGw3qyQBwF58-um9ycDis6f6O0ggqubsCDlsW7Vdnk_GlETHLDQ7lR_lRG1g3kRQEhKz2iwzxQan01X021EJd4TlocJYafpp8HU_rgcJdUmcvPFgB2xysE6F1vYdUAdovDztLftb5Bad4aKueUfDs8haq9TBgosHQinvKFfazE2StHUaEAVK_BiOYrH1XsrFQlXuMwhQlRgA9L3Q663gMrnhnfcQPSNd7P5EhqbadtddoVrLOKhMD5yBJj9RiC0vamCGVr2LA7hStIPBGysTBanE3u4bT-TKe2qCOskvfR2xU8NSlai9b8d57zkuxklf7LaDnMi-xu9TOqduYFfXOn87uqjaN3_emcq0NExYcQ1fMUMcbOuGoW6qeWlWmMtANjI3VaJCa_v2JYJ4cyl4gUoboC42d2esKg_Em2XfqUkKQh4XTG673LC1ebToWGPRvFtTQM3gZ4Wh5JY4pL58VeSsf1jhINWsytNpgGckHCK11BzUUx4MABT2BuMWf-a_5DV4KYdmXHn_AKAqoZWHgE2hC2Q6DUEaKTm7AV56Cm5vo-NibALDGH1zG8ih5C3dmHvQmES7vUOVM1jPS6k7paHXEwnPFE9M-zg6XmjKjdvSZ04lauZEeCjSJPb4E_v-uWlwkdHsDcTxfj9oTjfEpX0mZxIuT_Ex7Mx2I7DUHDUQgKgZT9n1TQym9patiPO8VYzYuoXrsEeLS1Mk5N3AmQXeB89x85_Xj2plBbDOqqMpAD2uMBXwHI4kut10unkHhl3S0JtA1tE0ukxTRaitpDQveHfao0tQC8gy4JEA6M5AD7iyWOm_iuW9baElC-R_g_6s_X1t2qv4mWwd8P-h7yFm4XEZg_oJEIA40hGwSPKD1d-b9QRz7Kl734V5RvMw1ekdsvZ9dVKNcPffkGX0inTp8RgkOWFUnS0hZpxuNbte3-rGWEt6Syy4x2jaH-Zr6o667kigSt1Q3cQO_eqQtq4VWuFmYIbDzkEbIKmIHY52gh-rB5k-FMQqCs-ay5Blj_IpvfcImMtrZBrbhL89gzGNRonBZEa-9kJeu4jr2_DLzw14KJR5zVNwiGLub3jJkgYqOZZ5ee_oNchx3v68S3wHyFnZA9IIaXRZjYLMrjD699h9SZvkTHdGAwICpyOjrfYbgX_7woRp1ZWBslOamnw6mDqJAk22nb1a8cpdGNP2IjXVRtuqIB8y36bHEFjChDTxERZ2dsz7a2mp5qM2Xz75OGBM77DAjnGpU7GFXDnolAnAsU5T3dd-LLnVlVhvzyuZWg7ZdH-0WsVVCezyIsQnm3WMpdPrlUcHtT6fyY2fhJVIm1QJEES5wEiEPMRrmGQ68V-q8TWlrPan6LU5Kr8Ak0nJKhE-r5bcaemeUbIsY4a9n2YDZck9CI6VGumMccelQ61Bhs5vgQ0W4AID90TXnUtJjWrVcgdhrLCWV_kv2_YSqDDoI6TM0oJKNaoNeG2HXCxXpHy8izUvfMwHvdniW3c4BPnvMpQW83bXrMPteKk-CFXdwQ6bB2PzzXAzWTp5q6D5cLWAyPJjju4AmopBUJmRwp0tjulMCClWqMiB08y8DIWDDLAAaG7Q-de-_Q-T6tZy4LRk_c0sYOtAaNCA1HgTDSLvP4j-xeuu8DrKv5SqefP2J7LLFM_JAi1gRh_84NUvUDvBdexr9wZI8eXjnnoDvP6KTosKCLmSC_ErmtzRXfUg1mz5fNVtlKSm03tqzmfL46iKDATVuEejDtlo34djj7uBV5DUw4lDIpQY1VsO1Ozgpoz9i8sNcRKQ-K3Of-vDL6R28gLBUq0Xo3nm1hAJgjc68C57jrMlJhD8GM6AeoGnnhDTfJ2xuxsdnH6i06qFUKcuTmA8l23Ek-A3ryx8DHAIaRX40d3e5MwaUqbglufHWBGId7KBiaiFuD3LhJC0CLl23XyHf225Rd4lir9LpltmuaRLnyS0FwIGZMaRmxQ-SWB2fDVzj81SJpo9lPDsuLu_ji7AA1cx-PnTj5fVp3APeRmy9E0A2v8hCKm4C6tPuvgC7Xp6MV8epxYIsGRiTy5wlHQE0FUuOdBtBH0rmGJDf4HQJoZHjhDhOJZqkvlDtEowB1mtndHgRz-0lpQurRm-RwKvl4n0quBfWZ1GL_PmiZIO36Iyyw4BRt3c1a5Zc5ilweQcle_-ZxawS1aAXXOaknt2c6AGB5JnmrTz2dXS7A8M20uNp7Cv8RoeiCYjPa1Co3Nr_6BuQL7HFxNsyk1AXDbG2qUJljSeWG3YFkaPHxgTw7aAefXrFFL_GNPi0YtageYJq3WN6lrdQ2CB0g7QLoj9dsHlAGhm8PtUESBUBbSyJVOm1lCuGGbB7psYxOLLO3BSqnXHb0--sDiyCTKMi-80rtMiHttXC3zAxXUFQjTre3a8KNohgPWx1PTAbxf96enJ33rhBV-2ewMIROT9j-K_Esee0eWUcTmt9v0yHW-V5ij0Hopx7oaXadNQLdgBJwUDf6R9xEktHhzUkyJ0g73gjrKQz2EidorhljD9LSFMAlUuRTkUhG35crMduH9TAAEgOHXZI24CD5Fz3n2KgXKoxWHlpaLlTwBXK1xLHVCrqCqvsBo60w5FV7cmdNTBjFbDU1EKSHLopt_aMgtT_6Fg1ZT6H2p0CAvvbinLkTLop3p
SVU1_itnzRHOf3ayHzMrmSN_pI_03Of_63ZuHJmRWRCd7s1PviAo-B1LcG52VTanJz0JCF1RAlPj9-2DIgJLxDgNcPI96cTqZBbLk-rwKlebrmX6d5CBg3V5pmJKkgLIj5FpTmhiXhqDHHJvu-BxfzDQl2c8QtQYF6aygihfCCluN5biEv51XKRDpC-S3sU3USofDTgcg1pznwUvVv2eL8nWywckhIHWnip7z_ptCTmyn7BEzzgRgGLA_pLG17SPRJP6laoXHG_dprfpRM7gcLJZQ2zk29W2zVEpFwWePGpnQbpPjPqcOBiQfewxwnLHEuV8yGBR7Y-SEKrc6M6v8AHYk9oLXaRu1qBKkLUKSzKQhNFtfl-h-J8Adf0W9hxYSt6QNzf1YUuE8H_w2SrUGcVnsCnIQY_xu11sJ-0d-T2oFelzeEoasMeeCDamuFQye14ps0k4cM8vXpk_7ZrVE7rQmEpW40_n1iNHwB4UINg9CnQGXH98DzBBCoGPZpA1SELOwGTcJGcBZVQ5Tfey1SRFwXWJO0QFHfDb5-_tQUj9o30MhJBGxOftnwLaFROLgq3FuSBRM9dYsdlpHe1SILQXKVIwjXcOVMFgmbDq_hMSNFlMvblX9LLBduT9cXk6JhBVcxb8-oKbvbjL7zqQHOgke3ZC6oDEvcew2YzLMiNLiyGxJcthsyDfrWbhbq9DSRE7lYq9AVeh_Zc2wZq0RFh4CJGhXtW8WobIOY8JPIkyQKD4W_mKRxchykWyrCRliFId1Tzbgzu1NKxdZLiGZchs7MRgd-c_Kk0mDAvcVqyCSw5ZnlG8qWxmgwods9KD80tww2Bvp87a9Jwf-S8_PhqqG3ggGuLLm2CH71h7v6uA7f9-aCJKnlPiyb43OU2IK-rRgJf_U6VNAs1n0-RwWlaMttgA5wcecqRUlkneFkWpJOKDXpuAR9vwfoArMnPnp0jGQDN3-OPymX4xsYY6L4k0zC6j3zz9K2wgcGFD9kliVy2qwbeAqWL37Qdnr6sEbkxusF6IiYh-POUU_8rCQX03_uw0XHroHwK4mFajchjXmOY8ykOBQCIGPwCNI446xFhqWFDytDTXq9Eu651PlEqDELIcRwQz6KYWNJNlEFi4_f4GYS8sn0wpwte5R9QuaaLjc38obGBswmh15l9PrMvrWklBnnEZpV3NWmxQViKWcuey_QG_hRfQ-8Kjhv0f4D4L-d52x89yVXeVu0wbN_GstklEGCCecqvmQi1vXDf2FKr69Md-TE-mAh9pA-72vepP3guNcHz6PqzzOQX9Sj1uNZCkB0heHrXuCunn_Elv3ZvHZ-9AE26ybqtRVxaHtYrbtX9AKVk7ud_YdFPxSq-HeavXCXOBDGxEVleN03Q01jj7xoz5MjhKrVDF7XOobW0xMLtPfJLLmEGkBtSrLFCDGo1T7T3DnEiFQzXZutM50_l0k_3DxzDKhI4s5rOeeTMjSXDaxjM52LLgwAanVnMtKEsEXFVF4b5xvu_xn5CzqW5T0TTDOFXm2Gdxj-t59bgRGmnO56K85rTGgeJyXBroTz8cS4hkgfm2fQKiDAQZ5iMJeY4iqKZJTrOYb0IueB_ez-I8XW_dibgUd-WcJNKYKf4KnZR9_Z8o4OofbCdVj2mcgunpgjbTCORNWj7IpYmkHcbIQFtXnnts_2WNf-TtE6xr-iIVkwGABYE7ugHl1BUO5yKuDmeTOijSxWQGO22dzPnGVQ4O7AuXUYBFRa6FKVEIIVyk49ggvgRFFerncqEW1s8LR9gCzMIsxH2jCOyOSqjWGdZncRqDWhF6NYgFsqs3BDGYspC1vd9KFYppnH5W7MRYb2Duoi9yb7SQhNarto9KaqqgiTdEWeOw3kSkTZxa1moEh8F3ueFWhjQXNW4I3_inDPUdw0Xcf703y7uitnAsi-235tGC36JkWMR9M9Dx1cQSnS0NWhOYjUPPrKSHW8QCY-ZAfEUSJfixJeXEEUI0YmuGlFCIrLFvtlqFjxzqJW4JPCfnB0jCC9Z07d7rwHznYBSkr_cis4gNwnPOa11060WODyso6zRSJ7Q57bPhULvgnMZHZq2hl5dygeAz-elG8XYIUmr8jwXKuVGT_hl13cNI5QHaxshgdJuTzE362jxI4c0usFIVIzwhX6KqDFtWIZ5skj8iGioS6pDkY5tTj91aRu1ZL9eQ7KSLBbPeqhZCjQJGuudUr4u7HGuz8lQR0KvuZqKGGaybbPYwzJSx9qkGwqr_RNT7RW7oDxNiPlUHEf1qvED5M5FBFt_YlTmVtLQDJHRxvx3jv-Nc9pm6tew-et17Z0lMcXypXhr138RTXZYHSwJXsHMTNNGZFHCuZsyrq-PywrzCm-i6tXstJXx79s9os_dAaYgMtYEjPNRCb29LjaNw6OL60MKAl0Fung52DEDjnxFCTp9ygM_IkmLw95r9nhdq3smfsasefn6cp3YnEG3skKDswqS2Ul8Pilfqz3JI7mVucw4zA08ICIXAxB_L8_MPXUPPrVrdcf2HHicjjFs5L7mabPyv6blX2uB0BJ8Pcsdr_qdm-JbxmEEZZnxmtaG0VPgo23-DaHHIdMnNa-4cElpS64Tqcanin5QIsd1e1jIBJcjLmGOjV0eJpawOICK6dIhgdAsgLyXT-ItiUkVc_7NrPdpe0Fag7jMtvqXlvi-JljdILhGfbT7o-rNPY2iJ32jKUIDVZTSADQRf7Psnt40y3m1Ccx6aN3JVhNrgihrfjMF4rhZkqrh7Rlzs350VVOar8RblBoycjjBh9-xyXXSp4OWebr4rK6w76HQqKoOdQZvFrBG0Y3Qfkq1tNnJyy7QA3ZZwhnVPzmvi7GeCLIZMNQLQ2A3mUvZXcmmcI2NmLBJuTHoQ5IBhmtMA9_b1qVTt-8iy0jIklazgzzUa0Zdl3IAuptdmJT7AGneTDhrR60WBnxVbbjJa-_LvOyVdEVimw6wNuUO0HIuyLo7s5MkR1D1SNShzV7PUtM2YKxUxbE1zEHkqiTIF1P5RxIhh85XAaIaJMlIxjhvtIUy--jiuzLh9HDDjDCuSMRrqOk958lSAkZnProYbHuRI12ViZ561Z4-whNCQwctuoP68FvRWLByoO2NtNSaPBC9aqNx6OWHcTTGdaip8MZLmD_xPjoq6O04HNxBsaCQeo2xqMkeoB74m_8HtZQIPHyEgW2cAnDDOPDRFspt8KN9TMgAWTf_Pa7eI1ZvWo1vZtjOUi9E9SARkpmtFNtaQP_NRLp_76h9B_piPJCdzuIl9QXbwscJOaDHIlYfeauN1j1zMGmSY1jS2UPNPF7Qfy1wUcdxLFuzGy_1YPe6i8DoMimj_c995kmHFKi9jIdBHrTz5p-pX_E01O95Wd1mzgCeQCo643zzQ10c93MASc5dgHgCjyTfT4RATHXhVrhhjnamu0xnLxIHt0qA43qDfQd23xzzp5gA0KLoQ-b9fYpo5tjD3z-A6BuVES9k9W60WN3nwxJiil6rjHSHxzq_rmoDj--EOBsKv5TcismcMk4IdBgoKWcsHGW43c5t5gma
A6c1QZPDHZWTnkHPZIsH2U1kMcsNHoWG-H-xQ65cz6cceu_ATRu26etMuEZo4ecqNSENhCQq7NlkEnWVacuW7qybowkEr2uIU-BB_wI1oHPKVupH-0ZOHsVZOgktQ5g1DWiXUVloBabeRIZJt7fDYFs5oNgXxggElnN9fK-fb8BQb9j2BraENpRQonC68YsbFQLoyefvK3WnO1GFQQg7qDqzhU9PgMU6CIYfMfuHAoFiXtaTsnykAIv7m0nckJn8nldATLqakn72ObT_rzRQXi_cKoksBvKel4sqg7FtoM9no5s9a3wT1OwRXNUZ5Jg5iYyFW9mlRV4-Pwo67XhiipGG-iXsqxlhmDQjmeJoBfOKfm3MWJccFO9hMReoCp1DDqP5wxG_1gFMhl4mHPgxQW24pRrYOO00YYdR9VVrsBdjalyjo4mK5PuWqP0O3BKTZ7-Al2P5_VyQ2MxMZAZCkHSE5tRIkq0k29sZLPM58yUwN5FkIrzop2PR_VNYNa2eY2jK-mVv7eYvwcq9LcF6JbJN79K9YyPI-dqKutPoFzFXQEijdF77VbVDQYN5v33gKMYWIyXUb_ZgBFZ9wwZkGkzK7aRR22QVhUMk-M6dZrVH365Cmnboiq_7ZqSIa49uF1qlWbCljkpXMDxF8i0YGRdx4CUSU6vfyKyMUtCb-c6ZxGztojxz72-u3SwPOEJeRNUjpgH25LHo21ORRGuDHM0p04CyxXYe6YH-qYyINgouQ58GorDnhZJfLssqXFDEV_HeQfuZp-KsEnHSMDgX7ibItCu_ETXE2ano2M0XnOjdmSPRHl1aFyQAWkHsgTsrlzucRFcDhkK1BNIGPgC4eWce4bsaf_DHP0OJW8qVEnd15Oj1r9Om2K-vL5pYCLySkxA85DSgMKNOXPsPV3wGkiJjLJqn250v5aiwAziMHrcY5ik4Fm2AvDlRXPvGqXOuQG-zJsFc05J-1TBLgT1wZ1b2mw_qihmlJt71mthNKfgjmCMtx6WVKgRGM2lhdZ6gXt_9AkBcf3Rax9inuLnPgfaOZSCNa-MMR5yVa7ql7i-NwvuupwuuTuuKGkXv_-T3EK-Ky418dDDOMTgpW8nHiUM6Y5uBu6v__N8NMYvnJmujw6dUTNMR-R6vgaXdDtzs6a4KAccwIgqQ43uhgDexj9x4OB4304dKb5PJ2HpgIlnXlhjB-JGmnQAbAIaLrEcW9V0S0PX4H_Mz4NGqaAtDTeeiw=', + signature=IsStr(), provider_name='openai', ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), TextPart(content=IsStr(), id='msg_68c200091ccc8191b38e07ea231e862d0003919771fccd27'), ], usage=RequestUsage(input_tokens=23, output_tokens=2030, details={'reasoning_tokens': 1728}), @@ -1249,7 +1257,10 @@ async def test_bedrock_model_thinking_part_from_other_model( timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime.datetime(2025, 9, 10, 22, 46, 57, tzinfo=datetime.timezone.utc), + }, provider_response_id='resp_68c1ffe0f9a48191894c46b63c1a4f440003919771fccd27', finish_reason='stop', run_id=IsStr(), @@ -1277,6 +1288,7 @@ async def test_bedrock_model_thinking_part_from_other_model( timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1334,16 +1346,17 @@ async def test_bedrock_group_consecutive_tool_return_parts(bedrock_provider: Bed now = datetime.datetime.now() # Create a ModelRequest with 3 consecutive ToolReturnParts req = [ - ModelRequest(parts=[UserPromptPart(content=['Hello'])]), + ModelRequest(parts=[UserPromptPart(content=['Hello'])], timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='Hi')]), - ModelRequest(parts=[UserPromptPart(content=['How are you?'])]), + ModelRequest(parts=[UserPromptPart(content=['How are you?'])], timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='Cloudy')]), ModelRequest( parts=[ ToolReturnPart(tool_name='tool1', 
content='result1', tool_call_id='id1', timestamp=now), ToolReturnPart(tool_name='tool2', content='result2', tool_call_id='id2', timestamp=now), ToolReturnPart(tool_name='tool3', content='result3', tool_call_id='id3', timestamp=now), - ] + ], + timestamp=IsDatetime(), ), ] @@ -1433,6 +1446,7 @@ async def test_bedrock_model_thinking_part_stream(allow_model_requests: None, be timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1463,7 +1477,8 @@ async def test_bedrock_mistral_tool_result_format(bedrock_provider: BedrockProvi ModelRequest( parts=[ ToolReturnPart(tool_name='tool1', content={'foo': 'bar'}, tool_call_id='id1', timestamp=now), - ] + ], + timestamp=IsDatetime(), ), ] diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index a1b7785801..0958d01c67 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -3,7 +3,7 @@ import json from collections.abc import Sequence from dataclasses import dataclass -from datetime import timezone +from datetime import datetime, timezone from typing import Any, cast import pytest @@ -128,6 +128,7 @@ async def test_request_simple_success(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -142,6 +143,7 @@ async def test_request_simple_success(allow_model_requests: None): ), ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -212,6 +214,7 @@ async def test_request_structured_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -239,6 +242,7 @@ async def test_request_structured_response(allow_model_requests: None): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -305,6 +309,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -332,6 +337,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -360,6 +366,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -452,6 +459,7 @@ def simple_instructions(ctx: RunContext): [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -501,6 +509,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -515,7 +524,10 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': 
datetime(2025, 9, 5, 22, 7, 17, tzinfo=timezone.utc), + }, provider_response_id='resp_68bb5f153efc81a2b3958ddb1f257ff30886f4f20524f3b9', finish_reason='stop', run_id=IsStr(), @@ -537,6 +549,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_deepseek.py b/tests/models/test_deepseek.py index 0ecdbbb0bb..37c76d62b1 100644 --- a/tests/models/test_deepseek.py +++ b/tests/models/test_deepseek.py @@ -1,5 +1,7 @@ from __future__ import annotations as _annotations +from datetime import datetime, timezone + import pytest from inline_snapshot import snapshot @@ -36,6 +38,7 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -56,7 +59,10 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek timestamp=IsDatetime(), provider_name='deepseek', provider_url='https://api.deepseek.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 22, 14, 9, 11, tzinfo=timezone.utc), + }, provider_response_id='181d9669-2b3a-445e-bd13-2ebff2c378f6', finish_reason='stop', run_id=IsStr(), @@ -84,6 +90,7 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -104,7 +111,10 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse timestamp=IsDatetime(), provider_name='deepseek', provider_url='https://api.deepseek.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 10, 17, 41, 44, tzinfo=timezone.utc), + }, provider_response_id='33be18fc-3842-486c-8c29-dd8e578f7f20', finish_reason='stop', run_id=IsStr(), diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py index 41c20d1e46..3dd0a012a5 100644 --- a/tests/models/test_fallback.py +++ b/tests/models/test_fallback.py @@ -77,6 +77,7 @@ def test_first_successful() -> None: parts=[ UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -104,6 +105,7 @@ def test_first_failed() -> None: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -132,6 +134,7 @@ def test_first_failed_instrumented(capfire: CaptureLogfire) -> None: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -816,6 +819,7 @@ def prompted_output_func(_: list[ModelMessage], info: AgentInfo) -> ModelRespons timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), instructions='Be kind', run_id=IsStr(), ), diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index ee1aa83b15..73056e3fd8 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -561,6 +561,7 @@ async def test_text_success(get_gemini_client: GetGeminiClient): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -582,6 +583,7 @@ async def test_text_success(get_gemini_client: GetGeminiClient): [ ModelRequest( 
parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -595,6 +597,7 @@ async def test_text_success(get_gemini_client: GetGeminiClient): ), ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -624,6 +627,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -644,6 +648,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient): tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -689,6 +694,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -711,6 +717,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -740,6 +747,7 @@ async def get_location(loc_name: str) -> str: tool_call_id=IsStr(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -899,6 +907,7 @@ async def bar(y: str) -> str: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -922,6 +931,7 @@ async def bar(y: str) -> str: tool_name='bar', content='b', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -942,6 +952,7 @@ async def bar(y: str) -> str: tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -988,6 +999,7 @@ def get_location(loc_name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1015,6 +1027,7 @@ def get_location(loc_name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -1201,6 +1214,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1234,6 +1248,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1391,6 +1406,7 @@ async def test_gemini_model_instructions(allow_model_requests: None, gemini_api_ [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -1470,6 +1486,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1511,7 +1528,10 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': IsDatetime(), + }, provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b', finish_reason='stop', run_id=IsStr(), @@ -1531,6 +1551,7 @@ 
async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1545,7 +1566,10 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': IsDatetime(), + }, provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b', finish_reason='stop', run_id=IsStr(), @@ -1557,6 +1581,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1630,6 +1655,7 @@ async def test_gemini_youtube_video_url_input(allow_model_requests: None, gemini parts=[ UserPromptPart(content=['What is the main content of this URL?', url], timestamp=IsDatetime()), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1715,6 +1741,7 @@ async def bar() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1738,6 +1765,7 @@ async def bar() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1767,6 +1795,7 @@ async def bar() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -1799,6 +1828,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1822,6 +1852,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1851,6 +1882,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -1882,6 +1914,7 @@ def upcase(text: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1955,6 +1988,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2008,6 +2042,7 @@ class CountryLanguage(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2062,6 +2097,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2112,6 +2148,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2135,6 +2172,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2179,6 +2217,7 @@ class CountryLanguage(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_gemini_vertex.py b/tests/models/test_gemini_vertex.py index a361f51033..84175e1104 100644 --- a/tests/models/test_gemini_vertex.py +++ b/tests/models/test_gemini_vertex.py @@ -141,6 +141,7 @@ async def test_url_input( timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -181,6 +182,7 @@ async def test_url_input_force_download(allow_model_requests: None) -> None: # timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git 
a/tests/models/test_google.py b/tests/models/test_google.py index be6d4bd68a..6d48d4e42e 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -5,6 +5,7 @@ import os import re from collections.abc import AsyncIterator +from datetime import timezone from typing import Any import pytest @@ -64,7 +65,7 @@ from pydantic_ai.settings import ModelSettings from pydantic_ai.usage import RequestUsage, RunUsage, UsageLimits -from ..conftest import IsBytes, IsDatetime, IsInstance, IsStr, try_import +from ..conftest import IsBytes, IsDatetime, IsInstance, IsNow, IsStr, try_import from ..parts_from_messages import part_types_from_messages with try_import() as imports_successful: @@ -137,6 +138,7 @@ async def test_google_model(allow_model_requests: None, google_provider: GoogleP timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -203,6 +205,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -231,6 +234,7 @@ async def temperature(city: str, date: datetime.date) -> str: tool_name='temperature', content='30°C', tool_call_id=IsStr(), timestamp=IsDatetime() ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -264,6 +268,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -331,6 +336,7 @@ async def test_google_model_builtin_code_execution_stream( timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -572,6 +578,7 @@ async def get_capital(country: str) -> str: SystemPromptPart(content='You are a helpful chatbot.', timestamp=IsDatetime()), UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime()), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -604,6 +611,7 @@ async def get_capital(country: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -636,6 +644,7 @@ async def get_capital(country: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -942,6 +951,7 @@ def instructions() -> str: [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -972,9 +982,14 @@ async def test_google_model_multiple_documents_in_history( result = await agent.run( 'What is in the documents?', message_history=[ - ModelRequest(parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])]), + ModelRequest( + parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])], timestamp=IsDatetime() + ), ModelResponse(parts=[TextPart(content='foo bar')]), - ModelRequest(parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])]), + ModelRequest( + parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])], + timestamp=IsDatetime(), + ), ModelResponse(parts=[TextPart(content='foo bar 2')]), ], ) @@ -1016,6 +1031,7 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1096,6 +1112,7 @@ async def 
test_google_model_web_search_tool(allow_model_requests: None, google_p timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1191,6 +1208,7 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1337,6 +1355,7 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1443,6 +1462,7 @@ async def test_google_model_web_fetch_tool( timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1523,6 +1543,7 @@ async def test_google_model_web_fetch_tool_stream(allow_model_requests: None, go timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1649,6 +1670,7 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog SystemPromptPart(content='You are a helpful chatbot.', timestamp=IsDatetime()), UserPromptPart(content='What day is today in Utrecht?', timestamp=IsDatetime()), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1718,6 +1740,7 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog [ ModelRequest( parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1908,7 +1931,7 @@ async def test_google_model_empty_assistant_response(allow_model_requests: None, result = await agent.run( 'Was your previous response empty?', message_history=[ - ModelRequest(parts=[UserPromptPart(content='Hi')]), + ModelRequest(parts=[UserPromptPart(content='Hi')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='')]), ], ) @@ -1947,6 +1970,7 @@ def dummy() -> None: ... # pragma: no cover timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1985,6 +2009,7 @@ def dummy() -> None: ... # pragma: no cover timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2037,6 +2062,7 @@ def dummy() -> None: ... # pragma: no cover timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2047,22 +2073,10 @@ def dummy() -> None: ... # pragma: no cover signature=IsStr(), provider_name='openai', ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689', - ), + ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'), + ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'), + ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'), + ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'), TextPart(content=IsStr(), id='msg_68c1fb814fdc8196aec1a46164ddf7680c14a8a9087e8689'), ], usage=RequestUsage(input_tokens=45, output_tokens=1719, details={'reasoning_tokens': 1408}), @@ -2070,7 +2084,10 @@ def dummy() -> None: ... 
# pragma: no cover timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime.datetime(2025, 9, 10, 22, 27, 55, tzinfo=datetime.timezone.utc), + }, provider_response_id='resp_68c1fb6b6a248196a6216e80fc2ace380c14a8a9087e8689', finish_reason='stop', run_id=IsStr(), @@ -2096,6 +2113,7 @@ def dummy() -> None: ... # pragma: no cover timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2153,6 +2171,7 @@ def dummy() -> None: ... # pragma: no cover timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2350,9 +2369,10 @@ async def test_google_url_input( parts=[ UserPromptPart( content=['What is the main content of this URL?', Is(url)], - timestamp=IsDatetime(), + timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2362,7 +2382,7 @@ async def test_google_url_input( timestamp=IsDatetime(), provider_name='google-vertex', provider_url='https://aiplatform.googleapis.com/', - provider_details={'finish_reason': 'STOP'}, + provider_details={'finish_reason': 'STOP', 'timestamp': IsDatetime()}, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -2393,9 +2413,10 @@ async def test_google_url_input_force_download( parts=[ UserPromptPart( content=['What is the main content of this URL?', Is(video_url)], - timestamp=IsDatetime(), + timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2405,7 +2426,7 @@ async def test_google_url_input_force_download( timestamp=IsDatetime(), provider_name='google-vertex', provider_url='https://aiplatform.googleapis.com/', - provider_details={'finish_reason': 'STOP'}, + provider_details={'finish_reason': 'STOP', 'timestamp': IsDatetime()}, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -2447,6 +2468,7 @@ async def bar() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2472,6 +2494,7 @@ async def bar() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2503,6 +2526,7 @@ async def bar() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -2545,6 +2569,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2570,6 +2595,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2601,6 +2627,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -2633,6 +2660,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2658,6 +2686,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2723,6 +2752,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2777,6 +2807,7 @@ class CountryLanguage(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ 
-2832,6 +2863,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2879,6 +2911,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2904,6 +2937,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2949,6 +2983,7 @@ class CountryLanguage(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3155,6 +3190,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3196,6 +3232,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3272,6 +3309,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3345,6 +3383,7 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3462,6 +3501,7 @@ class Animal(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3497,6 +3537,7 @@ class Animal(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3572,6 +3613,7 @@ async def test_google_image_generation_with_web_search(allow_model_requests: Non timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4147,6 +4189,7 @@ def get_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4169,7 +4212,10 @@ def get_country() -> str: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime.datetime(2025, 11, 21, 21, 57, 19, tzinfo=datetime.timezone.utc), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -4183,6 +4229,7 @@ def get_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4203,7 +4250,10 @@ def get_country() -> str: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime.datetime(2025, 11, 21, 21, 57, 25, tzinfo=datetime.timezone.utc), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -4247,6 +4297,7 @@ def get_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -4313,7 +4364,7 @@ async def test_google_api_non_http_error( async def test_google_model_retrying_after_empty_response(allow_model_requests: None, google_provider: GoogleProvider): message_history = [ - ModelRequest(parts=[UserPromptPart(content='Hi')]), + ModelRequest(parts=[UserPromptPart(content='Hi')], timestamp=IsDatetime()), ModelResponse(parts=[]), ] @@ -4325,7 +4376,11 @@ 
async def test_google_model_retrying_after_empty_response(allow_model_requests: assert result.output == snapshot('Hello! How can I help you today?') assert result.new_messages() == snapshot( [ - ModelRequest(parts=[], run_id=IsStr()), + ModelRequest( + parts=[], + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), ModelResponse( parts=[ TextPart( @@ -4514,6 +4569,7 @@ def get_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4546,6 +4602,7 @@ def get_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index 0f10b4b7e4..16917934ba 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -48,7 +48,7 @@ from pydantic_ai.output import NativeOutput, PromptedOutput from pydantic_ai.usage import RequestUsage, RunUsage -from ..conftest import IsDatetime, IsInstance, IsNow, IsStr, raise_if_exception, try_import +from ..conftest import IsDatetime, IsInstance, IsStr, raise_if_exception, try_import from .mock_async_stream import MockAsyncStream with try_import() as imports_successful: @@ -168,31 +168,39 @@ async def test_request_simple_success(allow_model_requests: None): assert result.all_messages() == snapshot( [ ModelRequest( - parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + parts=[UserPromptPart(content='hello', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], model_name='llama-3.3-70b-versatile-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( - parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + parts=[UserPromptPart(content='hello', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], model_name='llama-3.3-70b-versatile-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -237,7 +245,8 @@ async def test_request_structured_response(allow_model_requests: None): assert result.all_messages() == snapshot( [ ModelRequest( - parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + parts=[UserPromptPart(content='Hello', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -249,10 +258,13 @@ async def test_request_structured_response(allow_model_requests: None): ) ], model_name='llama-3.3-70b-versatile-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), 
@@ -263,9 +275,10 @@ async def test_request_structured_response(allow_model_requests: None): tool_name='final_result', content='Final result processed.', tool_call_id='123', - timestamp=IsNow(tz=timezone.utc), + timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -329,9 +342,10 @@ async def get_location(loc_name: str) -> str: [ ModelRequest( parts=[ - SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), - UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), + SystemPromptPart(content='this is the system prompt', timestamp=IsDatetime()), + UserPromptPart(content='Hello', timestamp=IsDatetime()), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -344,10 +358,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='llama-3.3-70b-versatile-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -358,9 +375,10 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', content='Wrong location, please try again', tool_call_id='1', - timestamp=IsNow(tz=timezone.utc), + timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -373,10 +391,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=3, output_tokens=2), model_name='llama-3.3-70b-versatile-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -387,18 +408,22 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', content='{"lat": 51, "lng": 0}', tool_call_id='2', - timestamp=IsNow(tz=timezone.utc), + timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], model_name='llama-3.3-70b-versatile-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -504,7 +529,8 @@ async def test_stream_structured(allow_model_requests: None): assert result.all_messages() == snapshot( [ ModelRequest( - parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))], + parts=[UserPromptPart(content='', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -516,9 +542,10 @@ async def test_stream_structured(allow_model_requests: None): ) ], model_name='llama-3.3-70b-versatile', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', + provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)}, provider_response_id='x', 
run_id=IsStr(), ), @@ -528,9 +555,10 @@ async def test_stream_structured(allow_model_requests: None): tool_name='final_result', content='Final result processed.', tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), + timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -615,6 +643,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -624,7 +653,10 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 29, 20, 21, 45, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-3c327c89-e9f5-4aac-a5d5-190e6f6f25c9', finish_reason='tool_call', run_id=IsStr(), @@ -645,6 +677,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -654,7 +687,10 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 29, 20, 21, 47, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-82dfad42-6a28-4089-82c3-c8633f626c0d', finish_reason='stop', run_id=IsStr(), @@ -743,6 +779,7 @@ async def test_groq_model_instructions(allow_model_requests: None, groq_api_key: [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -753,7 +790,10 @@ async def test_groq_model_instructions(allow_model_requests: None, groq_api_key: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 7, 16, 32, 53, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-7586b6a9-fb4b-4ec7-86a0-59f0a77844cf', finish_reason='stop', run_id=IsStr(), @@ -783,6 +823,7 @@ async def test_groq_model_web_search_tool(allow_model_requests: None, groq_api_k timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1022,7 +1063,10 @@ async def test_groq_model_web_search_tool(allow_model_requests: None, groq_api_k timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 17, 21, 14, 13, tzinfo=timezone.utc), + }, provider_response_id='stub', finish_reason='stop', run_id=IsStr(), @@ -1054,6 +1098,7 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1190,7 +1235,10 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 17, 21, 20, 46, tzinfo=timezone.utc), + }, provider_response_id='stub', finish_reason='stop', run_id=IsStr(), @@ -1946,6 +1994,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key [ 
ModelRequest( parts=[UserPromptPart(content='I want a recipe to cook Uruguayan alfajores.', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a chef.', run_id=IsStr(), ), @@ -1956,7 +2005,10 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 19, 12, 3, 5, tzinfo=timezone.utc), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -1973,6 +2025,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key [ ModelRequest( parts=[UserPromptPart(content='I want a recipe to cook Uruguayan alfajores.', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a chef.', run_id=IsStr(), ), @@ -1983,7 +2036,10 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 19, 12, 3, 5, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-9748c1af-1065-410a-969a-d7fb48039fbb', finish_reason='stop', run_id=IsStr(), @@ -1995,6 +2051,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='You are a chef.', run_id=IsStr(), ), @@ -2005,7 +2062,10 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 19, 12, 3, 10, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-994aa228-883a-498c-8b20-9655d770b697', finish_reason='stop', run_id=IsStr(), @@ -2039,6 +2099,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='You are a chef.', run_id=IsStr(), ), @@ -2126,7 +2187,10 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 17, 21, 29, 56, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-4ef92b12-fb9d-486f-8b98-af9b5ecac736', finish_reason='stop', run_id=IsStr(), @@ -3383,6 +3447,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='You are a chef.', run_id=IsStr(), ), @@ -3489,7 +3554,10 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 17, 21, 30, 1, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-dd0af56b-f71d-4101-be2f-89efcf3f05ac', finish_reason='stop', run_id=IsStr(), @@ -5330,6 +5398,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='Be concise. 
Never use pretty double quotes, just regular ones.', run_id=IsStr(), ), @@ -5370,6 +5439,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='Be concise. Never use pretty double quotes, just regular ones.', run_id=IsStr(), ), @@ -5389,7 +5459,10 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 9, 2, 21, 3, 54, tzinfo=timezone.utc), + }, provider_response_id=IsStr(), finish_reason='tool_call', run_id=IsStr(), @@ -5403,6 +5476,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='Be concise. Never use pretty double quotes, just regular ones.', run_id=IsStr(), ), @@ -5418,7 +5492,10 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 2, 21, 3, 57, tzinfo=timezone.utc), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -5454,6 +5531,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='Be concise. Never use pretty double quotes, just regular ones.', run_id=IsStr(), ), @@ -5476,6 +5554,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', + provider_details={'timestamp': datetime(2025, 9, 2, 21, 23, 3, tzinfo=timezone.utc)}, provider_response_id='chatcmpl-4e0ca299-7515-490a-a98a-16d7664d4fba', run_id=IsStr(), ), @@ -5501,6 +5580,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='Be concise. Never use pretty double quotes, just regular ones.', run_id=IsStr(), ), @@ -5518,7 +5598,10 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 9, 2, 21, 23, 4, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-fffa1d41-1763-493a-9ced-083bd3f2d98b', finish_reason='tool_call', run_id=IsStr(), @@ -5532,6 +5615,7 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), instructions='Be concise. 
Never use pretty double quotes, just regular ones.', run_id=IsStr(), ), @@ -5542,7 +5626,10 @@ async def get_something_by_name(name: str) -> str: timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 2, 21, 23, 4, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-fe6b5685-166f-4c71-9cd7-3d5a97301bf1', finish_reason='stop', run_id=IsStr(), @@ -5584,6 +5671,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -5598,7 +5686,10 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 2, 20, 1, 5, tzinfo=timezone.utc), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -5628,6 +5719,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -5642,7 +5734,10 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), provider_name='groq', provider_url='https://api.groq.com', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 2, 20, 1, 6, tzinfo=timezone.utc), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index ed99de4e56..3ec552cf0d 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -168,9 +168,12 @@ async def test_simple_completion(allow_model_requests: None, huggingface_api_key ], usage=RequestUsage(input_tokens=30, output_tokens=29), model_name='Qwen/Qwen2.5-72B-Instruct-fast', - timestamp=datetime(2025, 7, 8, 13, 42, 33, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 8, 13, 42, 33, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-d445c0d473a84791af2acf356cc00df7', run_id=IsStr(), ) @@ -238,10 +241,13 @@ async def test_request_structured_response( ) ], model_name='hf-model', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', provider_url='https://api-inference.huggingface.co', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ) @@ -363,6 +369,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -375,10 +382,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='hf-model', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', provider_url='https://api-inference.huggingface.co', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, 
tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ), @@ -391,6 +401,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -403,10 +414,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='hf-model', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', provider_url='https://api-inference.huggingface.co', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ), @@ -419,15 +433,19 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], model_name='hf-model', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', provider_url='https://api-inference.huggingface.co', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ), @@ -645,15 +663,19 @@ async def test_image_url_input(allow_model_requests: None, huggingface_api_key: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='Hello! How can I assist you with this image of a potato?')], usage=RequestUsage(input_tokens=269, output_tokens=15), model_name='Qwen/Qwen2.5-VL-72B-Instruct', - timestamp=datetime(2025, 7, 8, 14, 4, 39, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 8, 14, 4, 39, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-49aa100effab4ca28514d5ccc00d7944', run_id=IsStr(), ), @@ -714,6 +736,7 @@ def simple_instructions(ctx: RunContext): [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -723,7 +746,10 @@ def simple_instructions(ctx: RunContext): model_name='Qwen/Qwen2.5-72B-Instruct-fast', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 2, 15, 39, 17, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-b3936940372c481b8d886e596dc75524', run_id=IsStr(), ), @@ -812,15 +838,19 @@ def response_validator(value: str) -> str: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='invalid-response')], model_name='hf-model', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', provider_url='https://api-inference.huggingface.co', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ), @@ -833,15 +863,19 @@ def 
response_validator(value: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final-response')], model_name='hf-model', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', provider_url='https://api-inference.huggingface.co', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ), @@ -865,7 +899,7 @@ async def test_thinking_part_in_history(allow_model_requests: None): model = HuggingFaceModel('hf-model', provider=HuggingFaceProvider(hf_client=mock_client, api_key='x')) agent = Agent(model) messages = [ - ModelRequest(parts=[UserPromptPart(content='request')]), + ModelRequest(parts=[UserPromptPart(content='request')], timestamp=IsDatetime()), ModelResponse( parts=[ TextPart(content='text 1'), @@ -932,6 +966,7 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -943,7 +978,10 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap model_name='Qwen/Qwen3-235B-A22B', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 9, 13, 17, 45, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-957db61fe60d4440bcfe1f11f2c5b4b9', run_id=IsStr(), ), @@ -966,6 +1004,7 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -977,7 +1016,10 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap model_name='Qwen/Qwen3-235B-A22B', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 9, 13, 18, 14, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-35fdec1307634f94a39f7e26f52e12a7', run_id=IsStr(), ), @@ -1007,6 +1049,7 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1017,7 +1060,10 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa model_name='Qwen/Qwen3-235B-A22B', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 23, 19, 58, 41, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-357f347a3f5d4897b36a128fb4e4cf7b', run_id=IsStr(), ), diff --git a/tests/models/test_instrumented.py b/tests/models/test_instrumented.py index 47f7a38731..bb176b0bc5 100644 --- a/tests/models/test_instrumented.py +++ b/tests/models/test_instrumented.py @@ -44,7 +44,7 @@ from pydantic_ai.settings import ModelSettings from pydantic_ai.usage import RequestUsage -from ..conftest import IsInt, IsStr, try_import +from ..conftest import IsDatetime, IsInt, IsStr, try_import with try_import() as imports_successful: from logfire.testing import CaptureLogfire @@ -148,7 +148,8 @@ async def 
test_instrumented_model(capfire: CaptureLogfire): RetryPromptPart('retry_prompt1', tool_name='tool4', tool_call_id='tool_call_4'), RetryPromptPart('retry_prompt2'), {}, # test unexpected parts # type: ignore - ] + ], + timestamp=IsDatetime(), ), ModelResponse(parts=[TextPart('text3')]), ] @@ -359,7 +360,7 @@ async def test_instrumented_model_not_recording(): InstrumentationSettings(tracer_provider=NoOpTracerProvider(), logger_provider=NoOpLoggerProvider()), ) - messages: list[ModelMessage] = [ModelRequest(parts=[SystemPromptPart('system_prompt')])] + messages: list[ModelMessage] = [ModelRequest(parts=[SystemPromptPart('system_prompt')], timestamp=IsDatetime())] await model.request( messages, model_settings=ModelSettings(temperature=1), @@ -380,7 +381,8 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire): ModelRequest( parts=[ UserPromptPart('user_prompt'), - ] + ], + timestamp=IsDatetime(), ), ] async with model.request_stream( @@ -482,7 +484,8 @@ async def test_instrumented_model_stream_break(capfire: CaptureLogfire): ModelRequest( parts=[ UserPromptPart('user_prompt'), - ] + ], + timestamp=IsDatetime(), ), ] @@ -607,6 +610,7 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire, instr RetryPromptPart('retry_prompt2'), {}, # test unexpected parts # type: ignore ], + timestamp=IsDatetime(), ), ModelResponse(parts=[TextPart('text3')]), ] @@ -987,7 +991,7 @@ def __repr__(self): messages = [ ModelResponse(parts=[ToolCallPart('tool', {'arg': Foo()}, tool_call_id='tool_call_id')]), - ModelRequest(parts=[ToolReturnPart('tool', Bar(), tool_call_id='return_tool_call_id')]), + ModelRequest(parts=[ToolReturnPart('tool', Bar(), tool_call_id='return_tool_call_id')], timestamp=IsDatetime()), ] settings = InstrumentationSettings() @@ -1026,7 +1030,7 @@ def __repr__(self): def test_messages_to_otel_events_instructions(): messages = [ - ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')]), + ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), ] settings = InstrumentationSettings() @@ -1052,9 +1056,9 @@ def test_messages_to_otel_events_instructions(): def test_messages_to_otel_events_instructions_multiple_messages(): messages = [ - ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')]), + ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), - ModelRequest(instructions='instructions2', parts=[UserPromptPart('user_prompt2')]), + ModelRequest(instructions='instructions2', parts=[UserPromptPart('user_prompt2')], timestamp=IsDatetime()), ] settings = InstrumentationSettings() assert [InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(messages)] == snapshot( @@ -1081,10 +1085,22 @@ def test_messages_to_otel_events_instructions_multiple_messages(): def test_messages_to_otel_events_image_url(document_content: BinaryContent): messages = [ - ModelRequest(parts=[UserPromptPart(content=['user_prompt', ImageUrl('https://example.com/image.png')])]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt2', AudioUrl('https://example.com/audio.mp3')])]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt3', DocumentUrl('https://example.com/document.pdf')])]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt4', VideoUrl('https://example.com/video.mp4')])]), + ModelRequest( + 
parts=[UserPromptPart(content=['user_prompt', ImageUrl('https://example.com/image.png')])], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt2', AudioUrl('https://example.com/audio.mp3')])], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt3', DocumentUrl('https://example.com/document.pdf')])], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt4', VideoUrl('https://example.com/video.mp4')])], + timestamp=IsDatetime(), + ), ModelRequest( parts=[ UserPromptPart( @@ -1096,9 +1112,10 @@ def test_messages_to_otel_events_image_url(document_content: BinaryContent): VideoUrl('https://example.com/video2.mp4'), ] ) - ] + ], + timestamp=IsDatetime(), ), - ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])]), + ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), ModelResponse(parts=[FilePart(content=document_content)]), ] @@ -1238,7 +1255,7 @@ def test_messages_to_otel_events_image_url(document_content: BinaryContent): def test_messages_to_otel_events_without_binary_content(document_content: BinaryContent): messages: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])]), + ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])], timestamp=IsDatetime()), ] settings = InstrumentationSettings(include_binary_content=False) assert [InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(messages)] == snapshot( @@ -1266,7 +1283,7 @@ def test_messages_to_otel_events_without_binary_content(document_content: Binary def test_messages_without_content(document_content: BinaryContent): messages: list[ModelMessage] = [ - ModelRequest(parts=[SystemPromptPart('system_prompt')]), + ModelRequest(parts=[SystemPromptPart('system_prompt')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), ModelRequest( parts=[ @@ -1280,13 +1297,17 @@ def test_messages_without_content(document_content: BinaryContent): document_content, ] ) - ] + ], + timestamp=IsDatetime(), ), ModelResponse(parts=[TextPart('text2'), ToolCallPart(tool_name='my_tool', args={'a': 13, 'b': 4})]), - ModelRequest(parts=[ToolReturnPart('tool', 'tool_return_content', 'tool_call_1')]), - ModelRequest(parts=[RetryPromptPart('retry_prompt', tool_name='tool', tool_call_id='tool_call_2')]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt2', document_content])]), - ModelRequest(parts=[UserPromptPart('simple text prompt')]), + ModelRequest(parts=[ToolReturnPart('tool', 'tool_return_content', 'tool_call_1')], timestamp=IsDatetime()), + ModelRequest( + parts=[RetryPromptPart('retry_prompt', tool_name='tool', tool_call_id='tool_call_2')], + timestamp=IsDatetime(), + ), + ModelRequest(parts=[UserPromptPart(content=['user_prompt2', document_content])], timestamp=IsDatetime()), + ModelRequest(parts=[UserPromptPart('simple text prompt')], timestamp=IsDatetime()), ModelResponse(parts=[FilePart(content=document_content)]), ] settings = InstrumentationSettings(include_content=False) @@ -1460,7 +1481,7 @@ def test_deprecated_event_mode_warning(): async def test_response_cost_error(capfire: CaptureLogfire, monkeypatch: pytest.MonkeyPatch): model = InstrumentedModel(MyModel()) - messages: list[ModelMessage] = [ModelRequest(parts=[UserPromptPart('user_prompt')])] + messages: list[ModelMessage] = 
[ModelRequest(parts=[UserPromptPart('user_prompt')], timestamp=IsDatetime())] monkeypatch.setattr(ModelResponse, 'cost', None) with warns( @@ -1619,7 +1640,9 @@ def test_cache_point_in_user_prompt(): OpenTelemetry message parts output. """ messages: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content=['text before', CachePoint(), 'text after'])]), + ModelRequest( + parts=[UserPromptPart(content=['text before', CachePoint(), 'text after'])], timestamp=IsDatetime() + ), ] settings = InstrumentationSettings() @@ -1641,7 +1664,8 @@ def test_cache_point_in_user_prompt(): ModelRequest( parts=[ UserPromptPart(content=['first', CachePoint(), 'second', CachePoint(), 'third']), - ] + ], + timestamp=IsDatetime(), ), ] assert settings.messages_to_otel_messages(messages_multi) == snapshot( @@ -1670,7 +1694,8 @@ def test_cache_point_in_user_prompt(): 'question', ] ), - ] + ], + timestamp=IsDatetime(), ), ] assert settings.messages_to_otel_messages(messages_mixed) == snapshot( diff --git a/tests/models/test_mcp_sampling.py b/tests/models/test_mcp_sampling.py index 1da0851c20..c4094ce818 100644 --- a/tests/models/test_mcp_sampling.py +++ b/tests/models/test_mcp_sampling.py @@ -11,7 +11,7 @@ from pydantic_ai.agent import Agent from pydantic_ai.exceptions import UnexpectedModelBehavior -from ..conftest import IsNow, IsStr, try_import +from ..conftest import IsDatetime, IsNow, IsStr, try_import with try_import() as imports_successful: from mcp import CreateMessageResult @@ -55,6 +55,7 @@ def test_assistant_text(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -92,6 +93,7 @@ def test_assistant_text_history(): [ ModelRequest( parts=[UserPromptPart(content='1', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), instructions='testing', run_id=IsStr(), ), @@ -103,6 +105,7 @@ def test_assistant_text_history(): ), ModelRequest( parts=[UserPromptPart(content='2', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), instructions='testing', run_id=IsStr(), ), @@ -125,7 +128,8 @@ def test_assistant_text_history_complex(): content=['a string', BinaryContent(data=base64.b64encode(b'data'), media_type='image/jpeg')] ), SystemPromptPart(content='system content'), - ] + ], + timestamp=IsDatetime(), ), ModelResponse( parts=[TextPart(content='text content')], diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 424be8d39b..0d3a653b07 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -225,6 +225,7 @@ async def test_multiple_completions(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -241,16 +242,20 @@ async def test_multiple_completions(allow_model_requests: None): ), ModelRequest( parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='hello again')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ 
-291,48 +296,60 @@ async def test_three_completions(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='hello again')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( parts=[UserPromptPart(content='final message', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final message')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -441,6 +458,7 @@ class CityLocation(BaseModel): [ ModelRequest( parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -453,10 +471,13 @@ class CityLocation(BaseModel): ], usage=RequestUsage(input_tokens=1, output_tokens=2), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -470,6 +491,7 @@ class CityLocation(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -510,6 +532,7 @@ class CityLocation(BaseModel): [ ModelRequest( parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -522,10 +545,13 @@ class CityLocation(BaseModel): ], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - 
provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -539,6 +565,7 @@ class CityLocation(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -576,6 +603,7 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque SystemPromptPart(content='System prompt value', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -588,10 +616,13 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque ], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -605,6 +636,7 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1132,6 +1164,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1144,10 +1177,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1161,6 +1197,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1173,10 +1210,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=3, output_tokens=2), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1190,16 +1230,20 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - 
provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1292,6 +1336,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1304,10 +1349,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1321,6 +1369,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1333,10 +1382,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=3, output_tokens=2), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1350,6 +1402,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1362,10 +1415,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1379,6 +1435,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1442,7 +1499,7 @@ async def get_location(loc_name: str) -> str: v = [c async for c in result.stream_output(debounce_by=None)] assert v == snapshot([{'won': True}]) assert result.is_complete - assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) + assert result.timestamp() == IsNow(tz=timezone.utc) assert result.usage().input_tokens == 4 assert result.usage().output_tokens == 4 @@ -1456,6 +1513,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1468,10 +1526,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=2), model_name='gpt-4', - 
timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='tool_call', run_id=IsStr(), @@ -1485,16 +1546,20 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"won": true}', tool_call_id='1')], usage=RequestUsage(input_tokens=2, output_tokens=2), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='tool_call', run_id=IsStr(), @@ -1508,6 +1573,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1558,7 +1624,7 @@ async def get_location(loc_name: str) -> str: v = [c async for c in result.stream_output(debounce_by=None)] assert v == snapshot(['final ', 'final response']) assert result.is_complete - assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) + assert result.timestamp() == IsNow(tz=timezone.utc) assert result.usage().input_tokens == 6 assert result.usage().output_tokens == 6 @@ -1572,6 +1638,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1584,10 +1651,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=2), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='tool_call', run_id=IsStr(), @@ -1601,16 +1671,20 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], usage=RequestUsage(input_tokens=4, output_tokens=4), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='stop', run_id=IsStr(), @@ -1676,7 +1750,7 @@ async def get_location(loc_name: str) -> str: v = [c async for c in result.stream_text(debounce_by=None)] assert v == snapshot(['final ', 'final response']) assert result.is_complete - assert result.timestamp() == datetime(2024, 1, 1, 0, 0, 
tzinfo=timezone.utc) + assert result.timestamp() == IsNow(tz=timezone.utc) assert result.usage().input_tokens == 7 assert result.usage().output_tokens == 7 @@ -1690,6 +1764,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1702,10 +1777,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=2), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='tool_call', run_id=IsStr(), @@ -1719,6 +1797,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1731,10 +1810,13 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='tool_call', run_id=IsStr(), @@ -1748,16 +1830,20 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], usage=RequestUsage(input_tokens=4, output_tokens=4), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='stop', run_id=IsStr(), @@ -1929,6 +2015,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1938,7 +2025,10 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 11, 28, 2, 19, 58, tzinfo=timezone.utc), + }, provider_response_id='412174432ea945889703eac58b44ae35', finish_reason='tool_call', run_id=IsStr(), @@ -1959,6 +2049,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1972,7 +2063,10 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 11, 28, 2, 20, 5, tzinfo=timezone.utc), + }, 
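
The matcher-based assertions in these hunks come from the helpers exported by tests/conftest.py (IsNow, IsDatetime, IsStr). A minimal standalone sketch of how they compare against real datetimes, assuming the conftest names behave like the dirty_equals matchers of the same name:

    from datetime import datetime, timezone

    from dirty_equals import IsDatetime, IsNow

    # IsNow matches a datetime within a small delta of the current time.
    assert datetime.now(tz=timezone.utc) == IsNow(tz=timezone.utc)
    # IsDatetime() matches any datetime, regardless of its exact value.
    assert datetime(2024, 1, 1, tzinfo=timezone.utc) == IsDatetime()
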
provider_response_id='049b5c7704554d3396e727a95cb6d947', finish_reason='stop', run_id=IsStr(), @@ -2008,6 +2102,7 @@ async def test_image_url_input(allow_model_requests: None): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2017,7 +2112,10 @@ async def test_image_url_input(allow_model_requests: None): timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2050,6 +2148,7 @@ async def test_image_as_binary_content_input(allow_model_requests: None): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2059,7 +2158,10 @@ async def test_image_as_binary_content_input(allow_model_requests: None): timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2095,6 +2197,7 @@ async def test_pdf_url_input(allow_model_requests: None): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2104,7 +2207,10 @@ async def test_pdf_url_input(allow_model_requests: None): timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2134,6 +2240,7 @@ async def test_pdf_as_binary_content_input(allow_model_requests: None): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2143,7 +2250,10 @@ async def test_pdf_as_binary_content_input(allow_model_requests: None): timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2230,6 +2340,7 @@ async def test_mistral_model_instructions(allow_model_requests: None, mistral_ap [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2240,7 +2351,10 @@ async def test_mistral_model_instructions(allow_model_requests: None, mistral_ap timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2260,6 +2374,7 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2279,7 +2394,10 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap 
timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 9, 5, 22, 29, 38, tzinfo=timezone.utc), + }, provider_response_id='resp_68bb6452990081968f5aff503a55e3b903498c8aa840cf12', finish_reason='stop', run_id=IsStr(), @@ -2302,6 +2420,7 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2314,7 +2433,10 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 5, 22, 30, tzinfo=timezone.utc), + }, provider_response_id='9abe8b736bff46af8e979b52334a57cd', finish_reason='stop', run_id=IsStr(), @@ -2345,6 +2467,7 @@ async def test_mistral_model_thinking_part_iter(allow_model_requests: None, mist timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2379,7 +2502,10 @@ async def test_mistral_model_thinking_part_iter(allow_model_requests: None, mist timestamp=IsDatetime(), provider_name='mistral', provider_url='https://api.mistral.ai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 11, 28, 2, 19, 53, tzinfo=timezone.utc), + }, provider_response_id='9f9d90210f194076abeee223863eaaf0', finish_reason='stop', run_id=IsStr(), diff --git a/tests/models/test_model_function.py b/tests/models/test_model_function.py index 196b140454..f774682490 100644 --- a/tests/models/test_model_function.py +++ b/tests/models/test_model_function.py @@ -27,7 +27,7 @@ from pydantic_ai.result import RunUsage from pydantic_ai.usage import RequestUsage -from ..conftest import IsNow, IsStr +from ..conftest import IsDatetime, IsNow, IsStr pytestmark = pytest.mark.anyio @@ -68,6 +68,7 @@ def test_simple(): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -86,6 +87,7 @@ def test_simple(): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -97,6 +99,7 @@ def test_simple(): ), ModelRequest( parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -167,6 +170,7 @@ def test_weather(): [ ModelRequest( parts=[UserPromptPart(content='London', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -189,6 +193,7 @@ def test_weather(): tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -207,6 +212,7 @@ def test_weather(): tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -371,6 +377,7 @@ def test_call_all(): SystemPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -404,6 +411,7 @@ def test_call_all(): tool_name='quz', content='a', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), 
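
The test_model_function.py hunks add timestamp=IsDatetime() to every expected ModelRequest, since requests now carry their own creation time. A sketch of the resulting assertion pattern, assuming the timestamp field introduced by this change and a TestModel-backed agent like those in this file:

    from dirty_equals import IsDatetime

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelRequest
    from pydantic_ai.models.test import TestModel

    agent = Agent(TestModel())
    result = agent.run_sync('Hello')

    # The first message in the history is the stamped ModelRequest.
    request = result.all_messages()[0]
    assert isinstance(request, ModelRequest)
    assert request.timestamp == IsDatetime()
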
ModelResponse( @@ -477,6 +485,7 @@ async def test_stream_text(): [ ModelRequest( parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_model_test.py b/tests/models/test_model_test.py index f7a6809a71..ae357f481c 100644 --- a/tests/models/test_model_test.py +++ b/tests/models/test_model_test.py @@ -34,7 +34,7 @@ from pydantic_ai.models.test import TestModel, _chars, _JsonSchemaTestData # pyright: ignore[reportPrivateUsage] from pydantic_ai.usage import RequestUsage, RunUsage -from ..conftest import IsNow, IsStr +from ..conftest import IsDatetime, IsNow, IsStr def test_call_one(): @@ -78,6 +78,7 @@ def test_custom_output_args(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -102,6 +103,7 @@ def test_custom_output_args(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -125,6 +127,7 @@ class Foo(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -149,6 +152,7 @@ class Foo(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -168,6 +172,7 @@ def test_output_type(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -192,6 +197,7 @@ def test_output_type(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -218,6 +224,7 @@ async def my_ret(x: int) -> str: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -236,6 +243,7 @@ async def my_ret(x: int) -> str: tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -251,6 +259,7 @@ async def my_ret(x: int) -> str: tool_name='my_ret', content='1', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 74cb3c1414..b6599965bc 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -125,30 +125,38 @@ async def test_request_simple_success(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='openai', provider_url='https://api.openai.com/v1', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='openai', provider_url='https://api.openai.com/v1', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', 
run_id=IsStr(), @@ -195,6 +203,42 @@ async def test_request_simple_usage(allow_model_requests: None): ) +async def test_response_with_created_timestamp_but_no_provider_details(allow_model_requests: None): + class MinimalOpenAIChatModel(OpenAIChatModel): + def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any] | None: + return None + + c = completion_message(ChatCompletionMessage(content='world', role='assistant')) + mock_client = MockOpenAI.create_mock(c) + m = MinimalOpenAIChatModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client)) + agent = Agent(m) + + result = await agent.run('hello') + assert result.output == 'world' + assert result.all_messages() == snapshot( + [ + ModelRequest( + parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelResponse( + parts=[TextPart(content='world')], + model_name='gpt-4o-123', + timestamp=IsNow(tz=timezone.utc), + provider_name='openai', + provider_url='https://api.openai.com/v1', + provider_details={ + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, + provider_response_id='123', + finish_reason='stop', + run_id=IsStr(), + ), + ] + ) + + async def test_openai_chat_image_detail_vendor_metadata(allow_model_requests: None): c = completion_message( ChatCompletionMessage(content='done', role='assistant'), @@ -240,6 +284,7 @@ async def test_request_structured_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -251,10 +296,13 @@ async def test_request_structured_response(allow_model_requests: None): ) ], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -268,6 +316,7 @@ async def test_request_structured_response(allow_model_requests: None): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -336,6 +385,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -352,10 +402,13 @@ async def get_location(loc_name: str) -> str: output_tokens=1, ), model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -369,6 +422,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -385,10 +439,13 @@ async def get_location(loc_name: str) -> str: output_tokens=2, ), model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1', - 
provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -402,15 +459,19 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -484,7 +545,10 @@ async def test_stream_text_finish_reason(allow_model_requests: None): timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', ) @@ -899,6 +963,7 @@ async def get_image() -> ImageUrl: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -917,7 +982,10 @@ async def get_image() -> ImageUrl: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 29, 21, 7, 59, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRmTHlrARTzAHK1na9s80xDlQGYPX', finish_reason='tool_call', run_id=IsStr(), @@ -941,6 +1009,7 @@ async def get_image() -> ImageUrl: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -959,7 +1028,10 @@ async def get_image() -> ImageUrl: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 29, 21, 8, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRmTI0Y2zmkGw27kLarhsmiFQTGxR', finish_reason='stop', run_id=IsStr(), @@ -988,6 +1060,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1006,7 +1079,10 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 29, 20, 21, 33, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRlkLhPc87BdohVobEJJCGq3rUAG2', finish_reason='tool_call', run_id=IsStr(), @@ -1027,6 +1103,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1045,7 +1122,10 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 29, 20, 21, 36, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRlkORPA5rXMV3uzcOcgK4eQFKCVW', finish_reason='stop', run_id=IsStr(), @@ -1251,6 
+1331,7 @@ async def test_message_history_can_start_with_model_response(allow_model_request timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1269,7 +1350,10 @@ async def test_message_history_can_start_with_model_response(allow_model_request timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 11, 22, 10, 1, 40, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-Ceeiy4ivEE0hcL1EX5ZfLuW5xNUXB', finish_reason='stop', run_id=IsStr(), @@ -2056,6 +2140,7 @@ async def test_openai_instructions(allow_model_requests: None, openai_api_key: s [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2075,7 +2160,10 @@ async def test_openai_instructions(allow_model_requests: None, openai_api_key: s timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 7, 16, 30, 56, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BJjf61mLb9z5H45ClJzbx0UWKwjo1', finish_reason='stop', run_id=IsStr(), @@ -2106,6 +2194,7 @@ async def get_temperature(city: str) -> float: [ ModelRequest( parts=[UserPromptPart(content='What is the temperature in Tokyo?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2125,7 +2214,10 @@ async def get_temperature(city: str) -> float: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 16, 13, 37, 14, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BMxEwRA0p0gJ52oKS7806KAlfMhqq', finish_reason='tool_call', run_id=IsStr(), @@ -2136,6 +2228,7 @@ async def get_temperature(city: str) -> float: tool_name='get_temperature', content=20.0, tool_call_id=IsStr(), timestamp=IsDatetime() ) ], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2155,7 +2248,10 @@ async def get_temperature(city: str) -> float: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 16, 13, 37, 15, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BMxEx6B8JEj6oDC45MOWKp0phg8UP', finish_reason='stop', run_id=IsStr(), @@ -2175,6 +2271,7 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2193,7 +2290,10 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 9, 10, 22, 21, 57, tzinfo=timezone.utc), + }, provider_response_id='resp_68c1fa0523248197888681b898567bde093f57e27128848a', finish_reason='stop', 
             run_id=IsStr(),
@@ -2215,6 +2315,7 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2233,7 +2334,10 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 9, 10, 22, 22, 24, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-CENUmtwDD0HdvTUYL6lUeijDtxrZL',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2498,7 +2602,10 @@ def test_openai_response_timestamp_milliseconds(allow_model_requests: None):
     result = agent.run_sync('Hello')
     response = cast(ModelResponse, result.all_messages()[-1])
-    assert response.timestamp == snapshot(datetime(2025, 6, 1, 3, 7, 48, tzinfo=timezone.utc))
+    assert response.timestamp == IsNow(tz=timezone.utc)
+    assert response.provider_details == snapshot(
+        {'finish_reason': 'stop', 'timestamp': datetime(2025, 6, 1, 3, 7, 48, tzinfo=timezone.utc)}
+    )


 async def test_openai_tool_output(allow_model_requests: None, openai_api_key: str):
@@ -2526,6 +2633,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2544,7 +2652,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 24, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXk0dWkG4hfPt0lph4oFO35iT73I',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2558,6 +2669,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2582,7 +2694,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 25, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXk1xGHYzbhXgUkSutK08bdoNv5s',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2596,6 +2711,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
     ]
@@ -2626,6 +2742,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2646,7 +2763,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 9, 21, 20, 53, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BgeDFS85bfHosRFEEAvq8reaCPCZ8',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2660,6 +2780,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2678,7 +2799,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 9, 21, 20, 54, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BgeDGX9eDyVrEI56aP2vtIHahBzFH',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2714,6 +2838,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2734,7 +2859,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 22, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXjyBwGuZrtuuSzNCeaWMpGv2MZ3',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2748,6 +2876,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2766,7 +2895,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 23, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXjzYGu67dhTy5r8KmjJvQ4HhDVO',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2804,6 +2936,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2824,7 +2957,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 9, 23, 21, 26, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgg5utuCSXMQ38j0n2qgfdQKcR9VD',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2838,6 +2974,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2860,7 +2997,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 9, 23, 21, 27, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgg5vrxUtCDlvgMreoxYxPaKxANmd',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2894,6 +3034,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2914,7 +3055,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 35, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh27PeOaFW6qmF04qC5uI2H9mviw',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2928,6 +3072,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2946,7 +3091,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 36, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh28advCSFhGHPnzUevVS6g6Uwg0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2984,6 +3132,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3004,7 +3153,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 38, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh2AW2NXGgMc7iS639MJXNRgtatR',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -3018,6 +3170,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3040,7 +3193,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh2BthuopRnSqCuUgMbBnOqgkDHC',
             finish_reason='stop',
             run_id=IsStr(),
diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py
index b03e99bb91..16041fd003 100644
--- a/tests/models/test_openai_responses.py
+++ b/tests/models/test_openai_responses.py
@@ -1,6 +1,7 @@
 import json
 import re
 from dataclasses import replace
+from datetime import datetime, timezone
 from typing import Any, Literal, cast

 import pytest
@@ -50,7 +51,7 @@
 from pydantic_ai.tools import ToolDefinition
 from pydantic_ai.usage import RequestUsage, RunUsage

-from ..conftest import IsBytes, IsDatetime, IsStr, TestEnv, try_import
+from ..conftest import IsBytes, IsDatetime, IsNow, IsStr, TestEnv, try_import
 from .mock_openai import MockOpenAIResponses, get_mock_responses_kwargs, response_message

 with try_import() as imports_successful:
@@ -286,6 +287,7 @@ async def get_location(loc_name: str) -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -308,7 +310,10 @@ async def get_location(loc_name: str) -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 3, 27, 12, 42, 44, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67e547c48c9481918c5c4394464ce0c60ae6111e84dd5c08',
             finish_reason='stop',
             run_id=IsStr(),
@@ -328,6 +333,7 @@ async def get_location(loc_name: str) -> str:
                     timestamp=IsDatetime(),
                 ),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -346,7 +352,10 @@ async def get_location(loc_name: str) -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 3, 27, 12, 42, 45, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67e547c5a2f08191802a1f43620f348503a2086afed73b47',
             finish_reason='stop',
             run_id=IsStr(),
@@ -376,6 +385,7 @@ async def get_image() -> BinaryContent:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -392,7 +402,10 @@ async def get_image() -> BinaryContent:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 4, 29, 20, 21, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_681134d3aa3481919ca581a267db1e510fe7a5a4e2123dc3',
             finish_reason='stop',
             run_id=IsStr(),
@@ -413,6 +426,7 @@ async def get_image() -> BinaryContent:
                     timestamp=IsDatetime(),
                 ),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -427,7 +441,10 @@ async def get_image() -> BinaryContent:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 4, 29, 20, 21, 41, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_681134d53c48819198ce7b89db78dffd02cbfeaababb040c',
             finish_reason='stop',
             run_id=IsStr(),
@@ -530,7 +547,10 @@ async def get_capital(country: str) -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 3, 27, 13, 37, 38, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67e554a21aa88191b65876ac5e5bbe0406c52f0e511c76ed',
             finish_reason='stop',
         )
@@ -564,6 +584,7 @@ async def test_openai_responses_model_builtin_tools_web_search(allow_model_reque
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -709,7 +730,10 @@ async def test_openai_responses_model_builtin_tools_web_search(allow_model_reque
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 23, 19, 54, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0e3d55e9502941380068c4aa9a62f48195a373978ed720ac63',
             finish_reason='stop',
             run_id=IsStr(),
@@ -728,6 +752,7 @@ async def test_openai_responses_model_instructions(allow_model_requests: None, o
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -743,7 +768,10 @@ async def test_openai_responses_model_instructions(allow_model_requests: None, o
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 4, 7, 16, 31, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67f3fdfd9fa08191a3d5825db81b8df6003bc73febb56d77',
             finish_reason='stop',
             run_id=IsStr(),
@@ -766,6 +794,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -808,7 +837,10 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 20, 27, 26, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_028829e50fbcad090068c9c82e1e0081958ddc581008b39428',
             finish_reason='stop',
             run_id=IsStr(),
@@ -827,6 +859,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -869,7 +902,10 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 20, 27, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_028829e50fbcad090068c9c83b9fb88195b6b84a32e1fc83c0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -898,6 +934,7 @@ async def test_openai_responses_model_web_search_tool_with_user_location(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -940,7 +977,10 @@ async def test_openai_responses_model_web_search_tool_with_user_location(
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 23, 21, 23, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0b385a0fdc82fd920068c4aaf3ced88197a88711e356b032c4',
             finish_reason='stop',
             run_id=IsStr(),
@@ -970,6 +1010,7 @@ async def test_openai_responses_model_web_search_tool_with_invalid_region(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -1012,7 +1053,10 @@ async def test_openai_responses_model_web_search_tool_with_invalid_region(
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 23, 21, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0b4f29854724a3120068c4ab0b660081919707b95b47552782',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1049,6 +1093,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -1097,7 +1142,10 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 21, 13, 32, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_00a60507bf41223d0068c9d2fbf93481a0ba2a7796ae2cab4c',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1269,6 +1317,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -1317,7 +1366,10 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 21, 13, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_00a60507bf41223d0068c9d31574d881a090c232646860a771',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1410,6 +1462,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1426,7 +1479,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 43, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0b40a8819cb8d55594bc2c232a001fd29e2d5573f7',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1440,6 +1496,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1456,7 +1513,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 44, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0bfda8819ea65458cd7cc389b801dc81d4bc91f560',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1470,6 +1530,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -1501,6 +1562,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1517,7 +1579,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 45, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0d9494819ea4f123bba707c9ee0356a60c98816d6a',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1531,6 +1596,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1545,7 +1611,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 46, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0e2b28819d9c828ef4ee526d6a03434b607c02582d',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1582,6 +1651,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1598,7 +1668,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0f220081a1a621d6bcdc7f31a50b8591d9001d2329',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1612,6 +1685,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1626,7 +1700,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0fde708192989000a62809c6e5020197534e39cc1f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1665,6 +1742,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1681,7 +1759,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 48, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f10f2d081a39b3438f413b3bafc0dd57d732903c563',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1695,6 +1776,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1709,7 +1791,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 49, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f119830819da162aa6e10552035061ad97e2eef7871',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1744,6 +1829,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1760,7 +1846,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 11, 46, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f12d63881a1830201ed101ecfbf02f8ef7f2fb42b50',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1774,6 +1863,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1788,7 +1878,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 11, 55, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f1b556081918d64c9088a470bf0044fdb7d019d4115',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1827,6 +1920,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1843,7 +1937,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 11, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f1d38e081a1ac828acda978aa6b08e79646fe74d5ee',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1857,6 +1954,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1871,7 +1969,10 @@ async def get_user_country() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 12, 8, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f28c1b081a1ae73cbbee012ee4906b4ab2d00d03024',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2074,6 +2175,7 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2083,6 +2185,7 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -2104,6 +2207,7 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
         [
             ModelRequest(
                 parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2129,7 +2233,10 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 22, 8, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42c902794819cb9335264c342f65407460311b0c8d3de',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2150,6 +2257,7 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2174,7 +2282,10 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 22, 43, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42cb3d520819c9d28b07036e9059507460311b0c8d3de',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2203,6 +2314,7 @@ async def test_openai_responses_thinking_part_from_other_model(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2254,6 +2366,7 @@ async def test_openai_responses_thinking_part_from_other_model(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2276,7 +2389,10 @@ async def test_openai_responses_thinking_part_from_other_model(
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 23, 30, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42ce277ac8193ba08881bcefabaf70ad492c7955fc6fc',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2308,6 +2424,7 @@ async def test_openai_responses_thinking_part_iter(allow_model_requests: None, o
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2340,7 +2457,10 @@ async def test_openai_responses_thinking_part_iter(allow_model_requests: None, o
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 24, 15, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42d0fb418819dbfa579f69406b49508fbf9b1584184ff',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2386,6 +2506,7 @@ def update_plan(plan: str) -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt",
             run_id=IsStr(),
         ),
@@ -2413,7 +2534,10 @@ def update_plan(plan: str) -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 24, 40, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42d28772c819684459966ee2201ed0e8bc41441c948f6',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2427,6 +2551,7 @@ def update_plan(plan: str) -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt",
             run_id=IsStr(),
         ),
@@ -2439,7 +2564,10 @@ def update_plan(plan: str) -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 25, 3, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42d3fd6a08196bce23d6be960ff8a0e8bc41441c948f6',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2480,6 +2608,7 @@ async def test_openai_responses_thinking_without_summary(allow_model_requests: N
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2491,6 +2620,7 @@ async def test_openai_responses_thinking_without_summary(allow_model_requests: N
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -2554,6 +2684,7 @@ async def test_openai_responses_thinking_with_multiple_summaries(allow_model_req
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2568,6 +2699,7 @@ async def test_openai_responses_thinking_with_multiple_summaries(allow_model_req
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -2620,6 +2752,7 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2637,7 +2770,10 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 27, 43, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42ddf9bbc8194aa7b97304dd909cb0202c9ad459e0d23',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2674,6 +2810,7 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2691,7 +2828,10 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 27, 48, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42de4afcc819f995a1c59fe87c9d5051f82c608a83beb',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2723,6 +2863,7 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2776,7 +2917,10 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 17, 21, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdba511c7081a389e67b16621029c609b7445677780c8f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2795,6 +2939,7 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2812,7 +2957,10 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 17, 46, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdba6a610481a3b4533f345bea8a7b09b7445677780c8f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2850,6 +2998,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2911,7 +3060,10 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream(
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 11, 22, 43, 36, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c35098e6fc819e80fb94b25b7d031b0f2d670b80edc507',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3671,6 +3823,7 @@ def get_meaning_of_life() -> int:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3687,7 +3840,10 @@ def get_meaning_of_life() -> int:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 18, 18, 29, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cc4fa5603481958e2143685133fe530548824120ffcf74',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3701,6 +3857,7 @@ def get_meaning_of_life() -> int:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3719,7 +3876,10 @@ def get_meaning_of_life() -> int:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 18, 18, 29, 58, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cc4fa6a8a881a187b0fe1603057bff0307c6d4d2ee5985',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3782,6 +3942,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3852,7 +4013,10 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 56, 34, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdc382bc98819083a5b47ec92e077b0187028ba77f15f7',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3878,6 +4042,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -4012,7 +4177,10 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 57, 1, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdc39da72481909e0512fef9d646240187028ba77f15f7',
             finish_reason='stop',
             run_id=IsStr(),
@@ -4056,6 +4224,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -4098,7 +4267,10 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 47, 35, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_06c1a26fd89d07f20068dd9367869c819788cb28e6f19eff9b',
             finish_reason='stop',
             run_id=IsStr(),
@@ -5545,6 +5717,7 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5593,7 +5766,10 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 57, 58, tzinfo=timezone.utc),
+            },
             provider_response_id=IsStr(),
             finish_reason='stop',
             run_id=IsStr(),
@@ -5618,6 +5794,7 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5666,7 +5843,10 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 59, 28, tzinfo=timezone.utc),
+            },
             provider_response_id=IsStr(),
             finish_reason='stop',
             run_id=IsStr(),
@@ -5713,6 +5893,7 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5759,7 +5940,10 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 40, 2, tzinfo=timezone.utc),
+            },
             provider_response_id=IsStr(),
             finish_reason='stop',
             run_id=IsStr(),
@@ -5894,6 +6078,7 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5939,7 +6124,10 @@ async def test_openai_responses_image_generation_tool_without_image_output(
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 23, 49, 51, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdec1f3290819f99d9caba8703b251079003437d26d0c0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -5952,6 +6140,7 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5997,7 +6186,10 @@ async def test_openai_responses_image_generation_tool_without_image_output(
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 23, 50, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdec61d0a0819fac14ed057a9946a1079003437d26d0c0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6059,6 +6251,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6102,7 +6295,10 @@ class Animal(BaseModel):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 38, 16, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0360827931d9421b0068dd8328c08c81a0ba854f245883906f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6115,6 +6311,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6137,7 +6334,10 @@ class Animal(BaseModel):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 39, 28, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0360827931d9421b0068dd8370a70081a09d6de822ee43bbc4',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6151,6 +6351,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -6176,6 +6377,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6222,7 +6424,10 @@ class Animal(BaseModel):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 41, 59, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_09b7ce6df817433c0068dd8407c37881a0ad817ef3cc3a3600',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6250,6 +6455,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6296,7 +6502,10 @@ class Animal(BaseModel):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 55, 9, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0d14a5e3c26c21180068dd871d439081908dc36e63fab0cedf',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6330,6 +6539,7 @@ async def get_animal() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6352,7 +6562,10 @@ async def get_animal() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 2, 36, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0481074da98340df0068dd88dceb1481918b1d167d99bc51cd',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6366,6 +6579,7 @@ async def get_animal() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6403,7 +6617,10 @@ async def get_animal() -> str:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 2, 56, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0481074da98340df0068dd88f0ba04819185a168065ef28040',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6434,6 +6651,7 @@ async def test_openai_responses_multiple_images(allow_model_requests: None, open
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6507,7 +6725,10 @@ async def test_openai_responses_multiple_images(allow_model_requests: None, open
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 28, 22, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0b6169df6e16e9690068dd80d64aec81919c65f238307673bb',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6538,6 +6759,7 @@ async def test_openai_responses_image_generation_jpeg(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6581,7 +6803,10 @@ async def test_openai_responses_image_generation_jpeg(allow_model_requests: None
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 21, 28, 13, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_08acbdf1ae54befc0068dd9ced226c8197a2e974b29c565407',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6642,6 +6867,7 @@ class CityLocation(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6664,7 +6890,10 @@ class CityLocation(BaseModel):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 13, 11, 30, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_001fd29e2d5573f70068ece2e6dfbc819c96557f0de72802be',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6678,6 +6907,7 @@ class CityLocation(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -6713,6 +6943,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -6839,7 +7070,10 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 23, 42, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0083938b3a28070e0068fabd81970881a0a1195f2cab45bd04',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6858,6 +7092,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -6887,7 +7122,10 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 23, 43, 25, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0083938b3a28070e0068fabd9d414881a089cf24784f80e021',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6936,6 +7174,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -7108,7 +7347,10 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 21, 40, 50, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4',
             finish_reason='stop',
             run_id=IsStr(),
@@ -7327,6 +7569,7 @@ async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model
             parts=[
                 UserPromptPart(content='What do I have on my Google Calendar for today?', timestamp=IsDatetime())
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -7487,7 +7730,10 @@ async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1/',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 21, 41, 13, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0558010cf1416a490068faa0f945bc81a0b6a6dfb7391030d5',
             finish_reason='stop',
             run_id=IsStr(),
@@ -7581,6 +7827,7 @@ async def test_openai_responses_raw_cot_only(allow_model_requests: None):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7597,6 +7844,7 @@ async def test_openai_responses_raw_cot_only(allow_model_requests: None):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7645,6 +7893,7 @@ async def test_openai_responses_raw_cot_with_summary(allow_model_requests: None)
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7662,6 +7911,7 @@ async def test_openai_responses_raw_cot_with_summary(allow_model_requests: None)
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7712,6 +7962,7 @@ async def test_openai_responses_multiple_summaries(allow_model_requests: None):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7731,6 +7982,7 @@ async def test_openai_responses_multiple_summaries(allow_model_requests: None):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7761,6 +8013,7 @@ async def test_openai_responses_raw_cot_stream_openrouter(allow_model_requests:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7785,7 +8038,10 @@ async def test_openai_responses_raw_cot_stream_openrouter(allow_model_requests:
             timestamp=IsDatetime(),
             provider_name='openrouter',
             provider_url='https://openrouter.ai/api/v1',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 11, 27, 17, 43, 31, tzinfo=timezone.utc),
+            },
             provider_response_id='gen-1764265411-Fu1iEX7h5MRWiL79lb94',
             finish_reason='stop',
             run_id=IsStr(),
@@ -7900,6 +8156,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7916,6 +8173,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7933,6 +8191,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                    timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7949,6 +8208,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7959,6 +8219,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7977,6 +8238,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7994,6 +8256,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -8010,6 +8273,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -8020,6 +8284,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -8038,6 +8303,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -8048,6 +8314,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -8064,6 +8331,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -8193,6 +8461,7 @@ async def test_web_search_call_action_find_in_page(allow_model_requests: None):
             timestamp=IsDatetime(),
             provider_name='openai',
             provider_url='https://api.openai.com/v1',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         )
diff --git a/tests/models/test_openrouter.py b/tests/models/test_openrouter.py
index f3fe1ae095..acad3003ef 100644
--- a/tests/models/test_openrouter.py
+++ b/tests/models/test_openrouter.py
@@ -1,3 +1,4 @@
+import datetime
 from collections.abc import Sequence
 from typing import Literal, cast
@@ -96,7 +97,20 @@ async def test_openrouter_stream_with_native_options(allow_model_requests: None,
         _ = [chunk async for chunk in stream]

-    assert stream.provider_details == snapshot({'finish_reason': 'completed', 'downstream_provider': 'xAI'})
+    assert stream.provider_details is not None
+    assert stream.provider_details == snapshot(
+        {
+            'timestamp': datetime.datetime(2025, 11, 2, 6, 14, 57, tzinfo=datetime.timezone.utc),
+            'finish_reason': 'completed',
+            'cost': 0.00333825,
+            'upstream_inference_cost': None,
+            'is_byok': False,
+            'downstream_provider': 'xAI',
+        }
+    )
+    # Explicitly verify native_finish_reason is 'completed' and wasn't overwritten by the
+    # final usage chunk (which has native_finish_reason: null, see cassette for details)
+    assert stream.provider_details['finish_reason'] == 'completed'
     assert stream.finish_reason == snapshot('stop')
@@ -327,6 +341,40 @@ async def test_openrouter_validate_error_response(openrouter_api_key: str) -> No
     )


+async def test_openrouter_with_provider_details_but_no_parent_details(openrouter_api_key: str) -> None:
+    from typing import Any
+
+    class TestOpenRouterModel(OpenRouterModel):
+        def _process_provider_details(self, response: ChatCompletion) -> dict[str, Any] | None:
+            from pydantic_ai.models.openrouter import (
+                _map_openrouter_provider_details,  # pyright: ignore[reportPrivateUsage]
+                _OpenRouterChatCompletion,  # pyright: ignore[reportPrivateUsage]
+            )
+
+            assert isinstance(response, _OpenRouterChatCompletion)
+            openrouter_details = _map_openrouter_provider_details(response)
+            return openrouter_details or None
+
+    provider = OpenRouterProvider(api_key=openrouter_api_key)
+    model = TestOpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider)
+
+    choice = Choice.model_construct(
+        index=0, message={'role': 'assistant', 'content': 'test'}, finish_reason='stop', native_finish_reason='stop'
+    )
+    response = ChatCompletion.model_construct(
+        id='test', choices=[choice], created=1704067200, object='chat.completion', model='test', provider='TestProvider'
+    )
+    result = model._process_response(response)  # type: ignore[reportPrivateUsage]
+
+    assert result.provider_details == snapshot(
+        {
+            'downstream_provider': 'TestProvider',
+            'finish_reason': 'stop',
+            'timestamp': datetime.datetime(2024, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
+        }
+    )
+
+
 async def test_openrouter_map_messages_reasoning(allow_model_requests: None, openrouter_api_key: str) -> None:
     provider = OpenRouterProvider(api_key=openrouter_api_key)
     model = OpenRouterModel('anthropic/claude-3.7-sonnet:thinking', provider=provider)
@@ -433,6 +481,29 @@ async def test_openrouter_streaming_reasoning(allow_model_requests: None, openro
     )


+async def test_openrouter_no_openrouter_details(openrouter_api_key: str) -> None:
+    """Test _process_provider_details when _map_openrouter_provider_details returns empty dict."""
+    from unittest.mock import patch
+
+    provider = OpenRouterProvider(api_key=openrouter_api_key)
+    model = OpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider)
+
+    choice = Choice.model_construct(
+        index=0, message={'role': 'assistant', 'content': 'test'}, finish_reason='stop', native_finish_reason='stop'
+    )
+    response = ChatCompletion.model_construct(
+        id='test', choices=[choice], created=1704067200, object='chat.completion', model='test', provider='TestProvider'
+    )
+
+    with patch('pydantic_ai.models.openrouter._map_openrouter_provider_details', return_value={}):
+        result = model._process_response(response)  # type: ignore[reportPrivateUsage]
+
+    # With empty openrouter_details, we should still get the parent's provider_details (timestamp + finish_reason)
+    assert result.provider_details == snapshot(
+        {'finish_reason': 'stop', 'timestamp': datetime.datetime(2024, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}
+    )
+
+
 async def test_openrouter_google_nested_schema(allow_model_requests: None, openrouter_api_key: str) -> None:
     """Test that nested schemas with $defs/$ref work correctly with OpenRouter + Gemini.
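# Context for the snapshots above (illustration only, not part of the patch): the
# expected datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) values follow from the
# mocked response's created=1704067200 field, which OpenAI-compatible APIs report
# as Unix epoch seconds. A minimal sketch of that conversion, stdlib only (the
# helper name is hypothetical):
from datetime import datetime, timezone

def created_to_timestamp(created: int) -> datetime:
    # Attach an explicit UTC tzinfo so snapshot comparisons are unambiguous.
    return datetime.fromtimestamp(created, tz=timezone.utc)

# 1704067200 seconds after 1970-01-01T00:00:00Z is exactly 2024-01-01T00:00:00Z.
assert created_to_timestamp(1704067200) == datetime(2024, 1, 1, tzinfo=timezone.utc)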
diff --git a/tests/models/test_outlines.py b/tests/models/test_outlines.py
index f0af90aa3d..c8b3d2cb1c 100644
--- a/tests/models/test_outlines.py
+++ b/tests/models/test_outlines.py
@@ -326,6 +326,7 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Answer in one word.',
             run_id=IsStr(),
         ),
@@ -342,6 +343,7 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Answer in one word.',
             run_id=IsStr(),
         ),
@@ -353,6 +355,7 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Answer in one word.',
             run_id=IsStr(),
         ),
@@ -374,6 +377,7 @@ def test_request_sync(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -404,6 +408,7 @@ async def test_request_async_model(mock_async_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -439,6 +444,7 @@ def test_request_image_binary(transformers_multimodal_model: OutlinesModel, bina
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -470,6 +476,7 @@ def test_request_image_url(transformers_multimodal_model: OutlinesModel) -> None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -526,6 +533,7 @@ class Box(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -544,7 +552,8 @@ def test_input_format(transformers_multimodal_model: OutlinesModel, binary_image
                 SystemPromptPart(content='You are a helpful assistance'),
                 UserPromptPart(content='Hello'),
                 RetryPromptPart(content='Failure'),
-            ]
+            ],
+            timestamp=IsDatetime(),
         ),
         ModelResponse(
             parts=[
@@ -566,7 +575,8 @@ def test_input_format(transformers_multimodal_model: OutlinesModel, binary_image
                         AudioUrl('https://example.com/audio.mp3'),
                     ]
                 )
-            ]
+            ],
+            timestamp=IsDatetime(),
         )
     ]
     with pytest.raises(
@@ -577,14 +587,18 @@ def test_input_format(transformers_multimodal_model: OutlinesModel, binary_image
     # unsupported: tool calls
     tool_call_message_history: list[ModelMessage] = [
         ModelResponse(parts=[ToolCallPart(tool_call_id='1', tool_name='get_location')]),
-        ModelRequest(parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')]),
+        ModelRequest(
+            parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')], timestamp=IsDatetime()
+        ),
     ]
     with pytest.raises(UserError, match='Tool calls are not supported for Outlines models yet.'):
         agent.run_sync('How are you doing?', message_history=tool_call_message_history)

     # unsupported: tool returns
     tool_return_message_history: list[ModelMessage] = [
-        ModelRequest(parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')])
+        ModelRequest(
+            parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')], timestamp=IsDatetime()
+        )
     ]
     with pytest.raises(UserError, match='Tool calls are not supported for Outlines models yet.'):
         agent.run_sync('How are you doing?', message_history=tool_return_message_history)
diff --git a/tests/test_a2a.py b/tests/test_a2a.py
index 4e5f74f476..e19e7e5710 100644
--- a/tests/test_a2a.py
+++ b/tests/test_a2a.py
@@ -1,4 +1,5 @@
 import uuid
+from datetime import timezone

 import anyio
 import httpx
@@ -21,7 +22,7 @@
 from pydantic_ai.models.function import AgentInfo, FunctionModel
 from pydantic_ai.usage import RequestUsage

-from .conftest import IsDatetime, IsStr, try_import
+from .conftest import IsDatetime, IsNow, IsStr, try_import

 with try_import() as imports_successful:
     from fasta2a.client import A2AClient
@@ -579,6 +580,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
         [
             ModelRequest(
                 parts=[UserPromptPart(content='First message', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             )
         ]
@@ -618,6 +620,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
         [
             ModelRequest(
                 parts=[UserPromptPart(content='First message', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -641,6 +644,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
                     ),
                     UserPromptPart(content='Second message', timestamp=IsDatetime()),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 6ce2d91c54..41b697d84e 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -185,6 +185,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -210,6 +211,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -228,6 +230,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -318,6 +321,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -336,6 +340,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -354,6 +359,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -462,6 +468,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -479,6 +486,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                     tool_call_id=IsStr(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -499,6 +507,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -511,6 +520,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
tool_name='final_result', content='foobar', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ) ) @@ -524,6 +534,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ) ) @@ -1050,6 +1061,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1074,6 +1086,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1098,6 +1111,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1137,6 +1151,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1154,6 +1169,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1329,6 +1345,7 @@ def say_world(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1385,6 +1402,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1409,6 +1427,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1423,6 +1442,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1447,6 +1467,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1787,6 +1808,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1823,6 +1845,7 @@ class Foo(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1888,6 +1911,7 @@ def return_foo_bar(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1935,6 +1959,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1959,6 +1984,7 @@ class CityLocation(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2016,6 +2042,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2033,6 +2060,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2063,6 +2091,7 @@ async def ret_a(x: 
str) -> str: SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2078,6 +2107,7 @@ async def ret_a(x: str) -> str: tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2099,6 +2129,7 @@ async def ret_a(x: str) -> str: SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2114,6 +2145,7 @@ async def ret_a(x: str) -> str: tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2125,6 +2157,7 @@ async def ret_a(x: str) -> str: ), ModelRequest( parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2164,6 +2197,7 @@ async def ret_a(x: str) -> str: SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2179,6 +2213,7 @@ async def ret_a(x: str) -> str: tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2190,6 +2225,7 @@ async def ret_a(x: str) -> str: ), ModelRequest( parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2228,6 +2264,7 @@ async def ret_a(x: str) -> str: SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2243,6 +2280,7 @@ async def ret_a(x: str) -> str: tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2267,6 +2305,7 @@ async def ret_a(x: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -2280,6 +2319,7 @@ async def ret_a(x: str) -> str: SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2295,6 +2335,7 @@ async def ret_a(x: str) -> str: tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2313,6 +2354,7 @@ async def ret_a(x: str) -> str: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), # second call, notice no repeated system prompt @@ -2320,6 +2362,7 @@ async def ret_a(x: str) -> str: parts=[ UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2338,6 +2381,7 @@ async def ret_a(x: str) -> str: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -2405,6 
+2449,7 @@ async def instructions(ctx: RunContext) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), instructions='New instructions', run_id=IsStr(), ), @@ -2463,6 +2508,7 @@ def test_tool() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2560,6 +2606,7 @@ async def test_message_history_ending_on_model_response_with_instructions(): [ ModelRequest( parts=[], + timestamp=IsNow(tz=timezone.utc), instructions="""\ Summarize this conversation to include all important facts about the user and what their interactions were about.\ @@ -2597,6 +2644,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2608,6 +2656,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: ), ModelRequest( parts=[], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2640,6 +2689,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2651,6 +2701,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: ), ModelRequest( parts=[], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2688,6 +2739,7 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2706,6 +2758,7 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2734,6 +2787,7 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2752,6 +2806,7 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3002,6 +3057,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover [ ModelRequest( parts=[UserPromptPart(content='test early strategy', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3043,6 +3099,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3094,6 +3151,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test early output tools', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3121,6 +3179,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3150,6 +3209,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test multiple final results', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3177,6 +3237,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> 
ModelResponse: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3233,6 +3294,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='test early strategy with final result in middle', timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3285,6 +3347,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3342,6 +3405,7 @@ def regular_tool(x: int) -> int: # pragma: no cover content='test early strategy with external tool call', timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3384,6 +3448,7 @@ def regular_tool(x: int) -> int: # pragma: no cover timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3435,6 +3500,7 @@ def regular_tool(x: int) -> int: content='test early strategy with deferred tool call', timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3460,6 +3526,7 @@ def regular_tool(x: int) -> int: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3491,6 +3558,7 @@ def regular_tool(x: int) -> int: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3515,6 +3583,7 @@ def regular_tool(x: int) -> int: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3539,6 +3608,7 @@ def regular_tool(x: int) -> int: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3595,6 +3665,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover [ ModelRequest( parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3651,6 +3722,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3702,6 +3774,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test exhaustive output tools', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3729,6 +3802,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3780,6 +3854,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test invalid first valid second', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3807,6 +3882,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3859,6 +3935,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test valid first invalid second', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3886,6 +3963,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: 
timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -3938,6 +4016,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test exhaustive with tool retry', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3965,6 +4044,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -4020,6 +4100,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: [ ModelRequest( parts=[UserPromptPart(content='test multiple final results', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4054,6 +4135,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id='second', ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -4117,6 +4199,7 @@ async def get_location(loc_name: str) -> str: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4138,6 +4221,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4166,6 +4250,7 @@ def test_nested_capture_run_messages() -> None: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4193,6 +4278,7 @@ def test_double_capture_run_messages() -> None: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4242,6 +4328,7 @@ async def func() -> str: SystemPromptPart(content=dynamic_value, timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), kind='request', ), @@ -4271,6 +4358,7 @@ async def func() -> str: ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), kind='request', ), @@ -4283,7 +4371,8 @@ async def func() -> str: kind='response', ), ModelRequest( - parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc))], + parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt')], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), kind='request', ), @@ -4329,6 +4418,7 @@ async def func(): ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), kind='request', ), @@ -4359,6 +4449,7 @@ async def func(): ), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), kind='request', ), @@ -4371,7 +4462,8 @@ async def func(): kind='response', ), ModelRequest( - parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc))], + parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt')], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), kind='request', ), @@ -4427,6 +4519,7 @@ async def foobar(x: str) -> str: [ ModelRequest( parts=[UserPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc))], + 
timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4445,6 +4538,7 @@ async def foobar(x: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4521,6 +4615,7 @@ def test_binary_content_serializable(): 'part_kind': 'user-prompt', } ], + 'timestamp': IsStr(), 'instructions': None, 'kind': 'request', 'run_id': IsStr(), @@ -4584,6 +4679,7 @@ def test_image_url_serializable_missing_media_type(): 'part_kind': 'user-prompt', } ], + 'timestamp': IsStr(), 'instructions': None, 'kind': 'request', 'run_id': IsStr(), @@ -4654,6 +4750,7 @@ def test_image_url_serializable(): 'part_kind': 'user-prompt', } ], + 'timestamp': IsStr(), 'instructions': None, 'kind': 'request', 'run_id': IsStr(), @@ -4777,6 +4874,7 @@ def get_image() -> BinaryContent: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ) ) @@ -4829,6 +4927,7 @@ def get_files(): timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ) ) @@ -4848,6 +4947,7 @@ def system_prompt() -> str: SystemPromptPart(content='A system prompt!', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), instructions='An instructions!', run_id=IsStr(), ) @@ -4872,6 +4972,7 @@ def empty_instructions() -> str: SystemPromptPart(content='A system prompt!', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), instructions='An instructions!', run_id=IsStr(), ) @@ -4887,6 +4988,7 @@ def test_instructions_both_instructions_and_system_prompt_are_set(): SystemPromptPart(content='A system prompt!', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), instructions='An instructions!', run_id=IsStr(), ) @@ -4904,6 +5006,7 @@ def instructions() -> str: assert result.all_messages()[0] == snapshot( ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ) @@ -4921,6 +5024,7 @@ def instructions_2() -> str: assert result.all_messages()[0] == snapshot( ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ) @@ -4940,6 +5044,7 @@ def test_instructions_with_message_history(): ), ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -4968,6 +5073,7 @@ def empty_instructions() -> str: assert result.all_messages()[0] == snapshot( ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions="""\ You are a helpful assistant. @@ -4984,6 +5090,7 @@ def test_instructions_during_run(): assert result.all_messages()[0] == snapshot( ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions="""\ You are a helpful assistant. 
Your task is to greet people.\ @@ -4996,6 +5103,7 @@ def test_instructions_during_run(): assert result2.all_messages()[0] == snapshot( ModelRequest( parts=[UserPromptPart(content='Hello again!', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions="""\ You are a helpful assistant.\ """, @@ -5024,7 +5132,12 @@ class Output(BaseModel): assert messages == snapshot( [ - ModelRequest(parts=[], instructions='Agent 2 instructions', run_id=IsStr()), + ModelRequest( + parts=[], + timestamp=IsNow(tz=timezone.utc), + instructions='Agent 2 instructions', + run_id=IsStr(), + ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args={'text': 'a'}, tool_call_id=IsStr())], usage=RequestUsage(input_tokens=51, output_tokens=9), @@ -5041,6 +5154,7 @@ class Output(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -5073,6 +5187,7 @@ def my_tool(x: int) -> int: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5091,6 +5206,7 @@ def my_tool(x: int) -> int: tool_name='my_tool', content=2, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5109,6 +5225,7 @@ def my_tool(x: int) -> int: tool_name='my_tool', content=4, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5180,6 +5297,7 @@ def foo_tool(foo: Foo) -> int: 'part_kind': 'retry-prompt', } ], + 'timestamp': IsStr(), 'instructions': None, 'kind': 'request', 'run_id': IsStr(), @@ -5252,6 +5370,7 @@ def analyze_data() -> ToolReturn: [ ModelRequest( parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5286,6 +5405,7 @@ def analyze_data() -> ToolReturn: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5333,6 +5453,7 @@ def analyze_data() -> ToolReturn: [ ModelRequest( parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5359,6 +5480,7 @@ def analyze_data() -> ToolReturn: timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5637,6 +5759,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5655,6 +5778,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5673,6 +5797,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5728,6 +5853,7 @@ async def only_if_plan_presented( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5752,6 +5878,7 @@ async def only_if_plan_presented( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5776,6 +5903,7 @@ async def only_if_plan_presented( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -6049,6 
+6177,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6066,6 +6195,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6123,6 +6253,7 @@ def create_file(path: str, content: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6153,6 +6284,7 @@ def create_file(path: str, content: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -6181,6 +6313,7 @@ def create_file(path: str, content: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6211,6 +6344,7 @@ def create_file(path: str, content: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelRequest( @@ -6228,6 +6362,7 @@ def create_file(path: str, content: str) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6258,6 +6393,7 @@ def create_file(path: str, content: str) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6406,6 +6542,7 @@ def update_file(ctx: RunContext, path: str, content: str) -> str: [ ModelRequest( parts=[UserPromptPart(content='Update .env file', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6443,6 +6580,7 @@ def update_file(ctx: RunContext, path: str, content: str) -> str: ), UserPromptPart(content='continue with the operation', timestamp=IsDatetime()), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6512,6 +6650,7 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6533,6 +6672,7 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6566,6 +6706,7 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -6836,6 +6977,7 @@ def roll_dice() -> int: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6854,6 +6996,7 @@ def roll_dice() -> int: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6878,6 +7021,7 @@ def roll_dice() -> int: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -6894,6 +7038,7 @@ def roll_dice() -> int: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6912,6 +7057,7 @@ def roll_dice() -> int: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6936,6 +7082,7 @@ def roll_dice() -> int: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -7041,6 +7188,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/tests/test_dbos.py 
b/tests/test_dbos.py index de99f1b1d0..ce000c7ccd 100644 --- a/tests/test_dbos.py +++ b/tests/test_dbos.py @@ -8,7 +8,7 @@ from collections.abc import AsyncIterable, AsyncIterator, Generator, Iterator from contextlib import contextmanager from dataclasses import dataclass, field -from datetime import datetime +from datetime import datetime, timezone from typing import Any, Literal import pytest @@ -44,7 +44,7 @@ from pydantic_ai.run import AgentRunResult from pydantic_ai.usage import RequestUsage -from .conftest import IsDatetime, IsStr +from .conftest import IsDatetime, IsNow, IsStr try: import importlib.metadata @@ -1404,6 +1404,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), instructions='Just call tools without asking for confirmation.', run_id=IsStr(), ), @@ -1434,7 +1435,10 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='tool_call', run_id=IsStr(), @@ -1454,6 +1458,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), instructions='Just call tools without asking for confirmation.', run_id=IsStr(), ), @@ -1475,7 +1480,10 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -1537,6 +1545,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), instructions='Just call tools without asking for confirmation.', run_id=IsStr(), ), @@ -1567,7 +1576,10 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='tool_call', run_id=IsStr(), @@ -1587,6 +1599,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), instructions='Just call tools without asking for confirmation.', run_id=IsStr(), ), @@ -1608,7 +1621,10 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), @@ -1646,6 +1662,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1670,7 +1687,10 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO 
timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='tool_call', run_id=IsStr(), @@ -1684,6 +1704,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1708,7 +1729,10 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='tool_call', run_id=IsStr(), @@ -1722,6 +1746,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1740,7 +1765,10 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id=IsStr(), finish_reason='stop', run_id=IsStr(), diff --git a/tests/test_history_processor.py b/tests/test_history_processor.py index 89e487dc8c..09dc6f4689 100644 --- a/tests/test_history_processor.py +++ b/tests/test_history_processor.py @@ -66,6 +66,7 @@ def no_op_history_processor(messages: list[ModelMessage]) -> list[ModelMessage]: ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -77,6 +78,7 @@ def no_op_history_processor(messages: list[ModelMessage]) -> list[ModelMessage]: ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -124,7 +126,8 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage] content='Processed answer', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -133,9 +136,13 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage] [ ModelRequest( parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), - ModelRequest(parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())]), + ModelRequest( + parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())], + timestamp=IsDatetime(), + ), ModelResponse( parts=[TextPart(content='Provider response')], usage=RequestUsage(input_tokens=54, output_tokens=2), @@ -183,7 +190,8 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage] content='Processed answer', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -192,9 +200,13 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage] [ ModelRequest( parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), - 
ModelRequest(parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())]), + ModelRequest( + parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())], + timestamp=IsDatetime(), + ), ModelResponse( parts=[TextPart(content='hello')], usage=RequestUsage(input_tokens=50, output_tokens=1), @@ -238,7 +250,8 @@ def capture_messages_processor(messages: list[ModelMessage]) -> list[ModelMessag content='New question', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -248,6 +261,7 @@ def capture_messages_processor(messages: list[ModelMessage]) -> list[ModelMessag ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -306,7 +320,10 @@ def second_processor(messages: list[ModelMessage]) -> list[ModelMessage]: [ ModelRequest(parts=[UserPromptPart(content='[SECOND] [FIRST] Question', timestamp=IsDatetime())]), ModelResponse(parts=[TextPart(content='Answer')], timestamp=IsDatetime()), - ModelRequest(parts=[UserPromptPart(content='[SECOND] [FIRST] New question', timestamp=IsDatetime())]), + ModelRequest( + parts=[UserPromptPart(content='[SECOND] [FIRST] New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), + ), ] ) assert captured_messages == result.all_messages() @@ -330,7 +347,8 @@ def second_processor(messages: list[ModelMessage]) -> list[ModelMessage]: content='[SECOND] [FIRST] New question', timestamp=IsDatetime(), ) - ] + ], + timestamp=IsDatetime(), ), ModelResponse( parts=[TextPart(content='Provider response')], @@ -371,7 +389,8 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]: content='Question 2', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -393,6 +412,7 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -441,7 +461,8 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]: content='Question 2', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -463,6 +484,7 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -509,7 +531,8 @@ def context_processor(ctx: RunContext[str], messages: list[ModelMessage]) -> lis content='PREFIX: test', timestamp=IsDatetime(), ) - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -522,7 +545,8 @@ def context_processor(ctx: RunContext[str], messages: list[ModelMessage]) -> lis content='PREFIX: test', timestamp=IsDatetime(), ) - ] + ], + timestamp=IsDatetime(), ), ModelResponse( parts=[TextPart(content='Provider response')], @@ -564,6 +588,7 @@ async def async_context_processor(ctx: RunContext[Any], messages: list[ModelMess timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ) ] @@ -578,6 +603,7 @@ async def async_context_processor(ctx: RunContext[Any], messages: list[ModelMess timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -642,7 +668,8 @@ class Deps: content='TEST: Question 2', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -663,7 +690,8 @@ class Deps: content='TEST: Question 2', timestamp=IsDatetime(), ) - ] + ], + timestamp=IsDatetime(), ), ModelResponse( parts=[TextPart(content='Provider 
response')], @@ -702,7 +730,8 @@ def return_new_history(messages: list[ModelMessage]) -> list[ModelMessage]: content='Modified message', timestamp=IsDatetime(), ) - ] + ], + timestamp=IsDatetime(), ) ] ) @@ -715,7 +744,8 @@ def return_new_history(messages: list[ModelMessage]) -> list[ModelMessage]: content='Modified message', timestamp=IsDatetime(), ) - ] + ], + timestamp=IsDatetime(), ), ModelResponse( parts=[TextPart(content='Provider response')], @@ -774,6 +804,7 @@ def __call__(self, messages: list[ModelMessage]) -> list[ModelMessage]: ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -785,6 +816,7 @@ def __call__(self, messages: list[ModelMessage]) -> list[ModelMessage]: ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -822,6 +854,7 @@ def __call__(self, _: RunContext, messages: list[ModelMessage]) -> list[ModelMes ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -833,6 +866,7 @@ def __call__(self, _: RunContext, messages: list[ModelMessage]) -> list[ModelMes ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()), ModelRequest( parts=[UserPromptPart(content='New question', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git a/tests/test_logfire.py b/tests/test_logfire.py index b33e8702e0..397cf05d61 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -22,7 +22,7 @@ from pydantic_ai.toolsets.function import FunctionToolset from pydantic_ai.toolsets.wrapper import WrapperToolset -from .conftest import IsStr +from .conftest import IsDatetime, IsStr try: import logfire @@ -2740,7 +2740,11 @@ def instructions(ctx: RunContext[None]): result = my_agent.run_sync( 'Hello', message_history=[ - ModelRequest(parts=[UserPromptPart(content='Hi')], instructions='Instructions from a previous agent run'), + ModelRequest( + parts=[UserPromptPart(content='Hi')], + instructions='Instructions from a previous agent run', + timestamp=IsDatetime(), + ), ModelResponse(parts=[TextPart(content='Hello')]), ], output_type=MyOutput, diff --git a/tests/test_mcp.py b/tests/test_mcp.py index 02bab17cc3..1de841e8f7 100644 --- a/tests/test_mcp.py +++ b/tests/test_mcp.py @@ -226,6 +226,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent) timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -250,7 +251,10 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent) timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRlnvvqIPFofAtKqtQKMWZkgXhzlT', finish_reason='tool_call', run_id=IsStr(), @@ -264,6 +268,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent) timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -282,7 +287,10 @@ async def 
test_agent_with_stdio_server(allow_model_requests: None, agent: Agent) timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRlnyjUo5wlyqvdNdM5I8vIWjo1qF', finish_reason='stop', run_id=IsStr(), @@ -397,6 +405,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -421,7 +430,10 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent): timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRlo3e1Ud2lnvkddMilmwC7LAemiy', finish_reason='tool_call', run_id=IsStr(), @@ -435,6 +447,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent): timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -457,7 +470,10 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent): timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRlo41LxqBYgGKWgGrQn67fQacOLp', finish_reason='stop', run_id=IsStr(), @@ -479,6 +495,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -503,7 +520,10 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRmhyweJVYonarb7s9ckIMSHf2vHo', finish_reason='tool_call', run_id=IsStr(), @@ -517,6 +537,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -535,7 +556,10 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRmhzqXFObpYwSzREMpJvX9kbDikR', finish_reason='stop', run_id=IsStr(), @@ -557,6 +581,7 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -581,7 +606,10 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BwdHSFe0EykAOpf0LWZzsWAodIQzb', finish_reason='tool_call', run_id=IsStr(), @@ -595,6 +623,7 @@ async def 
test_tool_returning_text_resource_link(allow_model_requests: None, age timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -613,7 +642,10 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BwdHTIlBZWzXJPBR8VTOdC4O57ZQA', finish_reason='stop', run_id=IsStr(), @@ -637,6 +669,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -661,7 +694,10 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRlo7KYJVXuNZ5lLLdYcKZDsX2CHb', finish_reason='tool_call', run_id=IsStr(), @@ -676,6 +712,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent: ), UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -698,7 +735,10 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent: timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BRloBGHh27w3fQKwxq4fX2cPuZJa9', finish_reason='stop', run_id=IsStr(), @@ -724,6 +764,7 @@ async def test_tool_returning_image_resource_link( timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -748,7 +789,10 @@ async def test_tool_returning_image_resource_link( timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BwdHygYePH1mZgHo2Xxzib0Y7sId7', finish_reason='tool_call', run_id=IsStr(), @@ -763,6 +807,7 @@ async def test_tool_returning_image_resource_link( ), UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -785,7 +830,10 @@ async def test_tool_returning_image_resource_link( timestamp=IsDatetime(), provider_name='openai', provider_url='https://api.openai.com/v1/', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': IsDatetime(), + }, provider_response_id='chatcmpl-BwdI2D2r9dvqq3pbsA0qgwKDEdTtD', finish_reason='stop', run_id=IsStr(), @@ -805,6 +853,7 @@ async def test_tool_returning_audio_resource( [ ModelRequest( parts=[UserPromptPart(content="What's the content of the audio resource?", timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -831,6 +880,7 @@ async def test_tool_returning_audio_resource( ), UserPromptPart(content=['This is file 2d36ae:', audio_content], timestamp=IsDatetime()), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -870,6 +920,7 
@@ async def test_tool_returning_audio_resource_link(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -909,6 +960,7 @@ async def test_tool_returning_audio_resource_link(
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -945,6 +997,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -969,7 +1022,10 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloGQJWIX0Qk7gtNzF4s2Fez0O29',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -990,6 +1046,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1008,7 +1065,10 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloJHR654fSD0fcvLWZxtKtn0pag',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1030,6 +1090,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1048,7 +1109,10 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloOs7Bb2tq8wJyy9Rv7SQ7L65a7',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1062,6 +1126,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1080,7 +1145,10 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloPczU1HSCWnreyo21DdNtdOM7L',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1102,6 +1170,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1124,7 +1193,10 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-CLbP82ODQMEznhobUKdq6Rjn9Aa12',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1138,6 +1210,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1156,7 +1229,10 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-CLbPAOYN3jPYdvYeD8JNOOXF5N554',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1180,6 +1256,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1204,7 +1281,10 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloSNg7aGSp1rXDkhInjMIUHKd7A',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1218,6 +1298,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1242,7 +1323,10 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloTvSkFeX4DZKQLqfH9KbQkWlpt',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1256,6 +1340,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1278,7 +1363,10 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloU3MhnqNEqujs28a3ofRbs7VPF',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1300,6 +1388,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1318,7 +1407,10 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloX2RokWc9j9PAXAuNXGR73WNqY',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1332,6 +1424,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1350,7 +1443,10 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloYWGujk8yE94gfVSsM1T1Ol2Ej',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1374,6 +1470,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1398,7 +1495,10 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlobKLgm6vf79c9O8sloZaYx3coC',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1424,6 +1524,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1446,7 +1547,10 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloepWR5NJpTgSqFBGTSPeM1SWm8',
                 finish_reason='stop',
                 run_id=IsStr(),
diff --git a/tests/test_messages.py b/tests/test_messages.py
index f5aaf972d4..3e0868c2fa 100644
--- a/tests/test_messages.py
+++ b/tests/test_messages.py
@@ -449,7 +449,7 @@ def test_pre_usage_refactor_messages_deserializable():
                     content='What is the capital of Mexico?',
                     timestamp=IsNow(tz=timezone.utc),
                 )
-            ]
+            ],
         ),
         ModelResponse(
             parts=[TextPart(content='Mexico City.')],
diff --git a/tests/test_prefect.py b/tests/test_prefect.py
index b1c18b9803..ec27c4a86a 100644
--- a/tests/test_prefect.py
+++ b/tests/test_prefect.py
@@ -68,7 +68,7 @@
 from inline_snapshot import snapshot
 
-from .conftest import IsStr
+from .conftest import IsDatetime, IsStr
 
 pytestmark = [
     pytest.mark.anyio,
@@ -1037,7 +1037,9 @@ async def test_cache_policy_custom():
 
     # First set of messages
     messages1 = [
-        ModelRequest(parts=[UserPromptPart(content='What is the capital of France?', timestamp=time1)]),
+        ModelRequest(
+            parts=[UserPromptPart(content='What is the capital of France?', timestamp=time1)], timestamp=IsDatetime()
+        ),
         ModelResponse(
             parts=[TextPart(content='The capital of France is Paris.')],
             usage=RequestUsage(input_tokens=10, output_tokens=10),
@@ -1048,7 +1050,9 @@ async def test_cache_policy_custom():
 
     # Second set of messages - same content, different timestamps
     messages2 = [
-        ModelRequest(parts=[UserPromptPart(content='What is the capital of France?', timestamp=time2)]),
+        ModelRequest(
+            parts=[UserPromptPart(content='What is the capital of France?', timestamp=time2)], timestamp=IsDatetime()
+        ),
         ModelResponse(
             parts=[TextPart(content='The capital of France is Paris.')],
             usage=RequestUsage(input_tokens=10, output_tokens=10),
@@ -1077,7 +1081,9 @@ async def test_cache_policy_custom():
 
     # Also test that different content produces different hashes
    messages3 = [
-        ModelRequest(parts=[UserPromptPart(content='What is the capital of Spain?', timestamp=time1)]),
+        ModelRequest(
+            parts=[UserPromptPart(content='What is the capital of Spain?', timestamp=time1)], timestamp=IsDatetime()
+        ),
         ModelResponse(
             parts=[TextPart(content='The capital of Spain is Madrid.')],
             usage=RequestUsage(input_tokens=10, output_tokens=10),
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index 9149d19d1b..ef7ee37c04 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -74,6 +74,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -90,6 +91,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -110,6 +112,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -126,6 +129,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -166,6 +170,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -182,6 +187,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -212,6 +218,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -228,6 +235,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -590,6 +598,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -608,6 +617,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -617,6 +627,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -635,6 +646,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -659,6 +671,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -690,6 +703,7 @@ async def stream_structured_function(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -699,7 +713,11 @@ async def stream_structured_function(
                 timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
-            ModelRequest(parts=[], run_id=IsStr()),
+            ModelRequest(
+                parts=[],
+                timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='ok here is text')],
                 usage=RequestUsage(input_tokens=50, output_tokens=4),
@@ -734,6 +752,7 @@ async def ret_a(x: str) -> str:  # pragma: no cover
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -805,6 +824,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test early strategy', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -846,6 +866,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         tool_call_id=IsStr(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -899,6 +920,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test early output tools', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -926,6 +948,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -951,6 +974,7 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test multiple final results', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -978,6 +1002,7 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
                         tool_call_id=IsStr(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1034,6 +1059,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1102,6 +1128,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1171,6 +1198,7 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1207,6 +1235,7 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1256,6 +1285,7 @@ def regular_tool(x: int) -> int:
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1281,6 +1311,7 @@ def regular_tool(x: int) -> int:
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1315,6 +1346,7 @@ def regular_tool(x: int) -> int:
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1340,6 +1372,7 @@ def regular_tool(x: int) -> int:
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1365,6 +1398,7 @@ def regular_tool(x: int) -> int:
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1420,6 +1454,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1472,6 +1507,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1525,6 +1561,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test exhaustive output tools', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1552,6 +1589,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1686,6 +1724,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test valid first invalid second', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1713,6 +1752,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1770,6 +1810,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
                         content='test exhaustive with tool retry', timestamp=IsNow(tz=datetime.timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1797,6 +1838,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat
                         timestamp=IsNow(tz=datetime.timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -2330,6 +2372,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2348,6 +2391,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/test_temporal.py b/tests/test_temporal.py
index 24ccefb83e..645bdd189f 100644
--- a/tests/test_temporal.py
+++ b/tests/test_temporal.py
@@ -1833,6 +1833,9 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                         timestamp=IsDatetime(),
                     )
                 ],
+                # NOTE: other tests check timestamp=IsNow(tz=timezone.utc),
+                # but the Temporal tests fail when we use IsNow
+                timestamp=IsDatetime(),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1863,7 +1866,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={'finish_reason': 'tool_calls', 'timestamp': '2025-08-28T22:11:03Z'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1883,6 +1886,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1906,7 +1910,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={'finish_reason': 'stop', 'timestamp': '2025-08-28T22:11:06Z'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1960,6 +1964,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1984,7 +1989,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={'finish_reason': 'tool_calls', 'timestamp': '2025-08-28T23:19:50Z'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1998,6 +2003,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2022,7 +2028,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={'finish_reason': 'tool_calls', 'timestamp': '2025-08-28T23:19:51Z'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -2036,6 +2042,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2054,7 +2061,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                 timestamp=IsDatetime(),
                 provider_name='openai',
                 provider_url='https://api.openai.com/v1/',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={'finish_reason': 'stop', 'timestamp': '2025-08-28T23:19:52Z'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 0031f702cd..1e9e6d5ce7 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -1381,6 +1381,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1399,6 +1400,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1772,6 +1774,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1826,6 +1829,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1865,6 +1869,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1919,6 +1924,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelRequest(
@@ -1947,6 +1953,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1987,6 +1994,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2008,6 +2016,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2084,7 +2093,8 @@ def buy(fruit: str):
                         content='I bought a banana',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             ),
         ]
@@ -2165,6 +2175,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2187,6 +2198,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -2218,6 +2230,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2240,6 +2253,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelRequest(
@@ -2257,6 +2271,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2404,6 +2419,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2422,6 +2438,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2440,6 +2457,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2458,6 +2476,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/test_usage_limits.py b/tests/test_usage_limits.py
index ac17fd0be5..5cb4d529be 100644
--- a/tests/test_usage_limits.py
+++ b/tests/test_usage_limits.py
@@ -100,6 +100,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -125,6 +126,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py
index 12a4ea3eaa..bfe43b00c5 100644
--- a/tests/test_vercel_ai.py
+++ b/tests/test_vercel_ai.py
@@ -2505,8 +2505,8 @@ def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[Mode
             for orig_part, new_part in zip(orig_msg.parts, new_msg.parts):
                 if hasattr(orig_part, 'timestamp') and hasattr(new_part, 'timestamp'):
                     new_part.timestamp = orig_part.timestamp  # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType]
-            if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'):
-                new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType]
+            if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'):  # pragma: no branch
+                new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue]
 
     # Load back to Pydantic AI format
     reloaded_messages = VercelAIAdapter.load_messages(ui_messages)
@@ -2515,6 +2515,44 @@ def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[Mode
 
     assert reloaded_messages == original_messages
 
+
+async def test_adapter_dump_load_roundtrip_without_timestamps():
+    """Test that dump_messages and load_messages work when messages don't have timestamps."""
+    original_messages = [
+        ModelRequest(
+            parts=[
+                UserPromptPart(content='User message'),
+            ]
+        ),
+        ModelResponse(
+            parts=[
+                TextPart(content='Response text'),
+            ]
+        ),
+    ]
+
+    for msg in original_messages:
+        delattr(msg, 'timestamp')
+
+    ui_messages = VercelAIAdapter.dump_messages(original_messages)
+    reloaded_messages = VercelAIAdapter.load_messages(ui_messages)
+
+    def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[ModelRequest | ModelResponse]) -> None:
+        for orig_msg, new_msg in zip(original, new):
+            for orig_part, new_part in zip(orig_msg.parts, new_msg.parts):
+                if hasattr(orig_part, 'timestamp') and hasattr(new_part, 'timestamp'):
+                    new_part.timestamp = orig_part.timestamp  # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType]
+            if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'):
+                new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue]
+
+    sync_timestamps(original_messages, reloaded_messages)
+
+    for msg in reloaded_messages:
+        if hasattr(msg, 'timestamp'):  # pragma: no branch
+            delattr(msg, 'timestamp')
+
+    assert len(reloaded_messages) == len(original_messages)
+
+
 async def test_adapter_dump_messages_text_before_thinking():
     """Test dumping messages where text precedes a thinking part."""
     messages = [
@@ -2744,7 +2782,7 @@ async def test_adapter_dump_messages_thinking_with_metadata():
 
     # Sync timestamps for comparison (ModelResponse always has timestamp)
     for orig_msg, new_msg in zip(original_messages, reloaded_messages):
-        new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue]
+        new_msg.timestamp = orig_msg.timestamp
 
     assert reloaded_messages == original_messages