-
Notifications
You must be signed in to change notification settings - Fork 1.5k
Transform tool calls and tool returns to text parts for Google models that don't support tools #3591
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -549,15 +549,25 @@ async def _map_messages( | |
| elif isinstance(part, UserPromptPart): | ||
| message_parts.extend(await self._map_user_prompt(part)) | ||
| elif isinstance(part, ToolReturnPart): | ||
| message_parts.append( | ||
| { | ||
| 'function_response': { | ||
| 'name': part.tool_name, | ||
| 'response': part.model_response_object(), | ||
| 'id': part.tool_call_id, | ||
| if self.profile.supports_tools: | ||
| message_parts.append( | ||
| { | ||
| 'function_response': { | ||
| 'name': part.tool_name, | ||
| 'response': part.model_response_object(), | ||
| 'id': part.tool_call_id, | ||
| } | ||
| } | ||
| } | ||
| ) | ||
| ) | ||
| else: | ||
| text = '\n'.join( | ||
| [ | ||
| f'-----BEGIN TOOL RETURN name="{part.tool_name}" id="{part.tool_call_id}"-----', | ||
| f'response: {part.model_response_object()}', | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Let's just use |
||
| f'-----END TOOL RETURN id="{part.tool_call_id}"-----', | ||
| ] | ||
| ) | ||
| message_parts.append({'text': text}) | ||
| elif isinstance(part, RetryPromptPart): | ||
| if part.tool_name is None: | ||
| message_parts.append({'text': part.model_response()}) | ||
|
|
@@ -577,7 +587,7 @@ async def _map_messages( | |
| if message_parts: | ||
| contents.append({'role': 'user', 'parts': message_parts}) | ||
| elif isinstance(m, ModelResponse): | ||
| maybe_content = _content_model_response(m, self.system) | ||
| maybe_content = _content_model_response(m, self.system, self.profile.supports_tools) | ||
| if maybe_content: | ||
| contents.append(maybe_content) | ||
| else: | ||
|
|
@@ -786,7 +796,7 @@ def timestamp(self) -> datetime: | |
| return self._timestamp | ||
|
|
||
|
|
||
| def _content_model_response(m: ModelResponse, provider_name: str) -> ContentDict | None: # noqa: C901 | ||
| def _content_model_response(m: ModelResponse, provider_name: str, supports_tools: bool) -> ContentDict | None: # noqa: C901 | ||
| parts: list[PartDict] = [] | ||
| thinking_part_signature: str | None = None | ||
| function_call_requires_signature: bool = True | ||
|
|
@@ -803,17 +813,27 @@ def _content_model_response(m: ModelResponse, provider_name: str) -> ContentDict | |
| thinking_part_signature = None | ||
|
|
||
| if isinstance(item, ToolCallPart): | ||
| function_call = FunctionCallDict(name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id) | ||
| part['function_call'] = function_call | ||
| if function_call_requires_signature and not part.get('thought_signature'): | ||
| # Per https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#migrating_from_other_models: | ||
| # > If you are transferring a conversation trace from another model (e.g., Gemini 2.5) or injecting | ||
| # > a custom function call that was not generated by Gemini 3, you will not have a valid signature. | ||
| # > To bypass strict validation in these specific scenarios, populate the field with this specific | ||
| # > dummy string: "thoughtSignature": "context_engineering_is_the_way_to_go" | ||
| part['thought_signature'] = b'context_engineering_is_the_way_to_go' | ||
| # Only the first function call requires a signature | ||
| function_call_requires_signature = False | ||
| if supports_tools: | ||
| function_call = FunctionCallDict(name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id) | ||
| part['function_call'] = function_call | ||
| if function_call_requires_signature and not part.get('thought_signature'): | ||
| # Per https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#migrating_from_other_models: | ||
| # > If you are transferring a conversation trace from another model (e.g., Gemini 2.5) or injecting | ||
| # > a custom function call that was not generated by Gemini 3, you will not have a valid signature. | ||
| # > To bypass strict validation in these specific scenarios, populate the field with this specific | ||
| # > dummy string: "thoughtSignature": "context_engineering_is_the_way_to_go" | ||
| part['thought_signature'] = b'context_engineering_is_the_way_to_go' | ||
| # Only the first function call requires a signature | ||
| function_call_requires_signature = False | ||
| else: | ||
| text = '\n'.join( | ||
| [ | ||
| f'-----BEGIN TOOL CALL name="{item.tool_name} "id="{item.tool_call_id}""-----', | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We've got a few too many quotes here |
||
| f'args: {item.args_as_json_str()}', | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We can drop the |
||
| f'-----END TOOL CALL id="{item.tool_call_id}"-----', | ||
| ] | ||
| ) | ||
| part['text'] = text | ||
| elif isinstance(item, TextPart): | ||
| part['text'] = item.content | ||
| elif isinstance(item, ThinkingPart): | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4307,6 +4307,7 @@ def test_google_thought_signature_on_thinking_part(): | |
| provider_name='google-gla', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Let's default the arg to |
||
| ) | ||
| new_google_response = _content_model_response( | ||
| ModelResponse( | ||
|
|
@@ -4318,6 +4319,7 @@ def test_google_thought_signature_on_thinking_part(): | |
| provider_name='google-gla', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
| ) | ||
| assert old_google_response == snapshot( | ||
| { | ||
|
|
@@ -4342,6 +4344,7 @@ def test_google_thought_signature_on_thinking_part(): | |
| provider_name='google-gla', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
| ) | ||
| new_google_response = _content_model_response( | ||
| ModelResponse( | ||
|
|
@@ -4352,6 +4355,7 @@ def test_google_thought_signature_on_thinking_part(): | |
| provider_name='google-gla', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
| ) | ||
| assert old_google_response == snapshot( | ||
| { | ||
|
|
@@ -4376,6 +4380,7 @@ def test_google_thought_signature_on_thinking_part(): | |
| provider_name='google-gla', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
| ) | ||
| new_google_response = _content_model_response( | ||
| ModelResponse( | ||
|
|
@@ -4386,6 +4391,7 @@ def test_google_thought_signature_on_thinking_part(): | |
| provider_name='google-gla', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
| ) | ||
| assert old_google_response == snapshot( | ||
| { | ||
|
|
@@ -4412,6 +4418,7 @@ def test_google_missing_tool_call_thought_signature(): | |
| provider_name='openai', | ||
| ), | ||
| 'google-gla', | ||
| True, | ||
| ) | ||
| assert google_response == snapshot( | ||
| { | ||
|
|
@@ -4425,3 +4432,107 @@ def test_google_missing_tool_call_thought_signature(): | |
| ], | ||
| } | ||
| ) | ||
|
|
||
|
|
||
| async def test_google_mapping_messages_no_tool_support(google_provider: GoogleProvider): | ||
| old_messages = [ | ||
| ModelRequest( | ||
| parts=[ | ||
| UserPromptPart( | ||
| content='What is the largest city in the user country?', | ||
| timestamp=IsDatetime(), | ||
| ) | ||
| ], | ||
| run_id=IsStr(), | ||
| ), | ||
| ModelResponse(parts=[ToolCallPart(tool_name='get_user_country', args={}, tool_call_id=IsStr())]), | ||
| ModelRequest( | ||
| parts=[ | ||
| ToolReturnPart( | ||
| tool_name='get_user_country', | ||
| content='Mexico', | ||
| tool_call_id=IsStr(), | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Let's use an actual dummy value here that we'll see in the output below, instead of literal `IsStr()` |
||
| timestamp=IsDatetime(), | ||
| ) | ||
| ], | ||
| run_id=IsStr(), | ||
| ), | ||
| ModelResponse( | ||
| parts=[ | ||
| ToolCallPart( | ||
| tool_name='final_result', | ||
| args={'city': 'Mexico City', 'country': 'Mexico'}, | ||
| tool_call_id=IsStr(), | ||
| ) | ||
| ], | ||
| ), | ||
| ModelRequest( | ||
| parts=[ | ||
| ToolReturnPart( | ||
| tool_name='final_result', | ||
| content='Final result processed.', | ||
| tool_call_id=IsStr(), | ||
| timestamp=IsDatetime(), | ||
| ) | ||
| ], | ||
| run_id=IsStr(), | ||
| ), | ||
| ] | ||
| model = GoogleModel('gemini-2.5-flash-image-preview', provider=google_provider) | ||
| new_messages = await model._map_messages(old_messages, ModelRequestParameters()) # pyright: ignore[reportPrivateUsage] | ||
| assert new_messages == snapshot( | ||
| ( | ||
| None, | ||
| [ | ||
| {'role': 'user', 'parts': [{'text': 'What is the largest city in the user country?'}]}, | ||
| { | ||
| 'role': 'model', | ||
| 'parts': [ | ||
| { | ||
| 'text': """\ | ||
| -----BEGIN TOOL CALL name="get_user_country "id="IsStr()""----- | ||
| args: {} | ||
| -----END TOOL CALL id="IsStr()"-----\ | ||
| """ | ||
| } | ||
| ], | ||
| }, | ||
| { | ||
| 'role': 'user', | ||
| 'parts': [ | ||
| { | ||
| 'text': """\ | ||
| -----BEGIN TOOL RETURN name="get_user_country" id="IsStr()"----- | ||
| response: {'return_value': 'Mexico'} | ||
| -----END TOOL RETURN id="IsStr()"-----\ | ||
| """ | ||
| } | ||
| ], | ||
| }, | ||
| { | ||
| 'role': 'model', | ||
| 'parts': [ | ||
| { | ||
| 'text': """\ | ||
| -----BEGIN TOOL CALL name="final_result "id="IsStr()""----- | ||
| args: {"city":"Mexico City","country":"Mexico"} | ||
| -----END TOOL CALL id="IsStr()"-----\ | ||
| """ | ||
| } | ||
| ], | ||
| }, | ||
| { | ||
| 'role': 'user', | ||
| 'parts': [ | ||
| { | ||
| 'text': """\ | ||
| -----BEGIN TOOL RETURN name="final_result" id="IsStr()"----- | ||
| response: {'return_value': 'Final result processed.'} | ||
| -----END TOOL RETURN id="IsStr()"-----\ | ||
| """ | ||
| } | ||
| ], | ||
| }, | ||
| ], | ||
| ) | ||
| ) | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Let's move this to an
`as_text()` method on `ToolCallPart` and explain in the docstring what it's for, in case we want to someday do this in other models as well. Same for `ToolReturnPart`.