Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions agentops/instrumentation/agentic/google_adk/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,8 +114,8 @@ def _extract_messages_from_contents(contents: list) -> dict:
# Extract content from parts
text_parts = []
for part in parts:
if "text" in part:
text_parts.append(part["text"])
if "text" in part and part.get("text") is not None:
text_parts.append(str(part["text"]))
elif "function_call" in part:
# Function calls in prompts are typically from the model's previous responses
func_call = part["function_call"]
Expand Down Expand Up @@ -227,8 +227,8 @@ def _extract_llm_attributes(llm_request_dict: dict, llm_response: Any) -> dict:
# Extract content from parts
text_parts = []
for part in parts:
if "text" in part:
text_parts.append(part["text"])
if "text" in part and part.get("text") is not None:
text_parts.append(str(part["text"]))
elif "function_call" in part:
# Function calls in prompts are typically from the model's previous responses
func_call = part["function_call"]
Expand Down Expand Up @@ -299,8 +299,8 @@ def _extract_llm_attributes(llm_request_dict: dict, llm_response: Any) -> dict:
text_parts = []
tool_call_index = 0
for part in parts:
if "text" in part:
text_parts.append(part["text"])
if "text" in part and part.get("text") is not None:
text_parts.append(str(part["text"]))
elif "function_call" in part:
# This is a function call in the response
func_call = part["function_call"]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,17 +34,22 @@ def _extract_content_from_prompt(content: Any) -> str:
if isinstance(item, str):
text += item + "\n"
elif isinstance(item, dict) and "text" in item:
text += item["text"] + "\n"
if item.get("text") is not None:
text += str(item["text"]) + "\n"
elif hasattr(item, "text"):
text += item.text + "\n"
part_text = getattr(item, "text", None)
if part_text:
text += part_text + "\n"
# Handle content as a list with mixed types
elif hasattr(item, "parts"):
parts = item.parts
for part in parts:
if isinstance(part, str):
text += part + "\n"
elif hasattr(part, "text"):
text += part.text + "\n"
part_text = getattr(part, "text", None)
if part_text:
text += part_text + "\n"
return text

# Dict with text key
Expand All @@ -62,7 +67,9 @@ def _extract_content_from_prompt(content: Any) -> str:
if isinstance(part, str):
text += part + "\n"
elif hasattr(part, "text"):
text += part.text + "\n"
part_text = getattr(part, "text", None)
if part_text:
text += part_text + "\n"
return text

# Other object types - try to convert to string
Expand Down Expand Up @@ -155,7 +162,9 @@ def _set_response_attributes(attributes: AttributeMap, response: Any) -> None:
if isinstance(part, str):
text += part
elif hasattr(part, "text"):
text += part.text
part_text = getattr(part, "text", None)
if part_text:
text += part_text

attributes[MessageAttributes.COMPLETION_CONTENT.format(i=i)] = text
attributes[MessageAttributes.COMPLETION_ROLE.format(i=i)] = "assistant"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,9 @@ def instrumented_stream():

# Track token count (approximate by word count if metadata not available)
if hasattr(chunk, "text"):
full_text += chunk.text
text_value = getattr(chunk, "text", None)
if text_value:
full_text += text_value

yield chunk

Expand Down Expand Up @@ -195,7 +197,9 @@ async def instrumented_stream():
last_chunk_with_metadata = chunk

if hasattr(chunk, "text"):
full_text += chunk.text
text_value = getattr(chunk, "text", None)
if text_value:
full_text += text_value

yield chunk

Expand Down
94 changes: 94 additions & 0 deletions examples/google_adk/sse_function_call_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
import os
import asyncio
from typing import Optional

import agentops
from google.adk.agents import LlmAgent
from google.adk.tools import FunctionTool
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types


# Attempt to import RunConfig/StreamingMode from likely ADK locations
RunConfig: Optional[object] = None
StreamingMode: Optional[object] = None
try:
from google.adk.runners import RunConfig as _RunConfig, StreamingMode as _StreamingMode # type: ignore
RunConfig = _RunConfig
StreamingMode = _StreamingMode
except Exception:
try:
from google.adk.types import RunConfig as _RunConfig2, StreamingMode as _StreamingMode2 # type: ignore
RunConfig = _RunConfig2
StreamingMode = _StreamingMode2
except Exception:
RunConfig = None
StreamingMode = None


# Initialize AgentOps (set AGENTOPS_API_KEY in your environment)
agentops.init(api_key=os.getenv("AGENTOPS_API_KEY"), trace_name="adk_sse_text_function_call")

# Fixed identifiers reused by the session service and runner below.
APP_NAME = "adk_sse_text_function_call_app"  # app name registered with the Runner
USER_ID = "user_sse_text_fc"  # single demo user id
SESSION_ID = "session_sse_text_fc"  # single demo session id
MODEL_NAME = "gemini-2.0-flash"  # Gemini model the agent targets


# Simple tool to trigger a function call
async def get_weather(location: str) -> str:
    """Return a canned weather report for *location* (demo tool to trigger a function call)."""
    report = f"Weather for {location}: sunny and 25°C."
    return report


# Wrap the coroutine so the ADK can expose it to the model as a callable tool.
weather_tool = FunctionTool(func=get_weather)

# Agent configured with the tool so the model can trigger a function call
agent = LlmAgent(
    model=MODEL_NAME,
    name="WeatherAgent",
    description="Provides weather using a tool",
    instruction=(
        "You are a helpful assistant. When asked about weather, call the get_weather tool with the given location."
    ),
    tools=[weather_tool],
    output_key="weather_output",  # session-state key the agent writes its final answer to
)

# Session service and runner
session_service = InMemorySessionService()  # in-memory only; state is lost when the process exits
runner = Runner(agent=agent, app_name=APP_NAME, session_service=session_service)


async def main():
    """Send one weather question through the agent over SSE streaming and print the events."""
    # The session must exist before the runner can attach events to it.
    await session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID)

    # Build the user's message.
    query = types.Content(role="user", parts=[types.Part(text="What's the weather in Paris?")])

    # Request SSE streaming with TEXT modality when the ADK exposes RunConfig.
    extra_kwargs = {}
    if RunConfig is not None and StreamingMode is not None:
        extra_kwargs["run_config"] = RunConfig(streaming_mode=StreamingMode.SSE, response_modalities=["TEXT"])  # type: ignore

    last_text = None
    async for event in runner.run_async(user_id=USER_ID, session_id=SESSION_ID, new_message=query, **extra_kwargs):
        # Walk each part defensively; a part may carry text or a function_call.
        content = getattr(event, "content", None)
        if content and getattr(content, "parts", None):
            for part in content.parts:
                part_text = getattr(part, "text", None)
                call = getattr(part, "function_call", None)
                if part_text:
                    print(f"Assistant: {part_text}")
                    last_text = part_text
                elif call is not None:
                    name = getattr(call, "name", "<unknown>")
                    args = getattr(call, "args", {})
                    print(f"Function call: {name} args={args}")

    print("Final text:", last_text)


# Script entry point: run the async demo to completion.
if __name__ == "__main__":
    asyncio.run(main())
Loading