Skip to content

Commit b1070a6

Browse files
committed
feat: Add MetricsPlugin to record agent metrics using OpenTelemetry
1 parent 6182981 commit b1070a6

2 files changed

Lines changed: 111 additions & 1 deletion

File tree

adk/agenticlayer/agent_to_a2a.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
from .callback_tracer_plugin import CallbackTracerPlugin
3333
from .config import McpTool, SubAgent
3434
from .constants import HTTP_HEADERS_SESSION_KEY
35+
from .metrics_plugin import MetricsPlugin
3536

3637
logger = logging.getLogger(__name__)
3738

@@ -116,7 +117,7 @@ async def create_runner() -> Runner:
116117
app=App(
117118
name=agent.name or "adk_agent",
118119
root_agent=agent,
119-
plugins=[CallbackTracerPlugin()],
120+
plugins=[CallbackTracerPlugin(), MetricsPlugin()],
120121
),
121122
artifact_service=InMemoryArtifactService(),
122123
session_service=InMemorySessionService(), # type: ignore[no-untyped-call]

adk/agenticlayer/metrics_plugin.py

Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
"""
2+
A custom plugin that records agent metrics using OpenTelemetry.
3+
Tracks agent invocations, LLM calls, token usage, tool calls, and errors.
4+
"""
5+
6+
from typing import Any, Dict, Optional
7+
8+
from google.adk.agents import BaseAgent
9+
from google.adk.agents.callback_context import CallbackContext
10+
from google.adk.models.llm_request import LlmRequest
11+
from google.adk.models.llm_response import LlmResponse
12+
from google.adk.plugins.base_plugin import BasePlugin
13+
from google.adk.tools.base_tool import BaseTool
14+
from google.adk.tools.tool_context import ToolContext
15+
from google.genai import types
16+
from opentelemetry import metrics
17+
18+
# Module-level meter shared by all MetricsPlugin instances; the name is the
# instrumentation-scope identifier that shows up on exported metrics.
_meter = metrics.get_meter("agenticlayer.agent")
19+
20+
21+
class MetricsPlugin(BasePlugin):
    """A custom ADK plugin that records agent metrics using OpenTelemetry.

    Tracks agent invocations, LLM calls, token usage, tool calls, and
    errors, emitting them as counters/histograms on the module-level meter.
    """

    # Instruments shared by every plugin instance.  Creating an instrument
    # with the same name twice on one meter makes the OpenTelemetry SDK log
    # duplicate-instrument warnings, so they are created lazily exactly once
    # and reused by any later MetricsPlugin() constructions.
    _instruments: Optional[Dict[str, Any]] = None

    def __init__(self) -> None:
        super().__init__("MetricsPlugin")
        cache = MetricsPlugin._instruments
        if cache is None:
            cache = {
                "invocations": _meter.create_counter(
                    "agent.invocations",
                    unit="{invocation}",
                    description="Number of agent invocations",
                ),
                "llm_calls": _meter.create_counter(
                    "agent.llm.calls",
                    unit="{call}",
                    description="Number of LLM calls",
                ),
                "llm_input_tokens": _meter.create_histogram(
                    "agent.llm.tokens.input",
                    unit="{token}",
                    description="Number of input tokens per LLM call",
                ),
                "llm_output_tokens": _meter.create_histogram(
                    "agent.llm.tokens.output",
                    unit="{token}",
                    description="Number of output tokens per LLM call",
                ),
                "tool_calls": _meter.create_counter(
                    "agent.tool.calls",
                    unit="{call}",
                    description="Number of tool calls",
                ),
                "agent_errors": _meter.create_counter(
                    "agent.errors",
                    unit="{error}",
                    description="Number of agent errors",
                ),
            }
            MetricsPlugin._instruments = cache
        self._agent_invocations = cache["invocations"]
        self._llm_calls = cache["llm_calls"]
        self._llm_input_tokens = cache["llm_input_tokens"]
        self._llm_output_tokens = cache["llm_output_tokens"]
        self._tool_calls = cache["tool_calls"]
        self._agent_errors = cache["agent_errors"]

    async def before_agent_callback(
        self, *, agent: BaseAgent, callback_context: CallbackContext
    ) -> Optional[types.Content]:
        """Count one agent invocation, attributed to the agent's name."""
        self._agent_invocations.add(1, {"agent_name": callback_context.agent_name})
        # Returning None lets the agent run normally (no content override).
        return None

    async def after_model_callback(
        self, *, callback_context: CallbackContext, llm_response: LlmResponse
    ) -> Optional[LlmResponse]:
        """Count one LLM call and record token usage when available."""
        # Defensive getattr: not every LlmResponse carries a model field.
        model = getattr(llm_response, "model", "unknown") or "unknown"
        attrs = {"agent_name": callback_context.agent_name, "model": model}
        self._llm_calls.add(1, attrs)
        usage = getattr(llm_response, "usage_metadata", None)
        if usage:
            # Token counts may be None; normalize to 0 and only record
            # non-zero values to keep the histograms meaningful.
            prompt_tokens = getattr(usage, "prompt_token_count", 0) or 0
            candidates_tokens = getattr(usage, "candidates_token_count", 0) or 0
            if prompt_tokens:
                self._llm_input_tokens.record(prompt_tokens, attrs)
            if candidates_tokens:
                self._llm_output_tokens.record(candidates_tokens, attrs)
        # Returning None keeps the original LLM response unmodified.
        return None

    async def after_tool_callback(
        self,
        *,
        tool: BaseTool,
        tool_args: Dict[str, Any],
        tool_context: ToolContext,
        result: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        """Count one successful tool call, attributed to agent and tool."""
        self._tool_calls.add(1, {"agent_name": tool_context.agent_name, "tool_name": tool.name})
        # Returning None keeps the original tool result unmodified.
        return None

    async def on_model_error_callback(
        self,
        *,
        callback_context: CallbackContext,
        llm_request: LlmRequest,
        error: Exception,
    ) -> Optional[LlmResponse]:
        """Count one model-side error; the error itself is not swallowed."""
        self._agent_errors.add(1, {"agent_name": callback_context.agent_name, "error_source": "model"})
        return None

    async def on_tool_error_callback(
        self,
        *,
        tool: BaseTool,
        tool_args: Dict[str, Any],
        tool_context: ToolContext,
        error: Exception,
    ) -> Optional[Dict[str, Any]]:
        """Count one tool-side error; the error itself is not swallowed."""
        self._agent_errors.add(1, {"agent_name": tool_context.agent_name, "error_source": "tool"})
        return None

0 commit comments

Comments (0)