22 changes: 11 additions & 11 deletions docs/how-to/tool-providers.md
@@ -14,8 +14,8 @@ This is an early release with an initial set of built-in providers. More provide
- Config: `TOOLS_PROMETHEUS_URL`, `TOOLS_PROMETHEUS_DISABLE_SSL`
- **Loki logs**: `redis_sre_agent.tools.logs.loki.provider.LokiToolProvider`
- Config: `TOOLS_LOKI_URL`, `TOOLS_LOKI_TENANT_ID`, `TOOLS_LOKI_TIMEOUT`
- **Redis CLI diagnostics**: `redis_sre_agent.tools.diagnostics.redis_cli.provider.RedisCliToolProvider`
- Runs Redis CLI commands against target instances
- **Redis command diagnostics**: `redis_sre_agent.tools.diagnostics.redis_command.provider.RedisCommandToolProvider`
- Runs Redis commands against target instances
- **Host telemetry**: `redis_sre_agent.tools.host_telemetry.provider.HostTelemetryToolProvider`
- System-level metrics and diagnostics
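
As a side note on the config keys listed above, a minimal sketch of supplying them through the environment (the URLs and tenant ID are placeholders, not shipped defaults):

```python
# Illustrative only: provider settings are plain environment variables.
import os

os.environ["TOOLS_PROMETHEUS_URL"] = "http://prometheus:9090"  # placeholder endpoint
os.environ["TOOLS_LOKI_URL"] = "http://loki:3100"              # placeholder endpoint
os.environ["TOOLS_LOKI_TENANT_ID"] = "tenant-1"                # placeholder tenant
```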

@@ -41,18 +41,22 @@ Implement a ToolProvider subclass that defines tool schemas and resolves calls.
### Minimal skeleton

```python
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List
from redis_sre_agent.tools.protocols import ToolProvider
from redis_sre_agent.tools.tool_definition import ToolDefinition
from redis_sre_agent.tools.models import ToolDefinition, ToolCapability


class MyMetricsProvider(ToolProvider):
provider_name = "my_metrics"
@property
def provider_name(self) -> str:
return "my_metrics"

def create_tool_schemas(self) -> List[ToolDefinition]:
return [
ToolDefinition(
name=self._make_tool_name("query"),
description="Query my metrics backend using a query string.",
capability=ToolCapability.METRICS,
parameters={
"type": "object",
"properties": {"query": {"type": "string"}},
@@ -61,17 +65,13 @@ class MyMetricsProvider(ToolProvider):
)
]

async def resolve_tool_call(self, tool_name: str, args: Dict[str, Any]):
op = self.resolve_operation(tool_name)
if op == "query":
return await self.query(**args)
raise ValueError(f"Unknown operation: {op}")

async def query(self, query: str) -> Dict[str, Any]:
# Implement your backend call
return {"status": "success", "query": query, "data": []}
```

The base class's `tools()` method automatically wires tool names to provider methods: when the LLM invokes `my_metrics_{hash}_query`, the framework calls `self.query(**args)` directly, so no manual `resolve_tool_call()` implementation is required.
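
For context on the automatic wiring described above, here is a minimal sketch of how a name-to-method dispatch could work; the hash segment, regex, and `dispatch` helper are illustrative assumptions, not the actual base-class implementation:

```python
# Sketch only: resolve an operation suffix from a generated tool name such as
# "my_metrics_ab12cd_query" and call the matching provider method. The
# "<provider>_<hash>_<operation>" naming convention is assumed from the docs above.
import re
from typing import Any, Dict


async def dispatch(provider: Any, tool_name: str, args: Dict[str, Any]) -> Any:
    pattern = rf"^{re.escape(provider.provider_name)}_[0-9a-f]+_(\w+)$"
    match = re.match(pattern, tool_name)
    if match is None:
        raise ValueError(f"Unrecognized tool name: {tool_name}")
    operation = match.group(1)
    method = getattr(provider, operation, None)
    if method is None:
        raise ValueError(f"{provider.provider_name} has no method for: {operation}")
    return await method(**args)
```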

### Register your provider

- Install your package into the same environment as the agent (e.g., `pip install -e /path/to/pkg`)
36 changes: 20 additions & 16 deletions pyproject.toml
@@ -59,6 +59,26 @@ dependencies = [
"opentelemetry-instrumentation-openai>=0.47.5",
]

[dependency-groups]
dev = [
"pytest>=7.0.0",
"pytest-asyncio>=0.21.0",
"pytest-cov>=4.1.0",
"ruff>=0.3.0",
"black>=23.0.0",
"mypy>=1.8.0",
"testcontainers>=3.7.0",
"pre-commit>=3.6.0",
"safety>=3.0.0",
"bandit>=1.7.0",
# OpenAPI client generator
"openapi-python-client>=0.21.0",
"mkdocs>=1.6.1",
"mkdocs-material>=9.6.22",
"mfcqi>=0.0.4",
]


[project.scripts]
redis-sre-agent = "redis_sre_agent.cli:main"

@@ -77,22 +97,6 @@ include = [

[tool.uv]
default-groups = []
Copilot AI Dec 8, 2025

The pyproject.toml change moves dev dependencies from tool.uv.dev-dependencies to dependency-groups.dev, but the tool.uv.default-groups = [] setting remains. This configuration means dev dependencies won't be installed by default with uv sync. Users would need to explicitly run uv sync --group dev to get development tools. This might be intentional but could surprise contributors.

Suggested change
default-groups = []
default-groups = ["dev"]

dev-dependencies = [
"pytest>=7.0.0",
"pytest-asyncio>=0.21.0",
"pytest-cov>=4.1.0",
"ruff>=0.3.0",
"black>=23.0.0",
"mypy>=1.8.0",
"testcontainers>=3.7.0",
"pre-commit>=3.6.0",
"safety>=3.0.0",
"bandit>=1.7.0",
# OpenAPI client generator
"openapi-python-client>=0.21.0",
"mkdocs>=1.6.1",
"mkdocs-material>=9.6.22",
]

[tool.pytest.ini_options]
testpaths = ["tests"]
50 changes: 25 additions & 25 deletions redis_sre_agent/agent/helpers.py
@@ -85,23 +85,23 @@ def sanitize_messages_for_llm(msgs: List[Any]) -> List[Any]:
for m in msgs:
if isinstance(m, _AI):
try:
for tc in getattr(m, "tool_calls", []) or []:
for tc in m.tool_calls or []:
if isinstance(tc, dict):
tid = tc.get("id") or tc.get("tool_call_id")
if tid:
seen_tool_ids.add(tid)
except Exception:
pass
clean.append(m)
elif isinstance(m, _TM) or getattr(m, "type", "") == "tool":
tid = getattr(m, "tool_call_id", None)
elif isinstance(m, _TM) or m.type == "tool":
tid = m.tool_call_id
if tid and tid in seen_tool_ids:
clean.append(m)
else:
continue
else:
clean.append(m)
while clean and (isinstance(clean[0], _TM) or getattr(clean[0], "type", "") == "tool"):
while clean and (isinstance(clean[0], _TM) or clean[0].type == "tool"):
clean = clean[1:]
return clean
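
A quick usage sketch of the invariant this function enforces (the example messages below are hypothetical):

```python
# Illustration: a ToolMessage survives sanitization only if a preceding AIMessage
# announced its tool_call_id; orphaned tool results and leading tool messages are dropped.
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

from redis_sre_agent.agent.helpers import sanitize_messages_for_llm

msgs = [
    ToolMessage(content="orphaned result", tool_call_id="call_0"),  # no matching AI tool call
    HumanMessage(content="check memory usage"),
    AIMessage(content="", tool_calls=[{"name": "info", "args": {}, "id": "call_1"}]),
    ToolMessage(content='{"used_memory": "1.2G"}', tool_call_id="call_1"),
]

clean = sanitize_messages_for_llm(msgs)
# Expected: the orphaned ToolMessage is dropped; the other three messages remain.
```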

@@ -122,26 +122,26 @@ def _compact_messages_tail(msgs: List[Any], limit: int = 6) -> List[Dict[str, An
tail = msgs[-limit:] if msgs else []
compact: List[Dict[str, Any]] = []
for m in tail:
role = getattr(m, "type", m.__class__.__name__.lower())
role = m.type if m.type else m.__class__.__name__.lower()
row: Dict[str, Any] = {"role": role}
try:
is_ai = (_AI is not None and isinstance(m, _AI)) or getattr(m, "type", "") in (
is_ai = (_AI is not None and isinstance(m, _AI)) or m.type in (
"ai",
"assistant",
)
if is_ai:
ids: List[str] = []
for tc in getattr(m, "tool_calls", []) or []:
for tc in m.tool_calls or []:
if isinstance(tc, dict):
tid = tc.get("id") or tc.get("tool_call_id")
if tid:
ids.append(tid)
if ids:
row["tool_calls"] = ids
is_tool = (_TM is not None and isinstance(m, _TM)) or getattr(m, "type", "") == "tool"
is_tool = (_TM is not None and isinstance(m, _TM)) or m.type == "tool"
if is_tool:
row["tool_call_id"] = getattr(m, "tool_call_id", None)
name = getattr(m, "name", None)
row["tool_call_id"] = m.tool_call_id
name = m.name
if name:
row["name"] = name
except Exception:
@@ -193,7 +193,7 @@ def build_result_envelope(

from .models import ResultEnvelope

content = getattr(tool_message, "content", None)
content = tool_message.content
data_obj = None
if isinstance(content, str) and content:
try:
@@ -210,9 +210,8 @@ def _extract_operation_from_tool_name(full: str) -> str:
parts = full.split(".")
return parts[-1] if parts else full

description = (
getattr(tooldefs_by_name.get(tool_name), "description", None) if tool_name else None
)
tdef = tooldefs_by_name.get(tool_name) if tool_name else None
description = tdef.description if tdef else None
env = ResultEnvelope(
tool_key=tool_name or "tool",
name=_extract_operation_from_tool_name(tool_name or "tool"),
@@ -224,13 +223,15 @@ def _extract_operation_from_tool_name(full: str) -> str:
return env.model_dump()


async def build_adapters_for_tooldefs(
tool_manager: Any, tooldefs: List[Any]
) -> tuple[list[dict], list[Any]]:
"""Create OpenAI tool schemas and LangChain StructuredTool adapters for ToolDefinitions.
async def build_adapters_for_tooldefs(tool_manager: Any, tooldefs: List[Any]) -> list[Any]:
"""Create LangChain StructuredTool adapters for ToolDefinitions.

Returns (tool_schemas, adapters)
Each adapter wraps :meth:`ToolManager.resolve_tool_call` so that tools can
be executed either via LangGraph's :class:`ToolNode` or directly via the
manager. The same adapters can also be passed to ``ChatOpenAI.bind_tools``
so we do not need to maintain separate OpenAI-specific tool schemas.
"""

try:
from typing import Any as _Any

@@ -241,7 +242,7 @@ async def build_adapters_for_tooldefs(
from pydantic import create_model as _create_model
except Exception:
# Best-effort fallback (should not happen in runtime)
return [], []
return []

def _args_model_from_parameters(tool_name: str, params: dict) -> type[_BaseModel]:
props = (params or {}).get("properties", {}) or {}
@@ -261,20 +262,19 @@ def _args_model_from_parameters(tool_name: str, params: dict) -> type[_BaseModel
pass
return args_model

tool_schemas: list[dict] = [t.to_openai_schema() for t in (tooldefs or [])]
adapters: list[_StructuredTool] = []
for tdef in tooldefs or []:

async def _exec_fn(_name=tdef.name, **kwargs):
return await tool_manager.resolve_tool_call(_name, kwargs or {})

ArgsModel = _args_model_from_parameters(tdef.name, getattr(tdef, "parameters", {}) or {}) # noqa: N806
args_model = _args_model_from_parameters(tdef.name, tdef.parameters or {})
adapters.append(
_StructuredTool.from_function(
coroutine=_exec_fn,
name=tdef.name,
description=getattr(tdef, "description", "") or "",
args_schema=ArgsModel,
description=tdef.description or "",
args_schema=args_model,
)
)
return tool_schemas, adapters
return adapters
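
A usage sketch of the pattern the new docstring describes, binding the same adapters to both the chat model and LangGraph's `ToolNode`; the `ChatOpenAI` model name and the surrounding wiring are placeholders, not the agent's actual setup:

```python
# Sketch only: the adapters returned above can drive both bind_tools and ToolNode.
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode

from redis_sre_agent.agent.helpers import build_adapters_for_tooldefs


async def wire_tools(tool_manager, tooldefs):
    adapters = await build_adapters_for_tooldefs(tool_manager, tooldefs)
    llm_with_tools = ChatOpenAI(model="gpt-4o").bind_tools(adapters)  # model name illustrative
    tool_node = ToolNode(adapters)
    return llm_with_tools, tool_node
```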