2 changes: 0 additions & 2 deletions packages/sdk/server-ai/src/ldai/__init__.py
@@ -32,7 +32,6 @@
     ProviderConfig,
 )
 from ldai.providers import (
-    AgentGraphResult,
     AgentGraphRunner,
     AgentGraphRunnerResult,
     GraphMetrics,
@@ -50,7 +49,6 @@
     'LDAIClient',
     'Evaluator',
     'AgentGraphRunner',
-    'AgentGraphResult',
     'AgentGraphRunnerResult',
     'GraphMetrics',
     'GraphMetricSummary',
26 changes: 8 additions & 18 deletions packages/sdk/server-ai/src/ldai/managed_model.py
@@ -2,7 +2,7 @@
 from typing import List
 
 from ldai import log
-from ldai.models import AICompletionConfig, LDMessage
+from ldai.models import AICompletionConfig
 from ldai.providers.runner import Runner
 from ldai.providers.types import JudgeResult, ManagedResult, RunnerResult
 from ldai.tracker import LDAIConfigTracker
@@ -12,9 +12,10 @@ class ManagedModel:
     """
     LaunchDarkly managed wrapper for AI model invocations.
 
-    Holds a Runner. Handles conversation management, judge evaluation
-    dispatch, and tracking automatically via ``create_tracker()``.
-    Obtain an instance via ``LDAIClient.create_model()``.
+    Holds a Runner. Handles judge evaluation dispatch and tracking
+    automatically via ``create_tracker()``. Conversation history is
+    managed by the runner. Obtain an instance via
+    ``LDAIClient.create_model()``.
     """
 
     def __init__(
@@ -24,37 +25,26 @@ def __init__(
     ):
         self._ai_config = ai_config
         self._model_runner = model_runner
-        self._messages: List[LDMessage] = []
 
     async def run(self, prompt: str) -> ManagedResult:
         """
         Run the model with a prompt string.
 
-        Appends the prompt to the conversation history, prepends any
-        system messages from the config, delegates to the runner, and
-        appends the response to the history.
+        Delegates to the runner, then dispatches judge evaluations and
+        records tracking metrics.
 
         :param prompt: The user prompt to send to the model
         :return: ManagedResult containing the model's response, metric summary,
             and an optional evaluations task
         """
         tracker = self._ai_config.create_tracker()
 
-        user_message = LDMessage(role='user', content=prompt)
-        self._messages.append(user_message)
-
         result: RunnerResult = await tracker.track_metrics_of_async(
             lambda r: r.metrics,
             lambda: self._model_runner.run(prompt),
         )
 
-        assistant_message = LDMessage(role='assistant', content=result.content)
-
-        input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else ''
-
-        evaluations_task = self._track_judge_results(tracker, input_text, result.content)
-
-        self._messages.append(assistant_message)
+        evaluations_task = self._track_judge_results(tracker, prompt, result.content)
 
         return ManagedResult(
             content=result.content,
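Reviewer note: a minimal usage sketch of the new behavior in managed_model.py follows. It assumes a client has already been constructed; the create_model() argument shown is hypothetical (its signature is not part of this diff), and only LDAIClient, the awaitable ManagedModel.run(), and the result's content field come from the code above.

from ldai import LDAIClient


async def demo(client: LDAIClient) -> None:
    # Hypothetical argument: the create_model() signature is not shown in this PR.
    model = client.create_model('chat-model-config')

    # ManagedModel no longer appends to its own message list; conversation
    # history is owned by the runner, so each call passes only the new prompt.
    first = await model.run('Summarize the release notes.')
    print(first.content)

    # Judge evaluations now receive the prompt itself as the input text
    # (previously the joined message history), per _track_judge_results above.
    follow_up = await model.run('Now shorten that to one sentence.')
    print(follow_up.content)

The upshot of the design change is that history lives in one place (the runner), leaving ManagedModel as a thin tracking and evaluation wrapper.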
2 changes: 0 additions & 2 deletions packages/sdk/server-ai/src/ldai/providers/__init__.py
@@ -3,7 +3,6 @@
 from ldai.providers.runner import Runner
 from ldai.providers.runner_factory import RunnerFactory
 from ldai.providers.types import (
-    AgentGraphResult,
     AgentGraphRunnerResult,
     GraphMetrics,
     GraphMetricSummary,
@@ -17,7 +16,6 @@
 
 __all__ = [
     'AIProvider',
-    'AgentGraphResult',
     'AgentGraphRunner',
     'AgentGraphRunnerResult',
     'GraphMetrics',
16 changes: 0 additions & 16 deletions packages/sdk/server-ai/src/ldai/providers/types.py
@@ -205,19 +205,3 @@ def to_dict(self) -> Dict[str, Any]:
             result['errorMessage'] = self.error_message
         return result
 
-
-@dataclass
-class AgentGraphResult:
-    """Contains the result of an agent graph run."""
-
-    output: str
-    """The agent graph's final output content."""
-
-    raw: Any
-    """The provider-native response object from the graph run."""
-
-    metrics: LDAIMetrics
-    """Metrics recorded during the graph run."""
-
-    evaluations: Optional[List[JudgeResult]] = None
-    """Optional list of judge evaluation results produced for the graph run."""
2 changes: 0 additions & 2 deletions packages/sdk/server-ai/tests/test_runner_abcs.py
@@ -1,7 +1,6 @@
 import pytest
 
 from ldai.providers import (
-    AgentGraphResult,
     AgentGraphRunner,
     AgentGraphRunnerResult,
     ToolRegistry,
@@ -78,6 +77,5 @@ def test_tool_registry_is_dict_of_callables():
 def test_top_level_exports():
     import ldai
     assert hasattr(ldai, 'AgentGraphRunner')
-    assert hasattr(ldai, 'AgentGraphResult')
     assert hasattr(ldai, 'RunnerResult')
     assert hasattr(ldai, 'ToolRegistry')