src/google/adk/apps/app.py (6 additions & 0 deletions)

@@ -80,6 +80,12 @@ class EventsCompactionConfig(BaseModel):
   end of the last compacted range. This creates an overlap between consecutive
   compacted summaries, maintaining context."""
 
+  token_threshold: Optional[int] = None
+  """Token count that triggers compaction when exceeded."""
+
+  retain_recent_events: Optional[int] = None
+  """Number of the most recent events to retain during compaction."""
+
 
 class App(BaseModel):
   """Represents an LLM-backed agentic application.
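For orientation, here is a minimal sketch of how an application might opt into these thresholds. It assumes the remaining EventsCompactionConfig fields have usable defaults and that App accepts name, root_agent, and events_compaction_config keyword arguments; root_agent is a placeholder, not defined here.

from google.adk.apps.app import App, EventsCompactionConfig

compaction_config = EventsCompactionConfig(
    token_threshold=4096,      # compact once the approximate token count exceeds 4096
    retain_recent_events=10,   # hold the 10 newest events out of the compacted span
)

app = App(
    name="compacting_app",                       # illustrative name
    root_agent=root_agent,                       # placeholder agent instance
    events_compaction_config=compaction_config,
)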
src/google/adk/apps/compaction.py (2 additions & 1 deletion)

@@ -192,7 +192,8 @@ async def _run_compaction_for_sliding_window(

   compaction_event = (
       await app.events_compaction_config.summarizer.maybe_summarize_events(
-          events=events_to_compact
+          events=events_to_compact,
+          config=app.events_compaction_config
       )
   )
   if compaction_event:
src/google/adk/apps/llm_event_summarizer.py (14 additions & 1 deletion)

@@ -81,19 +81,32 @@ def _format_events_for_prompt(self, events: list[Event]) -> str:
     return '\n'.join(formatted_history)
 
   async def maybe_summarize_events(
-      self, *, events: list[Event]
+      self, *, events: list[Event], config: EventsCompactionConfig
   ) -> Optional[Event]:
     """Compacts given events and returns the compacted content.
 
     Args:
       events: A list of events to compact.
+      config: The configuration for event compaction.
 
     Returns:
       The new compacted event, or None if no compaction is needed.
     """
     if not events:
       return None
 
+    # Placeholder for token counting logic
+    def count_tokens(events: list[Event]) -> int:
+      return sum(len(event.content.parts) for event in events if event.content)
Contributor commented on lines +99 to +100 (severity: high):
The count_tokens function is currently a placeholder that sums the number of parts in an event's content. This is a very rough approximation and does not accurately reflect the actual token count that an LLM would use. For the token_threshold feature to be effective and reliable, this logic needs to be replaced with a proper tokenization mechanism, ideally one that is specific to the LLM being used or a widely accepted tokenizer (e.g., tiktoken). An inaccurate token count could lead to either premature compaction or exceeding the LLM's context window, impacting performance and context retention.

Suggested change
-    def count_tokens(events: list[Event]) -> int:
-      return sum(len(event.content.parts) for event in events if event.content)
+    def count_tokens(events: list[Event]) -> int:
+      # TODO(developer): Replace this placeholder with actual LLM-specific token counting logic.
+      # For example, using a tokenizer from the LLM or a library like tiktoken.
+      return sum(len(event.content.parts) for event in events if event.content)
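For reference, a minimal sketch of such a counter, assuming tiktoken's cl100k_base encoding is an acceptable stand-in for the serving model's tokenizer, that Event is importable from google.adk.events, and that text parts expose a text attribute; none of this is prescribed by the ADK API.

import tiktoken

from google.adk.events import Event

_ENC = tiktoken.get_encoding("cl100k_base")  # assumed stand-in tokenizer


def count_tokens(events: list[Event]) -> int:
  total = 0
  for event in events:
    if not event.content:
      continue
    for part in event.content.parts:
      text = getattr(part, "text", None)  # skip non-text parts (tool calls, blobs)
      if text:
        total += len(_ENC.encode(text))
  return total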


+    token_count = count_tokens(events)
+
+    if config.token_threshold and token_count <= config.token_threshold:
+      return None
+
+    if config.retain_recent_events:
+      events = events[:-config.retain_recent_events]

     conversation_history = self._format_events_for_prompt(events)
     prompt = self._prompt_template.format(
         conversation_history=conversation_history
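The gate above works in two steps: compaction is skipped while the history is at or below token_threshold, and when it does run, the retain_recent_events newest events are held out of the summarized span so they survive verbatim. A standalone sketch of those two rules under the same semantics follows; the function names are illustrative, not part of the ADK API.

from typing import Optional


def should_compact(token_count: int, token_threshold: Optional[int]) -> bool:
  # Trigger only once the count exceeds the threshold; with no threshold
  # configured, compaction is always allowed.
  return not token_threshold or token_count > token_threshold


def events_to_summarize(events: list, retain_recent: Optional[int]) -> list:
  # Hold the newest `retain_recent` events back; summarize only older ones.
  return events[:-retain_recent] if retain_recent else events


assert should_compact(150, token_threshold=100)
assert not should_compact(80, token_threshold=100)
assert events_to_summarize(list(range(5)), retain_recent=2) == [0, 1, 2]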
tests/integration/test_multi_turn.py (9 additions & 0 deletions)

@@ -36,6 +36,15 @@ async def test_dependent_tool_calls():
   )
 
 
+@pytest.mark.asyncio
+async def test_context_compaction_with_thresholds():
+  """Test context compaction using token threshold and retaining recent events."""
+  await AgentEvaluator.evaluate(
+      agent_module="tests.integration.fixture.compaction_evaluation_agent",
+      eval_dataset_file_path_or_dir="tests/integration/fixture/compaction_evaluation_agent/test_files/compaction_with_thresholds.test.json",
+      num_runs=4,
+  )
+
 @pytest.mark.asyncio
 async def test_memorizing_past_events():
   """Test memorizing past events."""