Skip to content

Commit 4da60ee

Browse files
committed
Fix the Ruff CI Errors
1 parent 217fd32 commit 4da60ee

7 files changed

Lines changed: 69 additions & 28 deletions

File tree

README.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
# RLM Code
22

3+
<p align="center">
4+
<a href="https://github.com/SuperagenticAI/rlm-code">
5+
<img src="https://raw.githubusercontent.com/SuperagenticAI/rlm-code/main/assets/rlm-code-logo.png" alt="RLM Code logo" width="300">
6+
</a>
7+
</p>
8+
39
**Run LLM-powered agents in a REPL loop, benchmark them, and compare results.**
410

511
RLM Code implements the [Recursive Language Models](https://arxiv.org/abs/2502.07503) (RLM) approach from the 2025 paper release. Instead of stuffing your entire document into the LLM's context window, RLM stores it as a Python variable and lets the LLM write code to analyze it — chunk by chunk, iteration by iteration. This is dramatically more token-efficient for large inputs.

rlm_code/models/llm_connector.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -814,13 +814,9 @@ def _generate_ollama(
814814
if detail:
815815
lowered = detail.lower()
816816
if "not found" in lowered or "model" in lowered and "pull" in lowered:
817-
guidance = (
818-
f". Verify model name '{self.current_model}' and run `ollama pull {self.current_model}`"
819-
)
817+
guidance = f". Verify model name '{self.current_model}' and run `ollama pull {self.current_model}`"
820818
elif "insufficient" in lowered or "memory" in lowered:
821-
guidance = (
822-
". Model may be too large for local memory; try a smaller model or increase resources"
823-
)
819+
guidance = ". Model may be too large for local memory; try a smaller model or increase resources"
824820
raise ModelError(f"Ollama generation failed: {detail or exc}{guidance}") from exc
825821

826822
except Exception as e:

rlm_code/rlm/frameworks/google_adk_adapter.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
from __future__ import annotations
66

77
import asyncio
8+
import sys
89
from dataclasses import dataclass
910
from typing import Any
1011

@@ -21,6 +22,14 @@ class GoogleADKFrameworkAdapter:
2122
reference_impl: str = "google.adk (installed package)"
2223

2324
def doctor(self) -> tuple[bool, str]:
25+
cached = sys.modules.get("google.adk")
26+
if cached is not None:
27+
return (True, "google-adk available")
28+
if "google.adk" in sys.modules and cached is None:
29+
return (
30+
False,
31+
"google-adk not installed. Install with: pip install 'rlm-code[adk]'",
32+
)
2433
try:
2534
import google.adk # noqa: F401
2635
except Exception:

rlm_code/ui/__init__.py

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,21 @@
66

77
from .animations import ThinkingAnimation, get_random_thinking_message
88
from .prompts import get_user_input
9-
from .tui_app import run_textual_tui
109
from .welcome import show_welcome_screen
1110

11+
12+
def run_textual_tui(*args, **kwargs):
    """Launch the Textual TUI, importing it only when first invoked.

    Deferring the ``tui_app`` import means this package stays importable
    when the optional ``textual`` dependency is not installed (e.g. for
    utility modules or test runs that never open the TUI).
    """
    from .tui_app import run_textual_tui as _launch

    return _launch(*args, **kwargs)
22+
23+
1224
__all__ = [
1325
"ThinkingAnimation",
1426
"get_random_thinking_message",

rlm_code/ui/pty_terminal.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,8 @@
2929
# ---- ANSI escape code stripper (for plain-text fallback) ----
3030

3131
# Matches, in order: CSI sequences (ESC [ ... letter), OSC sequences
# (ESC ] ... terminated by BEL or by ST, i.e. ESC \), and a catch-all for
# CSI sequences containing intermediate bytes.  The OSC branch previously
# accepted only BEL as terminator, leaving ST-terminated titles (emitted by
# many terminals/tmux) partially in the "plain text" output.
_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]|\x1b\].*?(?:\x07|\x1b\\)|\x1b\[.*?[a-zA-Z]")


def strip_ansi(text: str) -> str:
    """Remove ANSI escape sequences (CSI and OSC) from ``text``.

    Used for the plain-text fallback rendering; returns ``text`` unchanged
    when it contains no escape sequences.
    """
    return _ANSI_RE.sub("", text)
@@ -706,7 +708,16 @@ def on_key(self, event: events.Key) -> None:
706708
elif event.key == "delete":
707709
# Keep behavior simple and conservative for status preview.
708710
self._update_status()
709-
elif event.key in {"up", "down", "left", "right", "home", "end", "page_up", "page_down"}:
711+
elif event.key in {
712+
"up",
713+
"down",
714+
"left",
715+
"right",
716+
"home",
717+
"end",
718+
"page_up",
719+
"page_down",
720+
}:
710721
# Cursor/history edits are hard to mirror exactly; reset preview to avoid stale text.
711722
self._typed_buffer = ""
712723
self._update_status()

rlm_code/ui/tui_app.py

Lines changed: 25 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1316,7 +1316,9 @@ def _resolve_connection_mode(self) -> str:
13161316

13171317
def _render_status_snapshot(self, *, title: str = "Status Snapshot") -> None:
13181318
connected = bool(self.connector.current_model)
1319-
model = self.connector.current_model_id or self.connector.current_model or "disconnected"
1319+
model = (
1320+
self.connector.current_model_id or self.connector.current_model or "disconnected"
1321+
)
13201322
provider = self.connector.model_type or "-"
13211323
mode = self._resolve_connection_mode()
13221324
route = str(self._last_response_route or "direct-llm").strip().lower() or "direct-llm"
@@ -1357,7 +1359,7 @@ def _render_status_snapshot(self, *, title: str = "Status Snapshot") -> None:
13571359
if connected:
13581360
tips.append("/models", style=f"bold {PALETTE.info}")
13591361
tips.append(" ", style=PALETTE.text_dim)
1360-
tips.append("/rlm run \"task\" steps=4", style=f"bold {PALETTE.success}")
1362+
tips.append('/rlm run "task" steps=4', style=f"bold {PALETTE.success}")
13611363
tips.append(" ", style=PALETTE.text_dim)
13621364
tips.append("/connect", style=f"bold {PALETTE.warning}")
13631365
else:
@@ -1386,7 +1388,7 @@ def _render_connection_success(self, provider: str, model: str) -> None:
13861388
steps.append("Ready: ", style=f"bold {PALETTE.success}")
13871389
steps.append("/status", style=f"bold {PALETTE.info}")
13881390
steps.append(" ", style=PALETTE.text_dim)
1389-
steps.append("/rlm run \"your task\" steps=6", style=f"bold {PALETTE.success}")
1391+
steps.append('/rlm run "your task" steps=6', style=f"bold {PALETTE.success}")
13901392
steps.append(" ", style=PALETTE.text_dim)
13911393
steps.append("/models", style=f"bold {PALETTE.info}")
13921394

@@ -1444,7 +1446,9 @@ def _render_rlm_run_started(self, command: str) -> None:
14441446
)
14451447
)
14461448
try:
1447-
summary = self._cached_research_summary or self.query_one("#research_summary", Static)
1449+
summary = self._cached_research_summary or self.query_one(
1450+
"#research_summary", Static
1451+
)
14481452
summary.update(
14491453
"[yellow]Run started...[/yellow] waiting for runtime events. "
14501454
"Open [cyan]Research Lab -> Events[/cyan] for live logs."
@@ -1499,7 +1503,9 @@ def _update_research_live_from_event(self, name: str, payload: dict[str, Any]) -
14991503
except Exception:
15001504
pass
15011505
try:
1502-
state["reward"] = float(payload.get("total_reward", state.get("reward", 0.0)) or 0.0)
1506+
state["reward"] = float(
1507+
payload.get("total_reward", state.get("reward", 0.0)) or 0.0
1508+
)
15031509
except Exception:
15041510
pass
15051511
else:
@@ -1535,7 +1541,9 @@ def _update_research_live_from_event(self, name: str, payload: dict[str, Any]) -
15351541
pass
15361542

15371543
try:
1538-
summary = self._cached_research_summary or self.query_one("#research_summary", Static)
1544+
summary = self._cached_research_summary or self.query_one(
1545+
"#research_summary", Static
1546+
)
15391547
summary.update(
15401548
f"{status_text} | Reward: [bold]{reward:.3f}[/bold] | "
15411549
f"Steps: {steps} | Run: [dim]{run_id}[/dim]{task_line}"
@@ -1581,8 +1589,12 @@ def _render_rlm_run_summary_from_context(self) -> None:
15811589
body.add_column()
15821590
body.add_row(lines)
15831591
if final_response:
1584-
preview = final_response if len(final_response) <= 320 else f"{final_response[:317]}..."
1585-
body.add_row(Panel(preview, title="Final Response (preview)", border_style="#3b82f6"))
1592+
preview = (
1593+
final_response if len(final_response) <= 320 else f"{final_response[:317]}..."
1594+
)
1595+
body.add_row(
1596+
Panel(preview, title="Final Response (preview)", border_style="#3b82f6")
1597+
)
15861598
body.add_row(tail)
15871599

15881600
self._chat_log().write(
@@ -2197,9 +2209,7 @@ def _set_command_running(self, command: str) -> None:
21972209
status.append(f"Running {short}", style=PALETTE.text_secondary)
21982210
self._thinking_status().update(status)
21992211

2200-
def _render_slash_footer(
2201-
self, command: str, *, handled: bool, error: str | None
2202-
) -> None:
2212+
def _render_slash_footer(self, command: str, *, handled: bool, error: str | None) -> None:
22032213
started = self._active_slash_started_at
22042214
elapsed = None
22052215
if started is not None:
@@ -3390,7 +3400,9 @@ def _handle_rlm_abort_fast(self, args: list[str]) -> bool:
33903400
f"[yellow]Requested cancellation for run '{run_id}'.[/yellow]"
33913401
)
33923402
else:
3393-
self._chat_log().write("[yellow]Requested cancellation for all active runs.[/yellow]")
3403+
self._chat_log().write(
3404+
"[yellow]Requested cancellation for all active runs.[/yellow]"
3405+
)
33943406

33953407
if active_runs:
33963408
joined = ", ".join(str(item) for item in active_runs)
@@ -3400,9 +3412,7 @@ def _handle_rlm_abort_fast(self, args: list[str]) -> bool:
34003412

34013413
if pending:
34023414
joined = ", ".join(str(item) for item in pending)
3403-
self._chat_log().write(
3404-
f"[dim]Pending run-specific cancellations:[/dim] {joined}"
3405-
)
3415+
self._chat_log().write(f"[dim]Pending run-specific cancellations:[/dim] {joined}")
34063416

34073417
return True
34083418

tests/test_rlm_runner.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -265,8 +265,7 @@ def test_rlm_status_and_events(tmp_path):
265265
def test_rlm_run_task_can_be_cancelled(tmp_path):
266266
connector = _FakeConnector(
267267
responses=[
268-
'{"action":"run_python","code":"print(\\"tick\\")","done":false}'
269-
for _ in range(200)
268+
'{"action":"run_python","code":"print(\\"tick\\")","done":false}' for _ in range(200)
270269
]
271270
)
272271
engine = _SlowExecutionEngine(delay_seconds=0.03)
@@ -297,9 +296,7 @@ def test_rlm_run_task_can_be_cancelled(tmp_path):
297296

298297

299298
def test_rlm_request_cancel_all_without_active_runs_does_not_latch(tmp_path):
300-
connector = _FakeConnector(
301-
responses=['{"action":"final","done":true,"final_response":"done"}']
302-
)
299+
connector = _FakeConnector(responses=['{"action":"final","done":true,"final_response":"done"}'])
303300
engine = _FakeExecutionEngine()
304301
runner = RLMRunner(llm_connector=connector, execution_engine=engine, run_dir=tmp_path)
305302

0 commit comments

Comments
 (0)