From 2c4a63b096dcb63f1a2f6aa4c57494c4679b0de0 Mon Sep 17 00:00:00 2001 From: DannyP39 <98972125+Danipiza@users.noreply.github.com> Date: Tue, 4 Nov 2025 12:53:09 +0100 Subject: [PATCH 01/40] [#23892] Added textual dependencies Signed-off-by: danipiza --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 27e9482..4313e6c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,9 @@ dependencies = [ "prompt_toolkit==3.0.52", "rich==14.1.0", "sentence-transformers==5.1.0", + "textual==6.5.0", + "textual-dev==1.8.0", + "textual-serve==1.1.3", ] [project.optional-dependencies] From 63e52ca4a13458fea01f7d819ba4e704765b95e7 Mon Sep 17 00:00:00 2001 From: danipiza Date: Wed, 12 Nov 2025 14:31:08 +0100 Subject: [PATCH 02/40] [#23892] Added Textual terminal initial commit Signed-off-by: danipiza --- src/vulcanai/console/console.py | 817 ++++++++++++++++++++++++---- src/vulcanai/core/agent.py | 18 +- src/vulcanai/core/executor.py | 36 +- src/vulcanai/core/manager.py | 255 +++++++++ src/vulcanai/tools/tool_registry.py | 20 +- 5 files changed, 1011 insertions(+), 135 deletions(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 6450548..11eaed1 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -1,3 +1,5 @@ +from __future__ import annotations + # Copyright 2025 Proyectos y Sistemas de Mantenimiento SL (eProsima). 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +24,27 @@ from vulcanai.console.logger import console +from textual.app import App, ComposeResult +from textual.widgets import Input, Log, Static +from textual import events + + +from rich.text import Text +from rich.markup import escape + +from textual.containers import VerticalScroll + +import asyncio, time +from typing import Callable + + +class Prompt(Static): + """A tiny prompt label shown to the left of the input.""" + + def __init__(self, text: str = "> "): + super().__init__(text, id="prompt") + + class SpinnerHook(IModelHooks): """ Single entrant spinner controller for console. @@ -34,6 +57,8 @@ def __init__(self, console): def on_request_start(self) -> None: with self._lock: + #self.console.call_from_thread(self.console.show_spinner) + # TODO. danip add spinner in textualize terminal # First request => create spinner self._progress = Progress( SpinnerColumn(spinner_name="dots2"), @@ -46,6 +71,7 @@ def on_request_start(self) -> None: def on_request_end(self) -> None: with self._lock: + #self.console.call_from_thread(self.console.hide_spinner) if self._progress is not None: # Last request finished => stop spinner try: @@ -58,8 +84,86 @@ def on_request_end(self) -> None: self._task_id = None -class VulcanConsole: - def __init__(self, model: str = "gpt-5-nano", k: int = 7, iterative: bool = False): +class VulcanConsole(App): + + # Terminal style + """CSS = + Screen { + layout: vertical; + } + + # Header at top, then the log grows, then input row at bottom + # A small prompt sits to the left of the input for terminal vibes + + # Output area + # Make it fill remaining space and look terminal-ish + # Log already has a good style; just ensure it expands + # and wraps nicely. 
+ Log#log { + height: 1fr; + border: solid rgb(0, 205, 0); + background: $boost; + padding: 1 2; + overflow-y: auto; + } + + .input-row { + height: auto; + layout: horizontal; + padding: 0 1; + dock: bottom; + background: $panel; + } + + # Prompt label + # Slightly dim to look like a shell prompt + # Input stretches to fill the row + # The Input gets a monospace look by default under Textual + + # Prompt label style + # (Using Static for a simple label avoids extra dependencies.) + + # Make the input larger for comfortable typing + Input#cmd { + width: 1fr; + padding: 0 1; + } + + Static#prompt { + width: auto; + color: $text-muted; + content-align: left middle; + } + + Static#hint { + height: auto; + color: $text-muted; + padding: 0 2; + } + """ + + CSS = """ + #log { height: 1fr; } + #cmd { dock: bottom; } + """ + + """BINDINGS = [ + ("ctrl+q", "app_quit", "Quit"), + ("ctrl+p", "clear", "Clear"), + ("f1", "help", "Help"), + ("ctrl+alt+c", "copy", "Copy Selection"), # new: mirrors terminal habit + ]""" + + BINDINGS = [ + ("ctrl+c", "copy_to_clipboard", "Copy log to clipboard"), + ("y", "copy_log", "Copy log to clipboard"), + #("ctrl+l", "clear", "Clear the terminal"), + ] + + def __init__(self, tools_from_entrypoints: str = "", user_context: str = "", main_node = None, + model: str = "gpt-5-nano", k: int = 7, iterative: bool = False): + super().__init__() # textual lib + self.manager = None self.session = PromptSession() self.last_plan = None @@ -69,7 +173,58 @@ def __init__(self, model: str = "gpt-5-nano", k: int = 7, iterative: bool = Fals self.model = model self.k = k - self.init_manager(iterative) + self.iterative = iterative + self.tools_from_entrypoints = tools_from_entrypoints + self.user_context = user_context + self.main_node = main_node + + self.commands = None + self._tab_matches = [] + self._tab_index = 0 + self._log_lines = [] + + # terminal qol + self.history = [] + + async def on_mount(self) -> None: + + self._set_input_enabled(False) + + 
self._log("Starting VulcanAI...", log_color=1) + + await asyncio.sleep(0) + asyncio.create_task(self._bootstrap()) + + """self.cmd_input.focus()""" + + """ + # Disable commands until we finish boot + #self._set_input_enabled(False) + + self.init _manager(self.iterative) + + # command registry: name -> handler + self.commands: Dict[str, Callable[[List[str]], None]] = { + "/help": self.cmd_help, + #"help": self.cmd_help, + #"h": self.cmd_help, + "/tools": self.cmd_tools, + "/change_k": self.cmd_change_k, + "/history": self.cmd_history_index, + "/show_history": self.cmd_show_history, + "/plan": self.cmd_plan, + "/rerun": self.cmd_rerun, + "/bb": self.cmd_blackboard_state, + "/clear": self.cmd_clear, + #"clear": self.cmd_clear, + "/exit": self.cmd_quit, + #"q": self.cmd_quit, + #"exit": self.cmd_quit, + } + + # cycling through tab matches + self._tab_matches = [] + self._tab_index = 0 # Override hooks with spinner controller try: @@ -77,11 +232,552 @@ def __init__(self, model: str = "gpt-5-nano", k: int = 7, iterative: bool = Fals except Exception: pass - def run(self): - self.print("VulcanAI Interactive Console") - self.print("Type 'exit' to quit.\n") + if self.tools_from_entrypoints != "": + self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) + + self.manager.add_user_context(self.user_context) + + # Add the shared node to the console manager blackboard to be used by tools + if self.main_node != None: + self.manager.bb["main_node"] = self.main_node""" + + def compose(self) -> ComposeResult: + """yield Header(show_clock=True) + #yield Log(id="term", highlight=True)#, markup=True) + yield Log(id="log", highlight=True) + yield Static("Press [b]F1[/b] for help • [b]Ctrl+C[/b] to quit • [b]Tab[/b] to autocomplete", id="hint") + with Static(classes="input-row"): + yield Prompt("> ") + yield Input(placeholder="Type a command and press Enter…", id="cmd") + yield Footer()""" + + """yield Static("", id="log") + yield Input(placeholder="> ", id="cmd")""" 
+ + with VerticalScroll(id="logview"): + yield Static("", id="logcontent") + yield Input(placeholder="> ", id="cmd") + + async def _bootstrap(self) -> None: + """ + Function used to print information in runtime execution of a function + + TODO. danip update this function to allow queries information and add the spinner + """ + def worker(log_cb: Callable[[str], None]) -> None: + self.init_manager(log_cb) + + # add the commands + # command registry: name -> handler + self.commands = { + "/help": self.cmd_help, + #"help": self.cmd_help, + #"h": self.cmd_help, + "/tools": self.cmd_tools, + "/change_k": self.cmd_change_k, + "/history": self.cmd_history_index, + "/show_history": self.cmd_show_history, + "/plan": self.cmd_plan, + "/rerun": self.cmd_rerun, + "/bb": self.cmd_blackboard_state, + "/clear": self.cmd_clear, + #"clear": self.cmd_clear, + "/exit": self.cmd_quit, + #"q": self.cmd_quit, + #"exit": self.cmd_quit, + } + + #log_cb("Added commands.") + + # cycling through tab matches + self._tab_matches = [] + self._tab_index = 0 + + # Override hooks with spinner controller + try: + self.manager.llm.set_hooks(self.hooks) + except Exception: + pass + + #log_cb("Added hooks.") + + if self.tools_from_entrypoints != "": + self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) + + #log_cb("Added tools.") + + self.manager.add_user_context(self.user_context) + + #log_cb("Added user_context.") + + # Add the shared node to the console manager blackboard to be used by tools + if self.main_node != None: + self.manager.bb["main_node"] = self.main_node + + def log_cb(msg: str) -> None: + """ + Print the msg while executing a function + """ + self.call_from_thread(self._log, msg) + + loop = asyncio.get_running_loop() + await loop.run_in_executor(None, lambda: worker(log_cb)) + + # TODO. danip. 
add the queries information + if True: + self._is_ready = True + self._set_input_enabled(True) + self._log("VulcanAI Interactive Console", log_color=2) + self._log("Type 'exit' to quit.\n", log_color=2) + + # region Utilities - while True: + """@property + def term(self): + return self.query_one("#log", Log) + + @property + def cmd_input(self): + return self.query_one("#cmd", Input)#, id="term") + + def print_system(self, message: str): + #self.term.write(f"[bold cyan]system[/]: {message}") + self.term.write(f"{message}") + + #self.term.write(Text.from_markup(f"[{green_hex}]system[/]: {escape(message)}")) + #self.term.write(Text.from_markup(f"{escape(message)}")) + + def print_output(self, message: str): + self.term.write(message)""" + + # endregion + + # region Commands + + def cmd_help(self, _) -> None: + table = "\n".join( + [ + "Available commands:\n" + "/help - Show this help message\n" + "/tools - List available tools\n" + "/change_k - Change the 'k' value for the top_k algorithm selection or show the current value if no is provided\n" + "/history - Change the history depth or show the current value if no is provided\n" + "/show_history - Show the current history\n" + "/plan - Show the last generated plan\n" + "/rerun - Rerun the last plan\n" + "/bb - Show the last blackboard state\n" + "/clear - Clear the console screen\n" + "/exit - Exit the console\n" + "Query any other text to process it with the LLM and execute the plan generated.\n" + "Add --image= to include images in the query. 
It can be used multiple times to add more images.\n" + "Example: ' --image=/path/to/image1 --image=/path/to/image2'" + ] + ) + self._log(table, log_color=2) + + def cmd_tools(self, _) -> None: + help_msg = f"Available tools (current index k={self.manager.k}):\n" + for tool in self.manager.registry.tools.values(): + help_msg += f"- {tool.name}: {tool.description}\n" + self._log(help_msg, log_color=2) + + def cmd_change_k(self, args) -> None: + if len(args) == 0: + self._log(f"Current 'k' is {self.manager.k}", log_color=2) + return + if len(args) != 1 or not args[0].isdigit(): + self._log(f"Usage: /change_k - Actual 'k' is {self.manager.k}", + log_type="error", log_color=2) + return + + new_k = int(args[0]) + self.manager.k = new_k + self.manager.update_k_index(new_k) + + def cmd_history_index(self, args) -> None: + if len(args) == 0: + self._log(f"Current 'history depth' is {self.manager.history_depth}", + log_color=2) + return + if len(args) != 1 or not args[0].isdigit(): + self._log(f"Usage: /history - Actual 'history depth' is {self.manager.history_depth}", + log_type="error", log_color=2) + return + + new_hist = int(args[0]) + self.manager.update_history_depth(new_hist) + + def cmd_show_history(self, _) -> None: + if not self.manager.history: + self._log("No history available.", log_color=2) + return + + help_msg = "\nCurrent history (oldest first):\n" + for i, (user_text, plan_summary) in enumerate(self.manager.history): + help_msg += f"{i+1}. 
User: {user_text}\n Plan summary: {plan_summary}\n" + + self._log(help_msg, log_color=2) + + def cmd_plan(self, _) -> None: + if self.last_plan: + self._log("Last generated plan:", log_color=2) + self._log(self.last_plan, log_color=2) + else: + self._log("No plan has been generated yet.", log_color=2) + + def cmd_rerun(self, _) -> None: + if self.last_plan: + self._log("Rerunning last plan...", log_color=2) + result = self.manager.executor.run(self.last_plan, self.manager.bb) + self.last_bb = result.get("blackboard", None) + self._log(f"Output of rerun: {result.get('blackboard', {None})}", log_color=2) + else: + self._log("No plan to rerun.", log_color=2) + + def cmd_blackboard_state(self, _) -> None: + if self.last_bb: + self._log("Last blackboard state:", log_color=2) + self._log(self.last_bb, log_color=2) + else: + self._log("No blackboard available.", log_color=2) + + def cmd_clear(self, _) -> None: + self._log_lines.clear() + self.query_one("#logcontent", Static).update("") + + def cmd_quit(self, _) -> None: + self.exit() + + def cmd_echo(self, args) -> None: + self._log(" ".join(args)) + + # endregion + + # region Logging + + def _render_log(self) -> None: + """self.query_one("#log", Static).update("\n".join(self._log_lines))""" + log_static = self.query_one("#logcontent", Static) + log_static.update("\n".join(self._log_lines)) + + self.query_one("#logview", VerticalScroll).scroll_end(animate=False) + + """def _render_log(self): + log = self.query_one("#log", Static) + # Allow Rich markup for colors and styles + log.update("\n".join(self._log_lines), markup=True)""" + + def _log(self, line: str, log_type: str = "", log_color: int = -1) -> None: + msg = "" + + color_type = "" + + if log_type == "register": + msg = "[bold cyan]\[REGISTRY][/bold cyan] " + elif log_type == "manager": + msg = "[bold blue]\[MANAGER][/bold blue] " + elif log_type == "executor": + msg = "[bold light green]\[EXECUTOR][/bold light green] " + elif log_type == "validator": + msg = "[bold 
orange_red1]\[VALIDATOR][/bold orange_red1] " + elif log_type == "error": + msg = "[bold red]\[ERROR][/bold red] " + + if log_color == 0: + color_type = "#FF0000" + elif log_color == 1: + color_type = "#4E9A06" + elif log_color == 2: + color_type = "#8F6296" + elif log_color == 3: + color_type = "#C49C00" + elif log_color == 4: + color_type = "#069899" + else: + color_type = "#FFFFFF" + + msg += f"[{color_type}]{line}[/{color_type}]" + self._log_lines.append(msg) + self._render_log() + + """def _log(self, line: str): + if "error" in line.lower(): + line = f"[red]{line}[/red]" + elif "warn" in line.lower(): + line = f"[yellow]{line}[/yellow]" + elif "success" in line.lower(): + line = f"[green]{line}[/green]" + self._log_lines.append(line) + self._render_log()""" + + # endregion + + # region Input + + def _set_input_enabled(self, enabled: bool) -> None: + cmd = self.query_one("#cmd", Input) + cmd.disabled = not enabled + if enabled: + self.set_focus(cmd) + + async def on_input_submitted(self, event: Input.Submitted) -> None: + """ + Enter key + """ + + if not self._is_ready: + return + cmd = event.value.strip() + if not cmd: + return + + try: + if event.input.id != "cmd": + return + user_input = (event.value or "").strip() + + # the the user_input in the history navigation list (used when the up, down keys are pressed) + self.history.append(user_input) + event.input.value = "" + event.input.focus() + + # reset tab state + self._tab_matches = [] + self._tab_index = 0 + + if not user_input: + self.cmd_input.focus() + return + + # Terminal history inputs navigation + self._history_index = None + + # echo what the user typed (keep this if you like the prompt arrow) + self._log(f"\[USER] >>> {cmd}") + + # If it doesn't start with '/', just print it as output and stop here + if user_input.startswith("/"): + self.handle_command(user_input) + return + + # Check for image input. 
Must be always at the end of the input + images = [] + if "--image=" in user_input: + images = self.get_images(user_input) + + # Handle user request + try: + result = self.manager.handle_user_request(user_input, context={"images": images}) + except Exception as e: + #self.print(f"[error]Error handling request:[/error] {e}") + self._log(f"[error]Error handling request:[/error] {e}") + return + + self.last_plan = result.get("plan", None) + self.last_bb = result.get("blackboard", None) + + #self.print(f"Output of plan: {result.get('blackboard', {None})}") + self._log(f"Output of plan: {result.get('blackboard', {None})}") + + except KeyboardInterrupt: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + except EOFError: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + + """ + self.print_output(user_input) + event.input.value = "" + event.input.focus() + return + """ + + def handle_command(self, user_input: str) -> None: + # Otherwise, parse as a command + parts = user_input.split() + cmd = parts[0].lower() + args = parts[1:] + + handler = self.commands.get(cmd) + if handler is None: + # only complain for slash-commands + self._log(f"Unknown command: [b]{cmd}[/]. 
Type '/help'.", log_color=2) + else: + try: + handler(args) + except Exception as e: + self._log(f"Error: {e!r}", log_color=0) + + async def on_key(self, event: events.Key) -> None: + """Handle Up/Down for history navigation.""" + key = event.key + cmd_input = self.query_one("#cmd", Input) + + + if key in ("up", "down"): + # Only handle history navigation if input is focused + if self.focused is not cmd_input: + return + + if not self.history: + return + + # Initialize history index if not already set + if not hasattr(self, "_history_index") or self._history_index is None: + self._history_index = len(self.history) + + if key == "up" and self._history_index > 0: + self._history_index -= 1 + elif key == "down" and self._history_index < len(self.history): + self._history_index += 1 + else: + return # ignore if out of range + + # Update input value based on history + if 0 <= self._history_index < len(self.history): + cmd_input.value = self.history[self._history_index] + else: + cmd_input.value = "" + + # Move cursor to end + cmd_input.cursor_position = len(cmd_input.value) + event.stop() + return + + # AUTOCOMPLETE: Tab + if key == "tab": + # Current text (don’t strip right-side spaces; keep user’s spacing) + raw = cmd_input.value or "" + # Leading spaces are not part of the command token + left = len(raw) - len(raw.lstrip()) + value = raw[left:] + + if len(value) <= 0: + return + + # Split into head (first token) and the remainder + head, *rest = value.split(maxsplit=1) + remainder = rest[0] if rest else "" + + # Nothing typed yet: list all commands + all_cmds = sorted(self.commands) if self.commands else [] + if not all_cmds: + return + + self._tab_matches = [c for c in all_cmds if c.startswith(head)] if head else all_cmds + self._tab_index = 0 + + matches = self._tab_matches + if not matches: + cmd_input.focus() + event.prevent_default() + event.stop() + return + + # If multiple matches, check for a longer common prefix to insert first + if len(matches) > 1: + 
prefix = self._common_prefix(matches) + new_value = prefix + else: + # Single match: complete directly + new_value = matches[0] + + # Rebuild the input value: + cmd_input.value = new_value + cmd_input.cursor_position = len(cmd_input.value) + + cmd_input.focus() # keep caret in the input + event.prevent_default() + event.stop() + return + + if key == "ctrl+w": + value = cmd_input.value + cursor = cmd_input.cursor_position + i = cursor-1 + + while i > 0: + if(value[i] == ' '): + break + i -= 1 + + cmd_input.value = value[:i] + value[cursor:] + cmd_input.cursor_position = i + + cmd_input.focus() # keep caret in the input + event.prevent_default() + event.stop() + return + + if key in ("ctrl+delete", "escape") : + value = cmd_input.value + cursor = cmd_input.cursor_position + i = cursor-1 + n = len(value) + + while i < n: + if(value[i] == ' '): # TODO danip. mirar cuando tiene que borrar espacios en blanco + break + i += 1 + + cmd_input.value = value[:cursor] + value[i:] + cmd_input.cursor_position = i + + cmd_input.focus() # keep caret in the input + event.prevent_default() + event.stop() + return + + # Any other keypress resets tab cycle if the prefix changes + if len(key) == 1 or key in ("backspace", "delete"): + self._tab_matches = [] + self._tab_index = 0 + + def _common_prefix(self, strings: str) -> str: + if not strings: + return "" + + common_prefix = strings[0] + commands = strings[0] + + for i in range(1, len(strings)): + commands += f" {strings[i]}" + + tmp = "" + n = min(len(common_prefix), len(strings[i])) + j = 0 + + while j < n: + if common_prefix[j] != strings[i][j]: + break + tmp += common_prefix[j] + + j += 1 + + if j < n: + common_prefix = tmp + + self._log(commands, log_color=2) + + return common_prefix + + # endregion + + # region Actions (key bindings) + + + # endregion + + def run_console(self) -> None: + #self.print("VulcanAI Interactive Console") + #self.print("Type 'exit' to quit.\n") + + self.run() + + """while True: try: user_input = 
self.session.prompt("[USER] >>> ") if user_input.strip().lower() in ("exit", "quit"): @@ -114,107 +810,27 @@ def run(self): break except EOFError: console.print("[yellow]Exiting...[/yellow]") - break + break""" - def init_manager(self, iterative: bool = False): - if iterative: + def init_manager(self, log_cb: Callable[[str], None]) -> None: + if self.iterative: from vulcanai.core.manager_iterator import IterativeManager as ConsoleManager else: from vulcanai.core.manager_plan import PlanManager as ConsoleManager - console.print(f"[console]Initializing Manager '{ConsoleManager.__name__}'...[/console]") - self.manager = ConsoleManager(model=self.model, k=self.k) - self.print(f"Manager initialized with model '{self.model}'.") - - def handle_command(self, cmd: str): - """Process internal console commands.""" - if cmd == "/help": - help_msg = ( - "Available commands:\n" - "/help - Show this help message\n" - "/tools - List available tools\n" - "/change_k - Change the 'k' value for the top_k algorithm selection\n" - "/history - Change the history depth or show the current value if no is provided\n" - "/show_history - Show the current history\n" - "/plan - Show the last generated plan\n" - "/rerun - Rerun the last plan\n" - "/bb - Show the last blackboard state\n" - "/clear - Clear the console screen\n" - "exit - Exit the console\n" - "Query any other text to process it with the LLM and execute the plan generated." - "Add --image= to include images in the query. It can be used multiple times to add more images." 
- " Example: ' --image=/path/to/image1 --image=/path/to/image2'" - ) - self.print(help_msg) - - elif cmd == "/tools": - help_msg = f"\nAvailable tools (current index k={self.manager.k}):\n" - for tool in self.manager.registry.tools.values(): - help_msg += f"- {tool.name}: {tool.description}\n" - self.print(help_msg) - - elif cmd.startswith("/change_k"): - parts = cmd.split() - if len(parts) != 2 or not parts[1].isdigit(): - self.print(f"[error]Usage: /change_k [/error] - Actual k is {self.manager.k}") - return - new_k = int(parts[1]) - self.manager.k = new_k - self.print(f"Changed k to {new_k}") - - elif cmd.startswith("/history"): - parts = cmd.split() - if len(parts) == 1: - self.print(f"Current history depth is {self.manager.history_depth}") - return - if len(parts) != 2 or not parts[1].isdigit(): - self.print(f"[error]Usage: /history [/error] - Actual history depth is {self.manager.history_depth}") - return - new_hist = int(parts[1]) - self.manager.update_history_depth(new_hist) - elif cmd == "/show_history": - if not self.manager.history: - self.print("No history available.") - return - help_msg = "\nCurrent history (oldest first):\n" - for i, (user_text, plan_summary) in enumerate(self.manager.history): - help_msg += f"{i+1}. User: {user_text}\n Plan summary: {plan_summary}\n" - self.print(help_msg) - - elif cmd == "/plan": - if self.last_plan: - self.print("Last generated plan:") - console.print(self.last_plan) - else: - self.print("No plan has been generated yet.") - - elif cmd == "/rerun": - if self.last_plan: - self.print("Rerunning last plan...") - result = self.manager.executor.run(self.last_plan, self.manager.bb) - self.last_bb = result.get("blackboard", None) - self.print(f"Output of rerun: {result.get('blackboard', {None})}") - else: - self.print("No plan to rerun.") + # TODO. 
use log_cb to print the log information + #console.print(f"[console]Initializing Manager '{ConsoleManager.__name__}'...[/console]") + self._log(f"Initializing Manager '{ConsoleManager.__name__}'...", log_color=2) - elif cmd == "/bb": - if self.last_bb: - self.print("Last blackboard state:") - console.print(self.last_bb) - else: - self.print("No blackboard available.") - - elif cmd == "/clear": - os.system("clear") - - else: - self.print(f"[error]Unknown command {cmd}[/error]") + self.manager = ConsoleManager(model=self.model, k=self.k, logger=self._log) + #self.print(f"Manager initialized with model '{self.model}'.") + self._log(f"Manager initialized with model '{self.model}'", log_color=2) - def print(self, msg: str): + def print(self, msg: str) -> None: console.print(f"[console]{msg}[/console]") - def get_images(self, user_input: str): + def get_images(self, user_input: str) -> None: parts = user_input.split() images = [] @@ -224,7 +840,7 @@ def get_images(self, user_input: str): return images -def main(): +def main() -> None: parser = argparse.ArgumentParser(description="VulcanAI Interactive Console") parser.add_argument( "--model", type=str, default="gpt-5-nano", @@ -247,6 +863,7 @@ def main(): help="Enable Iterative Manager (default: off)" ) + # TODO. 
change args = parser.parse_args() console = VulcanConsole(model=args.model, k=args.k, iterative=args.iterative) if args.register_from_file: @@ -255,7 +872,7 @@ def main(): if args.register_from_entry_point: for entry_point in args.register_from_entry_point: console.manager.register_tools_from_entry_points(entry_point) - console.run() + console.run_console() if __name__ == "__main__": diff --git a/src/vulcanai/core/agent.py b/src/vulcanai/core/agent.py index 4abf08c..205ddff 100644 --- a/src/vulcanai/core/agent.py +++ b/src/vulcanai/core/agent.py @@ -29,10 +29,10 @@ class Agent: def __init__(self, model_name: str, logger=None): self.brand, name = self._detect_brand(model_name) self.model = None - self.logger = logger or VulcanAILogger.log_manager + self.logger = logger #or VulcanAILogger.log_manager self._load_model(name) - def inference_plan( + def inference( self, system_context: str, user_prompt: str, @@ -93,7 +93,7 @@ def inference_validation( history: list[tuple[str, str]], ) -> AIValidation: """ - Perform inference using the selected LLM model to generate a validation. + Perform inference using the selected LLM model to generate a goal. :param system_context: The system prompt or context for the LLM. :param user_prompt: The user's input or request. 
@@ -133,17 +133,17 @@ def _detect_brand(self, model_name: str) -> tuple[Brand, str]: def _load_model(self, model_name: str): if self.brand == Brand.gpt: from vulcanai.models.openai import OpenAIModel - self.logger(f"Using OpenAI API with model: {model_name}") + self.logger(f"Using OpenAI API with model: {model_name}", log_type="manager") self.model = OpenAIModel(model_name, self.logger) elif self.brand == Brand.gemini: from vulcanai.models.gemini import GeminiModel - self.logger(f"Using Gemini API with model: {model_name}") + self.logger(f"Using Gemini API with model: {model_name}", log_type="manager") self.model = GeminiModel(model_name, self.logger) elif self.brand == Brand.ollama: from vulcanai.models.ollama_model import OllamaModel - self.logger(f"Using Ollama API with model: {model_name}") + self.logger(f"Using Ollama API with model: {model_name}", log_type="manager") self.model = OllamaModel(model_name, self.logger) else: @@ -154,8 +154,8 @@ def set_hooks(self, hooks) -> None: if self.model: try: self.model.hooks = hooks - self.logger("LLM hooks set.") + self.logger("LLM hooks set.", log_type="manager") except Exception as e: - self.logger(f"Failed to set LLM hooks: {e}", error=True) + self.logger(f"Failed to set LLM hooks: {e}", log_type="manager", log_color=0) # error else: - self.logger("LLM model not initialized, cannot set hooks.", error=True) + self.logger("LLM model not initialized, cannot set hooks.", log_type="manager", log_color=0) # error diff --git a/src/vulcanai/core/executor.py b/src/vulcanai/core/executor.py index d473ee0..9507cbd 100644 --- a/src/vulcanai/core/executor.py +++ b/src/vulcanai/core/executor.py @@ -59,7 +59,7 @@ class PlanExecutor: def __init__(self, registry, logger=None): self.registry = registry - self.logger = logger or VulcanAILogger.log_executor + self.logger = logger #or VulcanAILogger.log_executor def run(self, plan: GlobalPlan, bb: Blackboard) -> Dict[str, Any]: """ @@ -80,19 +80,19 @@ def _run_plan_node(self, node: 
PlanBase, bb: Blackboard) -> bool: """Run a PlanNode with execution control parameters.""" # Evaluate PlanNode-level condition if node.condition and not self.safe_eval(node.condition, bb): - self.logger(f"Skipping PlanNode {node.kind} due to not fulfilled condition={node.condition}") + self.logger(f"Skipping PlanNode {node.kind} due to not fulfilled condition={node.condition}", log_type="executor") return True attempts = node.retry + 1 if node.retry else 1 for i in range(attempts): ok = self._execute_plan_node_with_timeout(node, bb) if ok and self._check_success(node, bb): - self.logger(f"PlanNode {node.kind} succeeded on attempt {i+1}/{attempts}") + self.logger(f"PlanNode {node.kind} succeeded on attempt {i+1}/{attempts}", log_type="executor") return True - self.logger(f"PlanNode {node.kind} failed on attempt {i+1}/{attempts}", error=True) + self.logger(f"PlanNode {node.kind} failed on attempt {i+1}/{attempts}", log_type="executor", log_color=0) # error if node.on_fail: - self.logger(f"Executing on_fail branch for PlanNode {node.kind}") + self.logger(f"Executing on_fail branch for PlanNode {node.kind}", log_type="executor") # Execute the on_fail branch but ignore its result and return False self._run_plan_node(node.on_fail, bb) @@ -106,7 +106,7 @@ def _execute_plan_node_with_timeout(self, node: PlanBase, bb: Blackboard) -> boo future = executor.submit(self._execute_plan_node, node, bb) return future.result(timeout=node.timeout_ms / 1000.0) except concurrent.futures.TimeoutError: - self.logger(f"PlanNode {node.kind} timed out after {node.timeout_ms} ms") + self.logger(f"PlanNode {node.kind} timed out after {node.timeout_ms} ms", log_type="executor") return False else: return self._execute_plan_node(node, bb) @@ -126,13 +126,13 @@ def _execute_plan_node(self, node: PlanBase, bb: Blackboard) -> bool: return all(results) # Pydantic should have validated this already - self.logger(f"Unknown PlanNode kind {node.kind}, skipping", error=True) + self.logger(f"Unknown 
PlanNode kind {node.kind}, skipping", log_type="executor", log_color=0)# error return True def _run_step(self, step: Step, bb: Blackboard, parallel: bool = False) -> bool: # Evaluate Step-level condition if step.condition and not self.safe_eval(step.condition, bb): - self.logger(f"Skipping step [italic]'{step.tool}'[/italic] due to condition={step.condition}") + self.logger(f"Skipping step [italic]'{step.tool}'[/italic] due to condition={step.condition}", log_type="executor") return True # Bind args with blackboard placeholders @@ -150,7 +150,7 @@ def _run_step(self, step: Step, bb: Blackboard, parallel: bool = False) -> bool: if ok and self._check_success(step, bb, is_step=True): return True else: - self.logger(f"Step [italic]'{step.tool}'[/italic] attempt {i+1}/{attempts} failed") + self.logger(f"Step [italic]'{step.tool}'[/italic] attempt {i+1}/{attempts} failed", log_type="executor") return False @@ -161,10 +161,10 @@ def _check_success(self, entity: Step | PlanBase, bb: Blackboard, is_step: bool return True log_value = entity.tool if is_step else entity.kind if self.safe_eval(entity.success_criteria, bb): - self.logger(f"Entity '{log_value}' succeeded with criteria={entity.success_criteria}") + self.logger(f"Entity '{log_value}' succeeded with criteria={entity.success_criteria}", log_type="executor") return True else: - self.logger(f"Entity '{log_value}' failed with criteria={entity.success_criteria}") + self.logger(f"Entity '{log_value}' failed with criteria={entity.success_criteria}", log_type="executor") return False def safe_eval(self, expr: str, bb: Blackboard) -> bool: @@ -192,7 +192,7 @@ def _make_bb_subs(self, expr: str, bb: Blackboard) -> str: expr = expr.replace(f"{{{{{match}}}}}", str(val)) return expr except Exception as e: - self.logger(f"Blackboard substitution failed: {expr} ({e})", error=True) + self.logger(f"Blackboard substitution failed: {expr} ({e})", log_type="executor", log_color=0)# error return expr def _bind_args(self, args: 
List[ArgValue], schema: List[Tuple[str, str]], bb: Blackboard) -> List[ArgValue]: @@ -243,7 +243,7 @@ def _call_tool(self, """Invoke a registered tool.""" tool = self.registry.tools.get(tool_name) if not tool: - self.logger(f"Tool [italic]'{tool_name}'[/italic] not found", error=True) + self.logger(f"Tool [italic]'{tool_name}'[/italic] not found", log_type="executor", log_color=0) # error return False, None # Convert args list to dict @@ -251,7 +251,7 @@ def _call_tool(self, tool.bb = bb start = time.time() - self.logger(f"Invoking [italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]") + self.logger(f"Invoking [italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]", log_type="executor") tool_log = "" try: if timeout_ms: @@ -274,13 +274,13 @@ def _call_tool(self, result = tool.run(**arg_dict) tool_log = buff.getvalue().strip() if tool_log: - self.logger(f"{tool_log}", tool=True, tool_name=tool_name) + self.logger(f"{tool_log}: {tool_name} TODO. danip", log_type="executor")#, tool_name=tool_name) # TODO danip elapsed = (time.time() - start) * 1000 - self.logger(f"Executed [italic]'{tool_name}'[/italic] in {elapsed:.1f} ms with result: {result}") + self.logger(f"Executed [italic]'{tool_name}'[/italic] in {elapsed:.1f} ms with result: {result}", log_type="executor") return True, result except concurrent.futures.TimeoutError: - self.logger(f"Execution of [italic]'{tool_name}'[/italic] timed out after {timeout_ms} ms") + self.logger(f"Execution of [italic]'{tool_name}'[/italic] timed out after {timeout_ms} ms", log_type="executor") return False, None except Exception as e: - self.logger(f"Execution failed for [italic]'{tool_name}'[/italic]: {e}") + self.logger(f"Execution failed for [italic]'{tool_name}'[/italic]: {e}", log_type="executor") return False, None diff --git a/src/vulcanai/core/manager.py b/src/vulcanai/core/manager.py index 128335a..631bd77 100644 --- a/src/vulcanai/core/manager.py +++ b/src/vulcanai/core/manager.py @@ 
-14,6 +14,23 @@ from typing import Any, Dict, Optional, Tuple +from vulcanai.console.logger import VulcanAILogger +from vulcanai.core.executor import Blackboard, PlanExecutoa# Copyright 2025 Proyectos y Sistemas de Mantenimiento SL (eProsima). +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, Optional, Tuple + from vulcanai.console.logger import VulcanAILogger from vulcanai.core.executor import Blackboard, PlanExecutor from vulcanai.core.agent import Agent @@ -22,6 +39,244 @@ from vulcanai.tools.tool_registry import ToolRegistry +class ToolManager: + """Manages the LLM Agent and calls the executor with the LLM output.""" + def __init__( + self, + model: str, + registry: Optional[ToolRegistry]=None, + validator: Optional[PlanValidator]=None, + k: int=10, + hist_depth: int = 3, + logger=None + ): + self.logger = logger #or VulcanAILogger.log_manager + self.llm = Agent(model, self.logger) + self.k = k + self.registry = registry or ToolRegistry(logger=(logger))# or VulcanAILogger.log_registry)) + self.validator = validator + self.executor = PlanExecutor(self.registry, logger=(logger))# or VulcanAILogger.log_executor)) + self.bb = Blackboard() + self.user_context = "" + # History is saved as a list of Tuples of user requests and plan summaries + self.history = [] + # How many previous interactions to include in the prompt as history + self.history_depth = hist_depth + + def register_tool(self, tool, solve_deps: bool = True): + """ + Wrapper 
for registering a single tool. + + :param tool: The tool class (ITool) to register. + """ + self.registry.register_tool(tool, solve_deps=solve_deps) + + def register_tools_from_file(self, path: str): + """ + Wrapper for discovering tools from a file. + + :param path: The absolute path to the file containing tool definitions. + """ + self.registry.discover_tools_from_file(path) + + def register_tools_from_entry_points(self, group: str = "custom_tools"): + """ + Wrapper for discovering tools from entry points. + + :param group: The entry point group name. Default is "custom_tools". + """ + self.registry.discover_tools_from_entry_points(group) + + def add_user_context(self, context: str): + """ + Add additional context to be included in the prompt. + + :param context: The context string to add. + """ + self.user_context = context + + def handle_user_request(self, user_text: str, context: Dict[str, Any]) -> Dict[str, Any]: + """ + Given a natural language request, ask LLM to generate a plan. + Then execute it via PlanExecutor. + + :param user_text: The user request in natural language. + :param context: Additional context that may help the LLM to choose the best tool. + :return: A dictionary with the execution result, including the plan used and the final blackboard state. 
+ """ + try: + # Get plan from LLM + plan = self.get_plan_from_user_request(user_text, context) + if not plan: + return {"error": "No plan generated by LLM."} + # Validate plan if validator is available + if self.validator: + try: + self.validator.validate(plan) + except Exception as e: + self.logger(f"Plan validation error: {e}", log_type="validator") # error + raise e + # Execute plan + ret = self.execute_plan(plan) + except Exception as e: + self.logger(f"Error handling user request: {e}", log_type="manager", log_color=0) # error + ret = {"error": str(e)} + + return ret + + def get_plan_from_user_request(self, user_text: str, context: Dict[str, Any] = None) -> GlobalPlan: + """ + Given a natural language request, ask LLM to generate a plan. + + :param user_text: The user request in natural language. + :param context: Additional context that may help the LLM to choose the best tool. + :return: A dictionary with the execution result, including the plan used and the final blackboard state. + :raises Exception: If there is an error during LLM inference. + """ + # Build prompt with available tools + system_prompt, user_prompt = self._build_prompt(user_text, context) + + if not system_prompt or not user_prompt: + return {} + + images = [] + if context and "images" in context: + # Images should be a list of paths + images = context["images"] + + # Query LLM + plan = self.llm.inference(system_prompt, user_prompt, images, self.history) + self.logger(f"Plan received:\n{plan}", log_type="manager") + # Save to history + if plan: + self._add_to_history(user_prompt, plan.summary) + return plan + + def execute_plan(self, plan: GlobalPlan) -> Dict[str, Any]: + """ + Execute a given plan via PlanExecutor. + + :param plan: The plan to execute. + :return: A dictionary with the execution result, including the final blackboard state. 
+ """ + result = self.executor.run(plan, self.bb) + return {"plan": plan, **result} + + def _parse_user_context(self) -> str: + """ + Parse the user context into a string format suitable for the prompt. + + :param context: The user context in string format. + :return: A formatted string representing the user context. + """ + if not self.user_context: + return "" + return f"\n## User context:\n{self.user_context}\n" + + def _build_prompt(self, user_text: str, ctx: Dict[str, Any]) -> Tuple[str, str]: + """ + Create a simple prompt listing available tools and asking for one. + The prompt is divided into 'system' and 'user' parts, which will be handled by the LLM agent + to create the most appropriate prompt for the specific LLM. + """ + tools = self.registry.top_k(user_text, self.k) + if not tools: + self.logger("No tools available in the registry.", log_type="manager", log_color=0) # error + return "", "" + tool_descriptions = [] + for tool in tools: + tool_descriptions.append( + f"- *{tool.name}*: {tool.description}\n" + f" Inputs: {tool.input_schema}\n" + f" Outputs: {tool.output_schema}\n" + ) + tools_text = "\n".join(tool_descriptions) + user_context = self._parse_user_context() + user_prompt = "User request:\n" + user_text + + return self._get_prompt_template().format(tools_text=tools_text, user_context=user_context), user_prompt + + def _add_to_history(self, user_text: str, plan_summary: str): + """Add a new interaction to the history and trim if necessary.""" + self.history.append((user_text, plan_summary)) + # Keep only the last `history_depth` interactions + if len(self.history) > self.history_depth: + if self.history_depth <= 0: + self.history = [] + else: + self.history = self.history[-self.history_depth:] + + def update_history_depth(self, new_depth: int): + """ + Update the history depth and trim the history if necessary. + + :param new_depth: The new history depth. 
+ """ + self.history_depth = max(0, int(new_depth)) + self.logger(f"Updated history depth to {new_depth}", log_type="manager", log_color=2) + if len(self.history) > self.history_depth: + if self.history_depth <= 0: + self.history = [] + else: + self.history = self.history[-self.history_depth:] + + def update_k_index(self, new_k: int): + """ + Update the k index. + + :param new_k: The new k index. + """ + self.k = max(1, int(new_k)) + self.logger(f"Updated k index to {new_k}", log_type="manager", log_color=2) + + def _get_prompt_template(self) -> str: + template = """ +You are a planner assistant controlling a robotic system. +Your job is to take a user request and generate a valid execution plan, containing only ONE step. +Be sure to understand the text received and select the best action command from the available options. +{user_context} +## Available tools: +{tools_text} + +## Plan format: + +plan = GlobalPlan( + plan=[ + PlanNode( + kind="SEQUENCE | PARALLEL", + steps=[ + Step(tool="tool_name", args=[ArgValue(key="arg_name", val="value or {{{{bb.tool.key}}}}"), ...]), + ], + ## Optional execution control parameters: + condition="Python expression to evaluate before executing this PlanNode", + success_criteria="Python expression to determine if this PlanNode succeeded", + timeout_ms=0, + retry=0, + on_fail=PlanNode( + kind="SEQUENCE | PARALLEL", + steps=[ + Step(tool="tool_name", args=[ArgValue(key="arg_name", val="value or {{{{bb.tool.key}}}}"), ...]), + ], + ), + ) + ], +) + +Use "{{{{bb.tool.key}}}}" to reference the output of a previous step. +For example, if tool 'detect_object' outputs {{"pose": [1.0, 2.0]}}, you can pass it to navigate as: +"args": {{"target": "{{{{bb.detect_object.pose}}}}"}} + +Choose the most appropriate tool and arguments to satisfy the request. +Add only optional execution control parameters if strictly necessary or requested by the user. 
+""" + return template +r +from vulcanai.core.agent import Agent +from vulcanai.core.plan_types import GlobalPlan +from vulcanai.core.validator import PlanValidator +from vulcanai.tools.tool_registry import ToolRegistry + + class ToolManager: """Manages the LLM Agent and calls the executor with the LLM output.""" def __init__( diff --git a/src/vulcanai/tools/tool_registry.py b/src/vulcanai/tools/tool_registry.py index 40dc0a9..11cfafb 100644 --- a/src/vulcanai/tools/tool_registry.py +++ b/src/vulcanai/tools/tool_registry.py @@ -75,7 +75,7 @@ class ToolRegistry: """Holds all known tools and performs vector search over metadata.""" def __init__(self, embedder=None, logger=None): # Logging function - self.logger = logger or VulcanAILogger.log_registry + self.logger = logger #or VulcanAILogger.log_registry # Dictionary of tool name -> tool instance self.tools: Dict[str, ITool] = {} # Embedding model for tool metadata @@ -100,7 +100,7 @@ def register_tool(self, tool: ITool, solve_deps: bool = True): self.validation_tools.append(tool.name) emb = self.embedder.embed(self._doc(tool)) self._index.append((tool.name, emb)) - self.logger(f"Registered tool: {tool.name}") + self.logger(f"Registered tool: {tool.name}", log_type="register") self.help_tool.available_tools = self.tools if solve_deps: # Get class of tool @@ -129,7 +129,8 @@ def _resolve_dependencies(self, tool: CompositeTool): for dep_name in tool.dependencies: dep_tool = self.tools.get(dep_name) if dep_tool is None: - self.logger(f"Dependency '{dep_name}' for tool '{tool.name}' not found.", error=True) + self.logger(f"Dependency '{dep_name}' for tool '{tool.name}' not found.", + log_type="register", log_color=0) # error else: tool.resolved_deps[dep_name] = dep_tool @@ -148,8 +149,8 @@ def _load_tools_from_file(self, path: str): spec.loader.exec_module(module) self._loaded_modules.append(module) except Exception as e: - self.logger(f"Error loading tools from {path}: {e}", error=True) - + self.logger(f"Error loading 
tools from {path}: {e}", + log_type="register", log_color=0) # error def discover_tools_from_file(self, path: str): """Load tools from a Python file and register them.""" self._load_tools_from_file(path) @@ -172,7 +173,8 @@ def discover_ros(self): def top_k(self, query: str, k: int = 5, validation: bool = False) -> list[ITool]: """Return top-k tools most relevant to the query.""" if not self._index: - self.logger("No tools registered.", error=True) + self.logger("No tools registered.", + log_type="register", log_color=0) # error return [] # Filter tools based on validation flag @@ -186,7 +188,8 @@ def top_k(self, query: str, k: int = 5, validation: bool = False) -> list[ITool] if not active_names: # If there is no tool for the requested category, be explicit and return [] self.logger( - f"No matching tools for the requested mode ({'validation' if validation else 'action'}).", error=True + f"No matching tools for the requested mode ({'validation' if validation else 'action'}).", + log_type="register", log_color=0 # error ) return [] @@ -197,7 +200,8 @@ def top_k(self, query: str, k: int = 5, validation: bool = False) -> list[ITool] filtered_index = [(name, vec) for (name, vec) in self._index if name in active_names] if not filtered_index: # Index might be stale; log and return [] - self.logger("Index has no entries for the selected tool subset.", error=True) + self.logger("Index has no entries for the selected tool subset.", + log_type="register", log_color=0) # error return [] # If k > number of required tools, return required tools From a89ef9e1095530feddc30aeb08e2a6681cd84f75 Mon Sep 17 00:00:00 2001 From: danipiza Date: Wed, 12 Nov 2025 14:38:42 +0100 Subject: [PATCH 03/40] [#23892] Solved copy errors in manager.py Signed-off-by: danipiza --- src/vulcanai/core/agent.py | 4 +- src/vulcanai/core/manager.py | 255 ----------------------------------- 2 files changed, 2 insertions(+), 257 deletions(-) diff --git a/src/vulcanai/core/agent.py 
b/src/vulcanai/core/agent.py index 205ddff..d71829a 100644 --- a/src/vulcanai/core/agent.py +++ b/src/vulcanai/core/agent.py @@ -32,7 +32,7 @@ def __init__(self, model_name: str, logger=None): self.logger = logger #or VulcanAILogger.log_manager self._load_model(name) - def inference( + def inference_plan( self, system_context: str, user_prompt: str, @@ -93,7 +93,7 @@ def inference_validation( history: list[tuple[str, str]], ) -> AIValidation: """ - Perform inference using the selected LLM model to generate a goal. + Perform inference using the selected LLM model to generate a validation. :param system_context: The system prompt or context for the LLM. :param user_prompt: The user's input or request. diff --git a/src/vulcanai/core/manager.py b/src/vulcanai/core/manager.py index 631bd77..128335a 100644 --- a/src/vulcanai/core/manager.py +++ b/src/vulcanai/core/manager.py @@ -14,23 +14,6 @@ from typing import Any, Dict, Optional, Tuple -from vulcanai.console.logger import VulcanAILogger -from vulcanai.core.executor import Blackboard, PlanExecutoa# Copyright 2025 Proyectos y Sistemas de Mantenimiento SL (eProsima). -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import Any, Dict, Optional, Tuple - from vulcanai.console.logger import VulcanAILogger from vulcanai.core.executor import Blackboard, PlanExecutor from vulcanai.core.agent import Agent @@ -39,244 +22,6 @@ from vulcanai.tools.tool_registry import ToolRegistry -class ToolManager: - """Manages the LLM Agent and calls the executor with the LLM output.""" - def __init__( - self, - model: str, - registry: Optional[ToolRegistry]=None, - validator: Optional[PlanValidator]=None, - k: int=10, - hist_depth: int = 3, - logger=None - ): - self.logger = logger #or VulcanAILogger.log_manager - self.llm = Agent(model, self.logger) - self.k = k - self.registry = registry or ToolRegistry(logger=(logger))# or VulcanAILogger.log_registry)) - self.validator = validator - self.executor = PlanExecutor(self.registry, logger=(logger))# or VulcanAILogger.log_executor)) - self.bb = Blackboard() - self.user_context = "" - # History is saved as a list of Tuples of user requests and plan summaries - self.history = [] - # How many previous interactions to include in the prompt as history - self.history_depth = hist_depth - - def register_tool(self, tool, solve_deps: bool = True): - """ - Wrapper for registering a single tool. - - :param tool: The tool class (ITool) to register. - """ - self.registry.register_tool(tool, solve_deps=solve_deps) - - def register_tools_from_file(self, path: str): - """ - Wrapper for discovering tools from a file. - - :param path: The absolute path to the file containing tool definitions. - """ - self.registry.discover_tools_from_file(path) - - def register_tools_from_entry_points(self, group: str = "custom_tools"): - """ - Wrapper for discovering tools from entry points. - - :param group: The entry point group name. Default is "custom_tools". - """ - self.registry.discover_tools_from_entry_points(group) - - def add_user_context(self, context: str): - """ - Add additional context to be included in the prompt. 
- - :param context: The context string to add. - """ - self.user_context = context - - def handle_user_request(self, user_text: str, context: Dict[str, Any]) -> Dict[str, Any]: - """ - Given a natural language request, ask LLM to generate a plan. - Then execute it via PlanExecutor. - - :param user_text: The user request in natural language. - :param context: Additional context that may help the LLM to choose the best tool. - :return: A dictionary with the execution result, including the plan used and the final blackboard state. - """ - try: - # Get plan from LLM - plan = self.get_plan_from_user_request(user_text, context) - if not plan: - return {"error": "No plan generated by LLM."} - # Validate plan if validator is available - if self.validator: - try: - self.validator.validate(plan) - except Exception as e: - self.logger(f"Plan validation error: {e}", log_type="validator") # error - raise e - # Execute plan - ret = self.execute_plan(plan) - except Exception as e: - self.logger(f"Error handling user request: {e}", log_type="manager", log_color=0) # error - ret = {"error": str(e)} - - return ret - - def get_plan_from_user_request(self, user_text: str, context: Dict[str, Any] = None) -> GlobalPlan: - """ - Given a natural language request, ask LLM to generate a plan. - - :param user_text: The user request in natural language. - :param context: Additional context that may help the LLM to choose the best tool. - :return: A dictionary with the execution result, including the plan used and the final blackboard state. - :raises Exception: If there is an error during LLM inference. 
- """ - # Build prompt with available tools - system_prompt, user_prompt = self._build_prompt(user_text, context) - - if not system_prompt or not user_prompt: - return {} - - images = [] - if context and "images" in context: - # Images should be a list of paths - images = context["images"] - - # Query LLM - plan = self.llm.inference(system_prompt, user_prompt, images, self.history) - self.logger(f"Plan received:\n{plan}", log_type="manager") - # Save to history - if plan: - self._add_to_history(user_prompt, plan.summary) - return plan - - def execute_plan(self, plan: GlobalPlan) -> Dict[str, Any]: - """ - Execute a given plan via PlanExecutor. - - :param plan: The plan to execute. - :return: A dictionary with the execution result, including the final blackboard state. - """ - result = self.executor.run(plan, self.bb) - return {"plan": plan, **result} - - def _parse_user_context(self) -> str: - """ - Parse the user context into a string format suitable for the prompt. - - :param context: The user context in string format. - :return: A formatted string representing the user context. - """ - if not self.user_context: - return "" - return f"\n## User context:\n{self.user_context}\n" - - def _build_prompt(self, user_text: str, ctx: Dict[str, Any]) -> Tuple[str, str]: - """ - Create a simple prompt listing available tools and asking for one. - The prompt is divided into 'system' and 'user' parts, which will be handled by the LLM agent - to create the most appropriate prompt for the specific LLM. 
- """ - tools = self.registry.top_k(user_text, self.k) - if not tools: - self.logger("No tools available in the registry.", log_type="manager", log_color=0) # error - return "", "" - tool_descriptions = [] - for tool in tools: - tool_descriptions.append( - f"- *{tool.name}*: {tool.description}\n" - f" Inputs: {tool.input_schema}\n" - f" Outputs: {tool.output_schema}\n" - ) - tools_text = "\n".join(tool_descriptions) - user_context = self._parse_user_context() - user_prompt = "User request:\n" + user_text - - return self._get_prompt_template().format(tools_text=tools_text, user_context=user_context), user_prompt - - def _add_to_history(self, user_text: str, plan_summary: str): - """Add a new interaction to the history and trim if necessary.""" - self.history.append((user_text, plan_summary)) - # Keep only the last `history_depth` interactions - if len(self.history) > self.history_depth: - if self.history_depth <= 0: - self.history = [] - else: - self.history = self.history[-self.history_depth:] - - def update_history_depth(self, new_depth: int): - """ - Update the history depth and trim the history if necessary. - - :param new_depth: The new history depth. - """ - self.history_depth = max(0, int(new_depth)) - self.logger(f"Updated history depth to {new_depth}", log_type="manager", log_color=2) - if len(self.history) > self.history_depth: - if self.history_depth <= 0: - self.history = [] - else: - self.history = self.history[-self.history_depth:] - - def update_k_index(self, new_k: int): - """ - Update the k index. - - :param new_k: The new k index. - """ - self.k = max(1, int(new_k)) - self.logger(f"Updated k index to {new_k}", log_type="manager", log_color=2) - - def _get_prompt_template(self) -> str: - template = """ -You are a planner assistant controlling a robotic system. -Your job is to take a user request and generate a valid execution plan, containing only ONE step. 
-Be sure to understand the text received and select the best action command from the available options. -{user_context} -## Available tools: -{tools_text} - -## Plan format: - -plan = GlobalPlan( - plan=[ - PlanNode( - kind="SEQUENCE | PARALLEL", - steps=[ - Step(tool="tool_name", args=[ArgValue(key="arg_name", val="value or {{{{bb.tool.key}}}}"), ...]), - ], - ## Optional execution control parameters: - condition="Python expression to evaluate before executing this PlanNode", - success_criteria="Python expression to determine if this PlanNode succeeded", - timeout_ms=0, - retry=0, - on_fail=PlanNode( - kind="SEQUENCE | PARALLEL", - steps=[ - Step(tool="tool_name", args=[ArgValue(key="arg_name", val="value or {{{{bb.tool.key}}}}"), ...]), - ], - ), - ) - ], -) - -Use "{{{{bb.tool.key}}}}" to reference the output of a previous step. -For example, if tool 'detect_object' outputs {{"pose": [1.0, 2.0]}}, you can pass it to navigate as: -"args": {{"target": "{{{{bb.detect_object.pose}}}}"}} - -Choose the most appropriate tool and arguments to satisfy the request. -Add only optional execution control parameters if strictly necessary or requested by the user. 
-""" - return template -r -from vulcanai.core.agent import Agent -from vulcanai.core.plan_types import GlobalPlan -from vulcanai.core.validator import PlanValidator -from vulcanai.tools.tool_registry import ToolRegistry - - class ToolManager: """Manages the LLM Agent and calls the executor with the LLM output.""" def __init__( From 36a927c3954c49a6002f0694dd18e09f2e8375ab Mon Sep 17 00:00:00 2001 From: danipiza Date: Thu, 13 Nov 2025 07:12:49 +0100 Subject: [PATCH 04/40] [#23892] Solved pyproject.toml textual dependencies Signed-off-by: danipiza --- pyproject.toml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4313e6c..64d19d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,11 +19,12 @@ dependencies = [ "ollama==0.6.0", "openai==1.108.0", "prompt_toolkit==3.0.52", - "rich==14.1.0", + "rich==14.2.0", "sentence-transformers==5.1.0", - textual==6.5.0, - textual-dev==1.8.0, - textual-serve==1.1.3, + "textual==6.5.0", + "textual-dev==1.8.0", + "textual-serve==1.1.3", + "typeguard==2.13", ] [project.optional-dependencies] From 8fcc605a8beaa7a91e85352754f0301a97f08b51 Mon Sep 17 00:00:00 2001 From: danipiza Date: Thu, 13 Nov 2025 07:26:47 +0100 Subject: [PATCH 05/40] [#23892] Added tools checbox command Signed-off-by: danipiza --- src/vulcanai/console/console.py | 94 ++++- src/vulcanai_turtlesim_demo/package.xml | 19 + .../resource/vulcanai_turtlesim_demo | 0 src/vulcanai_turtlesim_demo/setup.cfg | 4 + src/vulcanai_turtlesim_demo/setup.py | 34 ++ .../test/test_copyright.py | 25 ++ .../test/test_flake8.py | 25 ++ .../test/test_pep257.py | 23 ++ .../test/test_xmllint.py | 23 ++ .../vulcanai_turtlesim_demo/__init__.py | 0 .../vulcanai_turtlesim_demo/ros2_node.py | 90 +++++ .../turtlesim_tools.py | 373 ++++++++++++++++++ 12 files changed, 706 insertions(+), 4 deletions(-) create mode 100644 src/vulcanai_turtlesim_demo/package.xml create mode 100644 
src/vulcanai_turtlesim_demo/resource/vulcanai_turtlesim_demo create mode 100644 src/vulcanai_turtlesim_demo/setup.cfg create mode 100644 src/vulcanai_turtlesim_demo/setup.py create mode 100644 src/vulcanai_turtlesim_demo/test/test_copyright.py create mode 100644 src/vulcanai_turtlesim_demo/test/test_flake8.py create mode 100644 src/vulcanai_turtlesim_demo/test/test_pep257.py create mode 100644 src/vulcanai_turtlesim_demo/test/test_xmllint.py create mode 100644 src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/__init__.py create mode 100644 src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py create mode 100644 src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 11eaed1..3490ff0 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -23,19 +23,22 @@ from vulcanai.models.model import IModelHooks from vulcanai.console.logger import console - +from textual.screen import ModalScreen from textual.app import App, ComposeResult -from textual.widgets import Input, Log, Static +from textual.widgets import Input, Log, Static, Checkbox, Button, Label from textual import events from rich.text import Text from rich.markup import escape -from textual.containers import VerticalScroll +from textual import work + +from textual.containers import VerticalScroll, Horizontal, Vertical import asyncio, time -from typing import Callable +from typing import Callable, Iterable, Optional + class Prompt(Static): @@ -84,6 +87,62 @@ def on_request_end(self) -> None: self._task_id = None +# ---------- Modal checklist ---------- +class CheckListScreen(ModalScreen[list[str] | None]): + """A modal screen with the tools checkboxes and Submit/Cancel buttons.""" + + DEFAULT_CSS = """ + CheckListScreen { + align: center middle; + } + .dialog { + width: 60%; + max-width: 90%; + border: round $accent; + padding: 1 2; + background: $panel; + } + .title { + 
text-align: center; + margin-bottom: 1; + } + .btns { + height: auto; + dock: bottom; + padding-top: 1; + content-align: right middle; + } + """ + + + def __init__(self, lines: Iterable[str]) -> None: + super().__init__() + self._lines = list(lines) + + def compose(self) -> ComposeResult: + with Vertical(classes="dialog"): + yield Label("Pick the lines you want to print", classes="title") + # Make one checkbox per provided line + for i, line in enumerate(self._lines, start=1): + yield Checkbox(line, value=True, id=f"cb{i}") + with Horizontal(classes="btns"): + yield Button("Cancel", variant="default", id="cancel") + yield Button("Submit", variant="primary", id="submit") + + def on_button_pressed(self, event: Button.Pressed) -> None: + if event.button.id == "submit": + boxes = list(self.query(Checkbox)) + # Use the original strings instead of Checkbox.label (renderable) + selected = [self._lines[i] for i, cb in enumerate(boxes) if cb.value] + self.dismiss(selected) # -> list[str] + elif event.button.id == "cancel": + self.dismiss(None) + + def on_mount(self) -> None: + # Focus the first checkbox for keyboard toggling with space + first_cb = self.query_one(Checkbox) + self.set_focus(first_cb) + class VulcanConsole(App): # Terminal style @@ -274,6 +333,7 @@ def worker(log_cb: Callable[[str], None]) -> None: #"help": self.cmd_help, #"h": self.cmd_help, "/tools": self.cmd_tools, + "/edit_tools": self.cmd_edit_tools, "/change_k": self.cmd_change_k, "/history": self.cmd_history_index, "/show_history": self.cmd_show_history, @@ -350,6 +410,24 @@ def print_system(self, message: str): def print_output(self, message: str): self.term.write(message)""" + @work # runs in a worker so waiting won't freeze the UI + async def open_checklist(self, tools_list: list[str]) -> None: + """ + Function used in '/edit_tools' command. + It creates a dialog with all the tools. 
+ """ + # create the checklist dialog + selected = await self.push_screen_wait(CheckListScreen(tools_list)) + + if selected is None: + self._log("Selection cancelled.", log_color=3) + elif not selected: + self._log("No items selected.", log_color=3) + else: + self._log("Submitting selected lines:", log_color=1) + for line in selected: + self._log(line, log_color=2) + # endregion # region Commands @@ -360,6 +438,7 @@ def cmd_help(self, _) -> None: "Available commands:\n" "/help - Show this help message\n" "/tools - List available tools\n" + "/edit_tools - Edit the list of available tools\n" "/change_k - Change the 'k' value for the top_k algorithm selection or show the current value if no is provided\n" "/history - Change the history depth or show the current value if no is provided\n" "/show_history - Show the current history\n" @@ -381,6 +460,13 @@ def cmd_tools(self, _) -> None: help_msg += f"- {tool.name}: {tool.description}\n" self._log(help_msg, log_color=2) + def cmd_edit_tools(self, _) -> None: + tools_list = [] + for tool in self.manager.registry.tools.values(): + tools_list.append(f"- {tool.name}")#: {tool.description}") + + self.open_checklist(tools_list) + def cmd_change_k(self, args) -> None: if len(args) == 0: self._log(f"Current 'k' is {self.manager.k}", log_color=2) diff --git a/src/vulcanai_turtlesim_demo/package.xml b/src/vulcanai_turtlesim_demo/package.xml new file mode 100644 index 0000000..2501110 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/package.xml @@ -0,0 +1,19 @@ + + + + vulcanai_turtlesim_demo + 0.0.0 + TODO: Package description + danipiza + TODO: License declaration + + ament_copyright + ament_flake8 + ament_pep257 + ament_xmllint + python3-pytest + + + ament_python + + diff --git a/src/vulcanai_turtlesim_demo/resource/vulcanai_turtlesim_demo b/src/vulcanai_turtlesim_demo/resource/vulcanai_turtlesim_demo new file mode 100644 index 0000000..e69de29 diff --git a/src/vulcanai_turtlesim_demo/setup.cfg 
b/src/vulcanai_turtlesim_demo/setup.cfg new file mode 100644 index 0000000..5fc02a6 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/vulcanai_turtlesim_demo +[install] +install_scripts=$base/lib/vulcanai_turtlesim_demo diff --git a/src/vulcanai_turtlesim_demo/setup.py b/src/vulcanai_turtlesim_demo/setup.py new file mode 100644 index 0000000..d38154b --- /dev/null +++ b/src/vulcanai_turtlesim_demo/setup.py @@ -0,0 +1,34 @@ +from setuptools import find_packages, setup + +package_name = 'vulcanai_turtlesim_demo' + +setup( + name=package_name, + version='0.0.0', + packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='danipiza', + maintainer_email='dpizarrogallego@gmail.com', + description='TODO: Package description', + license='TODO: License declaration', + extras_require={ + 'test': [ + 'pytest', + ], + }, + entry_points={ + "console_scripts": [ + "vulcanai_turtlesim_demo = vulcanai_turtlesim_demo.ros2_node:main", + ], + "turtle_tools": [ + "turtle_tools = vulcanai_turtlesim_demo.turtlesim_tools", + ], + }, + +) diff --git a/src/vulcanai_turtlesim_demo/test/test_copyright.py b/src/vulcanai_turtlesim_demo/test/test_copyright.py new file mode 100644 index 0000000..97a3919 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/test/test_copyright.py @@ -0,0 +1,25 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_copyright.main import main +import pytest + + +# Remove the `skip` decorator once the source file(s) have a copyright header +@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') +@pytest.mark.copyright +@pytest.mark.linter +def test_copyright(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found errors' diff --git a/src/vulcanai_turtlesim_demo/test/test_flake8.py b/src/vulcanai_turtlesim_demo/test/test_flake8.py new file mode 100644 index 0000000..27ee107 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/test/test_flake8.py @@ -0,0 +1,25 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ament_flake8.main import main_with_errors +import pytest + + +@pytest.mark.flake8 +@pytest.mark.linter +def test_flake8(): + rc, errors = main_with_errors(argv=[]) + assert rc == 0, \ + 'Found %d code style errors / warnings:\n' % len(errors) + \ + '\n'.join(errors) diff --git a/src/vulcanai_turtlesim_demo/test/test_pep257.py b/src/vulcanai_turtlesim_demo/test/test_pep257.py new file mode 100644 index 0000000..b234a38 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/test/test_pep257.py @@ -0,0 +1,23 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_pep257.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.pep257 +def test_pep257(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found code style errors / warnings' diff --git a/src/vulcanai_turtlesim_demo/test/test_xmllint.py b/src/vulcanai_turtlesim_demo/test/test_xmllint.py new file mode 100644 index 0000000..3e08c02 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/test/test_xmllint.py @@ -0,0 +1,23 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_xmllint.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.xmllint +def test_xmllint() -> None: + rc = main(argv=[]) + assert rc == 0, 'Found code style errors / warnings' diff --git a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/__init__.py b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py new file mode 100644 index 0000000..e81ef68 --- /dev/null +++ b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py @@ -0,0 +1,90 @@ +import threading + +import rclpy +from rclpy.node import Node +from rclpy.task import Future + +from vulcanai import VulcanConsole + +class SharedNode(Node): + def __init__(self, name: str = "vulcanai_shared_node"): + super().__init__(name) + # Dictionary to store created clients + self._vulcan_clients = {} + # Dictionary to store created publishers + self._vulcan_publishers = {} + + # Ensure entities creation is thread-safe. + self.node_lock = threading.Lock() + + def get_client(self, srv_type, srv_name): + """ + Get a cached client for the specified service type and name or + create a new one if it doesn't exist. 
+ """ + key = (srv_type, srv_name) + with self.node_lock: + if key not in self._vulcan_clients: + client = self.create_client(srv_type, srv_name) + self._vulcan_clients[key] = client + self.get_logger().info(f"Created new client for {srv_name}") + return self._vulcan_clients[key] + + def get_publisher(self, msg_type, topic_name): + """ + Get a cached publisher for the specified message type and topic name or + create a new one if it doesn't exist. + """ + key = (msg_type, topic_name) + with self.node_lock: + if key not in self._vulcan_publishers: + publisher = self.create_publisher(msg_type, topic_name, 10) + self._vulcan_publishers[key] = publisher + self.get_logger().info(f"Created new publisher for {topic_name}") + return self._vulcan_publishers[key] + + def wait_for_message(self, msg_type, topic: str, timeout_sec: float = None): + """ + Block until a message is received or timeout expires. + Subscriptions are created on demand and destroyed after use to avoid + handling spins and callbacks in a separate thread. + """ + future = Future() + + def callback(msg): + if not future.done(): + future.set_result(msg) + + sub = self.create_subscription(msg_type, topic, callback, 10) + + rclpy.spin_until_future_complete(self, future, timeout_sec=timeout_sec) + self.destroy_subscription(sub) + + if future.done(): + return future.result() + return None + +def main(args=None): + # Create a ROS 2 node that will be used by the tools to avoid + # recurrent creation and destruction of DDS Participants + rclpy.init(args=args) + node = SharedNode(name="vulcanai_shared_node") + + user_context = """\ +You are controlling the turtlesim simulation from ROS 2. 
+The simulation has one or more turtles that can move around, drawing on the screen as they go.""" + + """console = VulcanConsole() + console.manager.register_tools_from_entry_points("turtle_tools") + console.manager.add_user_context(user_context) + # Add the shared node to the console manager blackboard to be used by tools + console.manager.bb["main_node"] = node + console.run()""" + + + console = VulcanConsole("turtle_tools", user_context, node) + console.run() + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py new file mode 100644 index 0000000..4a926ba --- /dev/null +++ b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py @@ -0,0 +1,373 @@ +""" +This file contains example tools to interact with ROS 2 turtlesim and +demonstrate how to create custom tools compatible with VulcanAI. +""" + +from math import cos, sin, pi +import time + +import rclpy +from geometry_msgs.msg import Twist +from std_srvs.srv import Empty +from turtlesim_msgs.srv import Kill, SetPen, Spawn, TeleportAbsolute, TeleportRelative +from turtlesim_msgs.msg import Pose + +from vulcanai import AtomicTool, CompositeTool, vulcanai_tool + +@vulcanai_tool +class SpawnTurtleTool(AtomicTool): + name = "spawn_turtle" + description = "Spawn a new turtle in turtlesim. 'spawned' indicates success." 
+ tags = ["turtlesim", "spawn", "add", "create", "service"] + input_schema = [ + ("x", "float"), + ("y", "float"), + ("theta", "float"), + ("name", "string"), + ] + output_schema = {"name": "string", "spawned": "bool"} + + def run(self, **kwargs): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + + client = node.get_client(Spawn, "spawn") + if not client.wait_for_service(timeout_sec=5.0): + raise Exception("Service not available, aborting...") + + req = Spawn.Request() + req.x = float(kwargs.get("x", 5.54)) + req.y = float(kwargs.get("y", 5.54)) + req.theta = float(kwargs.get("theta", 0.0)) + req.name = kwargs.get("name", "") + + with node.node_lock: + future = client.call_async(req) + rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) + result = future.result() + if result is None: + raise Exception("Service call failed timeout, aborting...") + + return {"name": result.name, "spawned": True} + +from std_msgs.msg import String + +@vulcanai_tool +class KillTurtleTool(AtomicTool): + name = "kill_turtle" + description = "Kill a turtle 'name' in turtlesim. 'killed' indicates success." 
+ tags = ["turtlesim", "kill", "remove", "delete", "service"] + input_schema = [ + ("name", "string"), + ] + output_schema = {"name": "string", "killed": "bool"} + + def run(self, name: str = ""): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + if not name: + print("No turtle name provided, aborting...") + return {"name": "", "killed": False} + + client = node.get_client(Kill, "kill") + if not client.wait_for_service(timeout_sec=5.0): + raise Exception("Service not available, aborting...") + + req = Kill.Request() + req.name = name + with node.node_lock: + future = client.call_async(req) + rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) + result = future.result() + if result is None: + raise Exception("Service call failed timeout, aborting...") + + return {"name": req.name, "killed": True} + + +@vulcanai_tool +class ResetTurtleSimTool(AtomicTool): + name = "reset_turtlesim" + description = "Reset the turtlesim environment. 'reset' indicates success." + tags = ["turtlesim", "reset", "environment", "service"] + input_schema = [] + output_schema = {"reset": "bool"} + + def run(self): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + + client = node.get_client(Empty, "reset") + if not client.wait_for_service(timeout_sec=5.0): + raise Exception("Service not available, aborting...") + + req = Empty.Request() + with node.node_lock: + future = client.call_async(req) + rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) + result = future.result() + if result is None: + raise Exception("Service call failed timeout, aborting...") + + return {"reset": True} + + +@vulcanai_tool +class ClearTurtleSimTool(AtomicTool): + name = "clear_turtlesim" + description = "Clear the turtlesim environment, meaning it erases every turtle trail. 'cleared' indicates success." 
+ tags = ["turtlesim", "clear", "erase", "service"] + input_schema = [] + output_schema = {"cleared": "bool"} + + def run(self): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + + client = node.get_client(Empty, "clear") + if not client.wait_for_service(timeout_sec=5.0): + raise Exception("Service not available, aborting...") + + req = Empty.Request() + with node.node_lock: + future = client.call_async(req) + rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) + result = future.result() + if result is None: + raise Exception("Service call failed timeout, aborting...") + + return {"cleared": True} + + +@vulcanai_tool +class AbsoluteTeleportTurtleTool(AtomicTool): + name = "absolute_teleport_turtle" + description = "Teleport a turtle 'name' to an absolute position in turtlesim. 'teleported' indicates success." + tags = ["turtlesim", "teleport", "absolute", "service"] + input_schema = [ + ("name", "string"), + ("x", "float"), + ("y", "float"), + ("theta", "float"), + ] + output_schema = {"name": "string", "teleported": "bool"} + + def run(self, **kwargs): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + if not kwargs.get("name", ""): + print("No turtle name provided, aborting...") + return {"name": "", "teleported": False} + + name = kwargs.get("name", "") + tp_srv_name = f"/{name}/teleport_absolute" + pen_srv_name = f"/{name}/set_pen" + + try: + # Try to disable the pen before teleporting to avoid drawing a line + # Do not fail if the pen service is not available + pen_result = None + pen_client = node.get_client(SetPen, pen_srv_name) + pen_req = SetPen.Request(r=179, g=184, b=255, width=3, off=1) + with node.node_lock: + pen_future = pen_client.call_async(pen_req) + rclpy.spin_until_future_complete(node, pen_future, timeout_sec=3.0) + pen_result = pen_future.result() + + client = 
node.get_client(TeleportAbsolute, tp_srv_name) + if not client.wait_for_service(timeout_sec=5.0): + raise Exception("Service not available, aborting...") + + req = TeleportAbsolute.Request() + req.x = kwargs.get("x", 0.0) + req.y = kwargs.get("y", 0.0) + req.theta = kwargs.get("theta", 0.0) + + with node.node_lock: + future = client.call_async(req) + rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) + result = future.result() + if result is None: + raise Exception("Service call failed timeout, aborting...") + finally: + if pen_result is not None: + # Restore the pen state after teleporting + pen_req.off = 0 + with node.node_lock: + pen_future = pen_client.call_async(pen_req) + rclpy.spin_until_future_complete(node, pen_future, timeout_sec=3.0) + pen_future.result() + + return {"name": name, "teleported": True} + + +@vulcanai_tool +class RelativeTeleportTurtleTool(AtomicTool): + name = "relative_teleport_turtle" + description = "Teleport a turtle 'name' to a relative position in turtlesim. 'teleported' indicates success." 
+ tags = ["turtlesim", "teleport", "relative", "service"] + input_schema = [ + ("name", "string"), + ("linear", "float"), + ("angular", "float"), + ] + output_schema = {"name": "string", "teleported": "bool"} + + def run(self, **kwargs): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + if not kwargs.get("name", ""): + print("No turtle name provided, aborting...") + return {"name": "", "teleported": False} + + name = kwargs.get("name", "") + client_name = f"/{name}/teleport_relative" + + client = node.get_client(TeleportRelative, client_name) + if not client.wait_for_service(timeout_sec=5.0): + raise Exception("Service not available, aborting...") + + req = TeleportRelative.Request() + req.linear = kwargs.get("linear", 0.0) + req.angular = kwargs.get("angular", 0.0) + + with node.node_lock: + future = client.call_async(req) + rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) + result = future.result() + if result is None: + raise Exception("Service call failed timeout, aborting...") + + return {"name": name, "teleported": True} + + +@vulcanai_tool +class GetTurtlePose(AtomicTool): + name = "get_turtle_pose" + description = "Get the current pose of a turtle 'name' in turtlesim. Fails if something goes wrong." 
+ tags = ["turtlesim", "pose", "position", "location"] + input_schema = [ + ("name", "string"), + ] + output_schema = { + "name": "string", + "x": "float", + "y": "float", + "theta": "float", + } + + def run(self, name: str = ""): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + name = name or "turtle1" + topic = f"/{name}/pose" + + msg = node.wait_for_message(Pose, topic, timeout_sec=5.0) + if msg is None: + print(f"Could not get pose for turtle '{name}', aborting...") + raise Exception("No pose message received") + + return {"name": name, "x": msg.x, "y": msg.y, "theta": msg.theta} + + +@vulcanai_tool +class MoveTurtleTool(AtomicTool): + name = "move_turtle" + description = "Move the turtle 'name' with 'linear' and 'angular' velocity by publishing the message 'duration' times (seconds). Use zero velocity to stop. 'success' indicates if the command was sent correctly." + tags = ["turtlesim", "move", "velocity", "cmd_vel", "stop", "draw"] + input_schema = [ + ("name", "string"), + ("linear", "float"), + ("angular", "float"), + ("duration", "int"), + ] + output_schema = [{"success": "bool"}] + + def run(self, **kwargs): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + if not kwargs.get("name", ""): + print("No turtle name provided, aborting...") + return {"success": False} + + name = kwargs.get('name', "") + pub = node.get_publisher(Twist, f"/{name}/cmd_vel") + msg = Twist() + msg.linear.x = float(kwargs.get("linear", 0.0)) + msg.angular.z = float(kwargs.get("angular", 0.0)) + for idx in range(int(kwargs.get("duration", 1.0))): + node.get_logger().info(f"Publishing message {idx + 1} to topic /{name}/cmd_vel: linear={msg.linear.x}, angular={msg.angular.z}") + pub.publish(msg) + time.sleep(1) + return {"success": True} + + +@vulcanai_tool +class DrawRectangleTool(CompositeTool): + name = "draw_rectangle" + description = 
"Move the turtle 'name' in a rectangular shape where 'size' is the length of the shortest sides. 'success' indicates if rectangle was drawn successfully." + tags = ["turtlesim", "draw", "rectangle", "move", "cmd_vel"] + input_schema = [ + ("name", "string"), + ("size", "float"), + ] + output_schema = {"success": "bool"} + dependencies = ["move_turtle", "relative_teleport_turtle"] + + def run(self, name: str = "", size: float = 2.0): + node = self.bb.get("main_node", None) + if node is None: + raise Exception("Could not find shared node, aborting...") + if not name: + print("No turtle name provided, aborting...") + return {"success": False} + + # Access the instances of the dependent tools and set their blackboards + move_tool = self.resolved_deps.get("move_turtle", None) + tp_relative_tool = self.resolved_deps.get("relative_teleport_turtle", None) + move_tool.bb = self.bb + tp_relative_tool.bb = self.bb + + name = name or "turtle1" + size = size + linear_speed = 1.0 + angular_turn = pi / 2 + + # Arguments are passed as dictionaries when calling directly other tools + side_1_args = { + "name": name, + "linear": linear_speed, + "angular": 0.0, + "duration": size / linear_speed, + } + side_2_args = { + "name": name, + "linear": linear_speed, + "angular": 0.0, + "duration": (size + 1) / linear_speed, + } + turn_args = { + "name": name, + "linear": 0.0, + "angular": angular_turn, + } + + # Pass arguments as kwargs + move_tool.run(**side_1_args) + tp_relative_tool.run(**turn_args) + move_tool.run(**side_2_args) + tp_relative_tool.run(**turn_args) + move_tool.run(**side_1_args) + tp_relative_tool.run(**turn_args) + move_tool.run(**side_2_args) + tp_relative_tool.run(**turn_args) + + return {"success": True} \ No newline at end of file From 2d588fd99a93b7c0f095655e9bda616457ddbc4c Mon Sep 17 00:00:00 2001 From: danipiza Date: Thu, 13 Nov 2025 10:23:10 +0100 Subject: [PATCH 06/40] [#23892] Updated Spinner Signed-off-by: danipiza --- src/vulcanai/console/console.py | 180 
++++++++++++++++++++------------ 1 file changed, 113 insertions(+), 67 deletions(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 3490ff0..b12310f 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -39,6 +39,8 @@ import asyncio, time from typing import Callable, Iterable, Optional +from textual.timer import Timer + class Prompt(Static): @@ -47,44 +49,68 @@ class Prompt(Static): def __init__(self, text: str = "> "): super().__init__(text, id="prompt") +class SpinnerHook: -class SpinnerHook(IModelHooks): - """ - Single entrant spinner controller for console. - - Starts the spinner on the first LLM request. - - Stops the spinner when LLM request is over. - """ def __init__(self, console): + self.console = console - self._lock = Lock() - - def on_request_start(self) -> None: - with self._lock: - #self.console.call_from_thread(self.console.show_spinner) - # TODO. danip add spinner in textualize terminal - # First request => create spinner - self._progress = Progress( - SpinnerColumn(spinner_name="dots2"), - TextColumn("[blue]Querying LLM...[/blue]"), - console=self.console, - transient=True, - ) - self._progress.start() - self._task_id = self._progress.add_task("llm", start=True) - - def on_request_end(self) -> None: - with self._lock: - #self.console.call_from_thread(self.console.hide_spinner) - if self._progress is not None: - # Last request finished => stop spinner - try: - if self._task_id is not None: - self._progress.remove_task(self._task_id) - except Exception: - pass - self._progress.stop() - self._progress = None - self._task_id = None + + # spinner states + self.spinner_timer: Timer | None = None + self.spinner_frames = ["⠋","⠙","⠹","⠸","⠼","⠴","⠦","⠧","⠇","⠏"] + self.spinner_frame_index = 0 + self.spinner_line_index: int | None = None + + + def on_request_start(self, text: str = "Querying LLM...") -> None: + """ + Create the spinner line at the end of the log and start updating it. 
+ """ + + self.running_color = "blue" + self.end_color = "bold green" + + if self.spinner_timer is not None: + return # already running + + # Add a new line for the spinner and remember its index + self.spinner_line_index = len(self.console._log_lines) + self.console._log_lines.append(f"[{self.running_color}]{text}[/{self.running_color}]") + self.spinner_frame_index = 0 + + # Update every 0.1s + self.spinner_timer = self.console.set_interval(0.1, self.update_spinner) + self.console._render_log() + + def update_spinner(self) -> None: + """ + Timer callback. Rotate the spinner frame on the stored last log line. + """ + + if self.spinner_line_index is None: + return + + frame = self.spinner_frames[self.spinner_frame_index] + self.spinner_frame_index = (self.spinner_frame_index + 1) % len(self.spinner_frames) + + # Update that specific line only + self.console._log_lines[self.spinner_line_index] = f"[{self.running_color}] Sleeping {frame} [/{self.running_color}]" + self.console._render_log() + + def on_request_end(self, final_text: str | None = None) -> None: + """ + Stop the spinner. 
+ Optional, replace the line with final_text.""" + + if self.spinner_timer is not None: + self.spinner_timer.stop() + self.spinner_timer = None + + if self.spinner_line_index is not None: + if final_text is not None: + self.console._log_lines[self.spinner_line_index] = f"[{self.end_color}]{final_text}[/{self.end_color}]" + self.spinner_line_index = None + self.console._render_log() # ---------- Modal checklist ---------- @@ -227,7 +253,7 @@ def __init__(self, tools_from_entrypoints: str = "", user_context: str = "", mai self.session = PromptSession() self.last_plan = None self.last_bb = None - self.hooks = SpinnerHook(console) + self.hooks = SpinnerHook(self) self.model = model self.k = k @@ -247,18 +273,18 @@ def __init__(self, tools_from_entrypoints: str = "", user_context: str = "", mai async def on_mount(self) -> None: - self._set_input_enabled(False) + self.set_input_enabled(False) self._log("Starting VulcanAI...", log_color=1) await asyncio.sleep(0) - asyncio.create_task(self._bootstrap()) + asyncio.create_task(self.bootstrap()) """self.cmd_input.focus()""" """ # Disable commands until we finish boot - #self._set_input_enabled(False) + #self.set_input_enabled(False) self.init _manager(self.iterative) @@ -317,7 +343,7 @@ def compose(self) -> ComposeResult: yield Static("", id="logcontent") yield Input(placeholder="> ", id="cmd") - async def _bootstrap(self) -> None: + async def bootstrap(self) -> None: """ Function used to print information in runtime execution of a function @@ -386,7 +412,7 @@ def log_cb(msg: str) -> None: # TODO. danip. 
add the queries information if True: self._is_ready = True - self._set_input_enabled(True) + self.set_input_enabled(True) self._log("VulcanAI Interactive Console", log_color=2) self._log("Type 'exit' to quit.\n", log_color=2) @@ -541,14 +567,14 @@ def cmd_echo(self, args) -> None: # region Logging - def _render_log(self) -> None: + def render_log(self) -> None: """self.query_one("#log", Static).update("\n".join(self._log_lines))""" log_static = self.query_one("#logcontent", Static) log_static.update("\n".join(self._log_lines)) self.query_one("#logview", VerticalScroll).scroll_end(animate=False) - """def _render_log(self): + """def render_log(self): log = self.query_one("#log", Static) # Allow Rich markup for colors and styles log.update("\n".join(self._log_lines), markup=True)""" @@ -563,7 +589,7 @@ def _log(self, line: str, log_type: str = "", log_color: int = -1) -> None: elif log_type == "manager": msg = "[bold blue]\[MANAGER][/bold blue] " elif log_type == "executor": - msg = "[bold light green]\[EXECUTOR][/bold light green] " + msg = "[bold green]\[EXECUTOR][/bold green] " elif log_type == "validator": msg = "[bold orange_red1]\[VALIDATOR][/bold orange_red1] " elif log_type == "error": @@ -584,7 +610,7 @@ def _log(self, line: str, log_type: str = "", log_color: int = -1) -> None: msg += f"[{color_type}]{line}[/{color_type}]" self._log_lines.append(msg) - self._render_log() + self.render_log() """def _log(self, line: str): if "error" in line.lower(): @@ -594,18 +620,55 @@ def _log(self, line: str, log_type: str = "", log_color: int = -1) -> None: elif "success" in line.lower(): line = f"[green]{line}[/green]" self._log_lines.append(line) - self._render_log()""" + self.render_log()""" # endregion # region Input - def _set_input_enabled(self, enabled: bool) -> None: + def set_input_enabled(self, enabled: bool) -> None: cmd = self.query_one("#cmd", Input) cmd.disabled = not enabled if enabled: self.set_focus(cmd) + @work # runs in a worker so waiting won't freeze 
the UI + async def handle_user_query(self, user_input) -> None: + """ + Function used in '/edit_tools' command. + It creates a dialog with all the tools. + """ + # create the checklist dialog + # Check for image input. Must be always at the end of the input + + try: + images = [] + if "--image=" in user_input: + images = self.get_images(user_input) + + # Handle user request + try: + result = self.manager.handle_user_request(user_input, context={"images": images}) + except Exception as e: + #self.print(f"[error]Error handling request:[/error] {e}") + self._log(f"[error]Error handling request:[/error] {e}") + return + + self.last_plan = result.get("plan", None) + self.last_bb = result.get("blackboard", None) + + #self.print(f"Output of plan: {result.get('blackboard', {None})}") + self._log(f"Output of plan: {result.get('blackboard', {None})}") + + except KeyboardInterrupt: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + except EOFError: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + async def on_input_submitted(self, event: Input.Submitted) -> None: """ Enter key @@ -646,24 +709,7 @@ async def on_input_submitted(self, event: Input.Submitted) -> None: self.handle_command(user_input) return - # Check for image input. 
Must be always at the end of the input - images = [] - if "--image=" in user_input: - images = self.get_images(user_input) - - # Handle user request - try: - result = self.manager.handle_user_request(user_input, context={"images": images}) - except Exception as e: - #self.print(f"[error]Error handling request:[/error] {e}") - self._log(f"[error]Error handling request:[/error] {e}") - return - - self.last_plan = result.get("plan", None) - self.last_bb = result.get("blackboard", None) - - #self.print(f"Output of plan: {result.get('blackboard', {None})}") - self._log(f"Output of plan: {result.get('blackboard', {None})}") + self.handle_user_query(user_input) except KeyboardInterrupt: #console.print("[yellow]Exiting...[/yellow]") @@ -765,7 +811,7 @@ async def on_key(self, event: events.Key) -> None: # If multiple matches, check for a longer common prefix to insert first if len(matches) > 1: - prefix = self._common_prefix(matches) + prefix = self.common_prefix(matches) new_value = prefix else: # Single match: complete directly @@ -822,7 +868,7 @@ async def on_key(self, event: events.Key) -> None: self._tab_matches = [] self._tab_index = 0 - def _common_prefix(self, strings: str) -> str: + def common_prefix(self, strings: str) -> str: if not strings: return "" From 609ec82bb044827a077b388c4aaabcdb5989e238 Mon Sep 17 00:00:00 2001 From: danipiza Date: Fri, 14 Nov 2025 10:09:18 +0100 Subject: [PATCH 07/40] [#23892] Added new command 'edit_tools' with checkbox Signed-off-by: danipiza --- src/vulcanai/console/console.py | 27 +++++++++++++++++------ src/vulcanai/core/manager.py | 27 +++++++++++++++-------- src/vulcanai/tools/tool_registry.py | 34 ++++++++++++++++++++++++++++- 3 files changed, 71 insertions(+), 17 deletions(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index b12310f..bbabcd9 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -141,16 +141,17 @@ class CheckListScreen(ModalScreen[list[str] | 
None]): """ - def __init__(self, lines: Iterable[str]) -> None: + def __init__(self, lines: Iterable[str], active_tools_num: int=0) -> None: super().__init__() self._lines = list(lines) + self.active_tools_num = active_tools_num def compose(self) -> ComposeResult: with Vertical(classes="dialog"): yield Label("Pick the lines you want to print", classes="title") # Make one checkbox per provided line for i, line in enumerate(self._lines, start=1): - yield Checkbox(line, value=True, id=f"cb{i}") + yield Checkbox(line, value=i<=self.active_tools_num, id=f"cb{i}")#True, id=f"cb{i}") with Horizontal(classes="btns"): yield Button("Cancel", variant="default", id="cancel") yield Button("Submit", variant="primary", id="submit") @@ -437,13 +438,13 @@ def print_output(self, message: str): self.term.write(message)""" @work # runs in a worker so waiting won't freeze the UI - async def open_checklist(self, tools_list: list[str]) -> None: + async def open_checklist(self, tools_list: list[str], active_tools_num: int) -> None: """ Function used in '/edit_tools' command. It creates a dialog with all the tools. 
""" # create the checklist dialog - selected = await self.push_screen_wait(CheckListScreen(tools_list)) + selected = await self.push_screen_wait(CheckListScreen(tools_list, active_tools_num)) if selected is None: self._log("Selection cancelled.", log_color=3) @@ -451,8 +452,14 @@ async def open_checklist(self, tools_list: list[str]) -> None: self._log("No items selected.", log_color=3) else: self._log("Submitting selected lines:", log_color=1) - for line in selected: - self._log(line, log_color=2) + + for tool_tmp in tools_list: + tool = tool_tmp[2:] # remove "- " + if tool_tmp in selected: + self.manager.registry.activate_tool(tool) + else: + self.manager.registry.deactivate_tool(tool) + self._log(f"Deactivated tool '{tool}'", log_color=2) # endregion @@ -491,7 +498,13 @@ def cmd_edit_tools(self, _) -> None: for tool in self.manager.registry.tools.values(): tools_list.append(f"- {tool.name}")#: {tool.description}") - self.open_checklist(tools_list) + active_tools_num = len(tools_list) + + # TODO. 
danip check + for deactivated_tool in self.manager.registry.deactivated_tools.values(): + tools_list.append(f"- {deactivated_tool.name}") + + self.open_checklist(tools_list, active_tools_num) def cmd_change_k(self, args) -> None: if len(args) == 0: diff --git a/src/vulcanai/core/manager.py b/src/vulcanai/core/manager.py index 128335a..c6bad39 100644 --- a/src/vulcanai/core/manager.py +++ b/src/vulcanai/core/manager.py @@ -33,12 +33,12 @@ def __init__( hist_depth: int = 3, logger=None ): - self.logger = logger or VulcanAILogger.log_manager + self.logger = logger #or VulcanAILogger.log_manager self.llm = Agent(model, self.logger) self.k = k - self.registry = registry or ToolRegistry(logger=(logger or VulcanAILogger.log_registry)) + self.registry = registry or ToolRegistry(logger=(logger))# or VulcanAILogger.log_registry)) self.validator = validator - self.executor = PlanExecutor(self.registry, logger=(logger or VulcanAILogger.log_executor)) + self.executor = PlanExecutor(self.registry, logger=(logger))# or VulcanAILogger.log_executor)) self.bb = Blackboard() self.user_context = "" # History is saved as a list of Tuples of user requests and plan summaries @@ -97,12 +97,12 @@ def handle_user_request(self, user_text: str, context: Dict[str, Any]) -> Dict[s try: self.validator.validate(plan) except Exception as e: - VulcanAILogger.log_validator(f"Plan validation error: {e}") + self.logger(f"Plan validation error: {e}", log_type="validator") # error raise e # Execute plan ret = self.execute_plan(plan) except Exception as e: - self.logger(f"Error handling user request: {e}", error=True) + self.logger(f"Error handling user request: {e}", log_type="manager", log_color=0) # error ret = {"error": str(e)} return ret @@ -128,8 +128,8 @@ def get_plan_from_user_request(self, user_text: str, context: Dict[str, Any] = N images = context["images"] # Query LLM - plan = self.llm.inference_plan(system_prompt, user_prompt, images, self.history) - self.logger(f"Plan received:\n{plan}") + 
plan = self.llm.inference(system_prompt, user_prompt, images, self.history) + self.logger(f"Plan received:\n{plan}", log_type="manager") # Save to history if plan: self._add_to_history(user_prompt, plan.summary) @@ -164,7 +164,7 @@ def _build_prompt(self, user_text: str, ctx: Dict[str, Any]) -> Tuple[str, str]: """ tools = self.registry.top_k(user_text, self.k) if not tools: - self.logger("No tools available in the registry.", error=True) + self.logger("No tools available in the registry.", log_type="manager", log_color=0) # error return "", "" tool_descriptions = [] for tool in tools: @@ -196,13 +196,22 @@ def update_history_depth(self, new_depth: int): :param new_depth: The new history depth. """ self.history_depth = max(0, int(new_depth)) - self.logger(f"Updated history depth to {new_depth}") + self.logger(f"Updated history depth to {new_depth}", log_type="manager", log_color=2) if len(self.history) > self.history_depth: if self.history_depth <= 0: self.history = [] else: self.history = self.history[-self.history_depth:] + def update_k_index(self, new_k: int): + """ + Update the k index. + + :param new_k: The new k index. + """ + self.k = max(1, int(new_k)) + self.logger(f"Updated k index to {new_k}", log_type="manager", log_color=2) + def _get_prompt_template(self) -> str: template = """ You are a planner assistant controlling a robotic system. 
diff --git a/src/vulcanai/tools/tool_registry.py b/src/vulcanai/tools/tool_registry.py index 11cfafb..9be404f 100644 --- a/src/vulcanai/tools/tool_registry.py +++ b/src/vulcanai/tools/tool_registry.py @@ -76,8 +76,10 @@ class ToolRegistry: def __init__(self, embedder=None, logger=None): # Logging function self.logger = logger #or VulcanAILogger.log_registry - # Dictionary of tool name -> tool instance + # Dictionary of tools (name -> tool instance) self.tools: Dict[str, ITool] = {} + # Dictionary of deactivated_tools (name -> tool instance) + self.deactivated_tools: Dict[str, ITool] = {} # Embedding model for tool metadata self.embedder = embedder or SBERTEmbedder() # Simple in-memory index of (name, embedding) @@ -107,6 +109,36 @@ def register_tool(self, tool: ITool, solve_deps: bool = True): if issubclass(type(tool), CompositeTool): self._resolve_dependencies(tool) + def activate_tool(self, tool_name): + # check if the tool is already active + if tool_name in self.tools: + return + # check if the tool is deactivated + if tool_name not in self.deactivated_tools: + self.logger(f"Tool '{tool_name}' not found in the deactivated tools list.", error=True) + return + + # add the tool to the active tools + self.tools[tool_name] = self.deactivated_tools[tool_name] + + # removed the tool from the deactivated tools + del self.deactivated_tools[tool_name] + + def deactivate_tool(self, tool_name): + # check if the tool is already deactivated + if tool_name in self.deactivated_tools: + return + # check if the tool is active + if tool_name not in self.tools: + self.logger(f"Tool '{tool_name}' not found int the active tools list.", error=True) + return + + # add the tool to the deactivated tools + self.deactivated_tools[tool_name] = self.tools[tool_name] + + # removed the tool from the active tools + del self.tools[tool_name] + def register(self): """Register all loaded classes marked with @vulcanai_tool.""" composite_classes = [] From 50a7a37db243340e7fdb14f7e3a755ca780c695e 
Mon Sep 17 00:00:00 2001 From: danipiza Date: Fri, 14 Nov 2025 13:31:23 +0100 Subject: [PATCH 08/40] [#23892] Updated code to print information while querying Signed-off-by: danipiza --- src/vulcanai/console/console.py | 267 ++++++++++++++++------------ src/vulcanai/core/manager.py | 2 +- src/vulcanai/models/gemini.py | 4 +- src/vulcanai/models/ollama_model.py | 4 +- src/vulcanai/models/openai.py | 10 +- 5 files changed, 167 insertions(+), 120 deletions(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index bbabcd9..1b68a78 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -62,6 +62,12 @@ def __init__(self, console): self.spinner_line_index: int | None = None + #self.running_query = False + #self.create_spinner_timer() + + async def create_spinner_timer(self): + self.spinner_timer = self.console.set_interval(0.1, self.update_spinner) + def on_request_start(self, text: str = "Querying LLM...") -> None: """ Create the spinner line at the end of the log and start updating it. @@ -69,23 +75,31 @@ def on_request_start(self, text: str = "Querying LLM...") -> None: self.running_color = "blue" self.end_color = "bold green" + self.text = text if self.spinner_timer is not None: return # already running # Add a new line for the spinner and remember its index self.spinner_line_index = len(self.console._log_lines) - self.console._log_lines.append(f"[{self.running_color}]{text}[/{self.running_color}]") + self.console._log(f"[{self.running_color}]{text}[/{self.running_color}]") + #self.console._log_lines.append(f"[{self.running_color}]{text}[/{self.running_color}]") self.spinner_frame_index = 0 # Update every 0.1s self.spinner_timer = self.console.set_interval(0.1, self.update_spinner) - self.console._render_log() + #self.call_from_thread(self.create_spinner_timer) + #self.running_query = True + + self.console.render_log() def update_spinner(self) -> None: """ Timer callback. 
Rotate the spinner frame on the stored last log line. """ + #if self.running_query == False: + # return + if self.spinner_line_index is None: return @@ -94,10 +108,11 @@ def update_spinner(self) -> None: self.spinner_frame_index = (self.spinner_frame_index + 1) % len(self.spinner_frames) # Update that specific line only - self.console._log_lines[self.spinner_line_index] = f"[{self.running_color}] Sleeping {frame} [/{self.running_color}]" - self.console._render_log() + self.console._log_lines[self.spinner_line_index] = f"[{self.running_color}]{self.text} {frame} [/{self.running_color}]" + #self.console._log(f"[{self.running_color}] Sleeping {frame} [/{self.running_color}]") + self.console.render_log() - def on_request_end(self, final_text: str | None = None) -> None: + def on_request_end(self) -> None: """ Stop the spinner. Optional, replace the line with final_text.""" @@ -107,10 +122,9 @@ def on_request_end(self, final_text: str | None = None) -> None: self.spinner_timer = None if self.spinner_line_index is not None: - if final_text is not None: - self.console._log_lines[self.spinner_line_index] = f"[{self.end_color}]{final_text}[/{self.end_color}]" + self.console._log_lines[self.spinner_line_index] = f"[{self.end_color}]Query finished.[/{self.end_color}]" self.spinner_line_index = None - self.console._render_log() + self.console.render_log() # ---------- Modal checklist ---------- @@ -281,52 +295,6 @@ async def on_mount(self) -> None: await asyncio.sleep(0) asyncio.create_task(self.bootstrap()) - """self.cmd_input.focus()""" - - """ - # Disable commands until we finish boot - #self.set_input_enabled(False) - - self.init _manager(self.iterative) - - # command registry: name -> handler - self.commands: Dict[str, Callable[[List[str]], None]] = { - "/help": self.cmd_help, - #"help": self.cmd_help, - #"h": self.cmd_help, - "/tools": self.cmd_tools, - "/change_k": self.cmd_change_k, - "/history": self.cmd_history_index, - "/show_history": self.cmd_show_history, - 
"/plan": self.cmd_plan, - "/rerun": self.cmd_rerun, - "/bb": self.cmd_blackboard_state, - "/clear": self.cmd_clear, - #"clear": self.cmd_clear, - "/exit": self.cmd_quit, - #"q": self.cmd_quit, - #"exit": self.cmd_quit, - } - - # cycling through tab matches - self._tab_matches = [] - self._tab_index = 0 - - # Override hooks with spinner controller - try: - self.manager.llm.set_hooks(self.hooks) - except Exception: - pass - - if self.tools_from_entrypoints != "": - self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) - - self.manager.add_user_context(self.user_context) - - # Add the shared node to the console manager blackboard to be used by tools - if self.main_node != None: - self.manager.bb["main_node"] = self.main_node""" - def compose(self) -> ComposeResult: """yield Header(show_clock=True) #yield Log(id="term", highlight=True)#, markup=True) @@ -344,78 +312,152 @@ def compose(self) -> ComposeResult: yield Static("", id="logcontent") yield Input(placeholder="> ", id="cmd") - async def bootstrap(self) -> None: + def log_cb(self, msg: str) -> None: """ - Function used to print information in runtime execution of a function + Print the msg while executing a function + """ + self.call_from_thread(self._log, msg) - TODO. 
danip update this function to allow queries information and add the spinner + async def bootstrap(self, user_input: str="") -> None: + """ + Function used to print information in runtime execution of a function """ - def worker(log_cb: Callable[[str], None]) -> None: - self.init_manager(log_cb) - - # add the commands - # command registry: name -> handler - self.commands = { - "/help": self.cmd_help, - #"help": self.cmd_help, - #"h": self.cmd_help, - "/tools": self.cmd_tools, - "/edit_tools": self.cmd_edit_tools, - "/change_k": self.cmd_change_k, - "/history": self.cmd_history_index, - "/show_history": self.cmd_show_history, - "/plan": self.cmd_plan, - "/rerun": self.cmd_rerun, - "/bb": self.cmd_blackboard_state, - "/clear": self.cmd_clear, - #"clear": self.cmd_clear, - "/exit": self.cmd_quit, - #"q": self.cmd_quit, - #"exit": self.cmd_quit, - } - - #log_cb("Added commands.") - - # cycling through tab matches - self._tab_matches = [] - self._tab_index = 0 - # Override hooks with spinner controller - try: - self.manager.llm.set_hooks(self.hooks) - except Exception: - pass + def worker(log_cb: Callable[[str], None], user_input: str="") -> None: + + if user_input == "": + self.init_manager(log_cb) + + # add the commands + # command registry: name -> handler + self.commands = { + "/help": self.cmd_help, + "/tools": self.cmd_tools, + "/edit_tools": self.cmd_edit_tools, + "/change_k": self.cmd_change_k, + "/history": self.cmd_history_index, + "/show_history": self.cmd_show_history, + "/plan": self.cmd_plan, + "/rerun": self.cmd_rerun, + "/bb": self.cmd_blackboard_state, + "/clear": self.cmd_clear, + "/exit": self.cmd_quit, + } + + #log_cb("Added commands.") + + # cycling through tab matches + self._tab_matches = [] + self._tab_index = 0 + + # Override hooks with spinner controller + try: + self.manager.llm.set_hooks(self.hooks) + except Exception: + pass - #log_cb("Added hooks.") + #log_cb("Added hooks.") - if self.tools_from_entrypoints != "": - 
self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) + if self.tools_from_entrypoints != "": + self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) - #log_cb("Added tools.") + #log_cb("Added tools.") - self.manager.add_user_context(self.user_context) + self.manager.add_user_context(self.user_context) - #log_cb("Added user_context.") + #log_cb("Added user_context.") - # Add the shared node to the console manager blackboard to be used by tools - if self.main_node != None: - self.manager.bb["main_node"] = self.main_node + # Add the shared node to the console manager blackboard to be used by tools + if self.main_node != None: + self.manager.bb["main_node"] = self.main_node + else: + self.set_input_enabled(False) - def log_cb(msg: str) -> None: + try: + images = [] + if "--image=" in user_input: + images = self.get_images(user_input) + + # Handle user request + try: + result = self.manager.handle_user_request(user_input, context={"images": images}) + except Exception as e: + #self.print(f"[error]Error handling request:[/error] {e}") + self._log(f"[error]Error handling request:[/error] {e}") + return + + self.last_plan = result.get("plan", None) + self.last_bb = result.get("blackboard", None) + + #self.print(f"Output of plan: {result.get('blackboard', {None})}") + self._log(f"Output of plan: {result.get('blackboard', {None})}") + + except KeyboardInterrupt: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + except EOFError: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + + def handle_user_query(self, user_input) -> None: """ - Print the msg while executing a function + Function used in '/edit_tools' command. + It creates a dialog with all the tools. """ - self.call_from_thread(self._log, msg) + # create the checklist dialog + # Check for image input. 
Must be always at the end of the input + #user_input = "move trutle 1 unit forward." + + try: + self.set_input_enabled(False) + + images = [] + if "--image=" in user_input: + images = self.get_images(user_input) + + # Handle user request + try: + result = self.manager.handle_user_request(user_input, context={"images": images}) + except Exception as e: + #self.print(f"[error]Error handling request:[/error] {e}") + self._log(f"[error]Error handling request:[/error] {e}") + return + + self.last_plan = result.get("plan", None) + self.last_bb = result.get("blackboard", None) + + #self.print(f"Output of plan: {result.get('blackboard', {None})}") + self._log(f"Output of plan: {result.get('blackboard', {None})}") + + except KeyboardInterrupt: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + except EOFError: + #console.print("[yellow]Exiting...[/yellow]") + self._log("[yellow]Exiting...[/yellow]") + return + + if user_input != "": + self.hooks.on_request_start() loop = asyncio.get_running_loop() - await loop.run_in_executor(None, lambda: worker(log_cb)) + await loop.run_in_executor(None, lambda: worker(self.log_cb, user_input)) + + if user_input == "": + #await loop.run_in_executor(None, lambda: worker(log_cb, user_input)) - # TODO. danip. add the queries information - if True: self._is_ready = True self.set_input_enabled(True) self._log("VulcanAI Interactive Console", log_color=2) self._log("Type 'exit' to quit.\n", log_color=2) + else: + self.set_input_enabled(True) + + #await loop.run_in_executor(None, lambda: worker(log_cb)) + #await loop.run_in_executor(None, lambda: handle_user_query()) # region Utilities @@ -500,7 +542,6 @@ def cmd_edit_tools(self, _) -> None: active_tools_num = len(tools_list) - # TODO. 
danip check for deactivated_tool in self.manager.registry.deactivated_tools.values(): tools_list.append(f"- {deactivated_tool.name}") @@ -645,12 +686,12 @@ def set_input_enabled(self, enabled: bool) -> None: if enabled: self.set_focus(cmd) - @work # runs in a worker so waiting won't freeze the UI + """@work # runs in a worker so waiting won't freeze the UI async def handle_user_query(self, user_input) -> None: - """ + #"" Function used in '/edit_tools' command. It creates a dialog with all the tools. - """ + #"" # create the checklist dialog # Check for image input. Must be always at the end of the input @@ -680,7 +721,7 @@ async def handle_user_query(self, user_input) -> None: except EOFError: #console.print("[yellow]Exiting...[/yellow]") self._log("[yellow]Exiting...[/yellow]") - return + return""" async def on_input_submitted(self, event: Input.Submitted) -> None: """ @@ -722,7 +763,9 @@ async def on_input_submitted(self, event: Input.Submitted) -> None: self.handle_command(user_input) return - self.handle_user_query(user_input) + #self.handle_user_query(user_input) + await asyncio.sleep(0) + asyncio.create_task(self.bootstrap(user_input)) except KeyboardInterrupt: #console.print("[yellow]Exiting...[/yellow]") diff --git a/src/vulcanai/core/manager.py b/src/vulcanai/core/manager.py index c6bad39..2f34383 100644 --- a/src/vulcanai/core/manager.py +++ b/src/vulcanai/core/manager.py @@ -128,7 +128,7 @@ def get_plan_from_user_request(self, user_text: str, context: Dict[str, Any] = N images = context["images"] # Query LLM - plan = self.llm.inference(system_prompt, user_prompt, images, self.history) + plan = self.llm.inference_plan(system_prompt, user_prompt, images, self.history) self.logger(f"Plan received:\n{plan}", log_type="manager") # Save to history if plan: diff --git a/src/vulcanai/models/gemini.py b/src/vulcanai/models/gemini.py index 2816c46..253795c 100644 --- a/src/vulcanai/models/gemini.py +++ b/src/vulcanai/models/gemini.py @@ -73,10 +73,10 @@ def 
_inference( ) # Notify hooks of request start - try: + """try: self.hooks.on_request_start() except Exception as e: - pass + pass""" response = self.model.models.generate_content( model=self.model_name, diff --git a/src/vulcanai/models/ollama_model.py b/src/vulcanai/models/ollama_model.py index 6ce1dd4..7292166 100644 --- a/src/vulcanai/models/ollama_model.py +++ b/src/vulcanai/models/ollama_model.py @@ -63,10 +63,10 @@ def _inference( messages = self._build_messages(system_prompt, user_content, history) # Notify hooks of request start - try: + """try: self.hooks.on_request_start() except Exception as e: - pass + pass""" # Call Ollama with response_format bound to the desired schema/class try: diff --git a/src/vulcanai/models/openai.py b/src/vulcanai/models/openai.py index dde0f69..2f2598e 100644 --- a/src/vulcanai/models/openai.py +++ b/src/vulcanai/models/openai.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + from openai import OpenAI from typing import Any, Dict, Iterable, Optional, Type, TypeVar import mimetypes @@ -26,11 +27,12 @@ class OpenAIModel(IModel): """ Wrapper for OpenAI models. 
""" - def __init__(self, model_name: str, logger=None, hooks: Optional[IModelHooks] = None): + def __init__(self, model_name: str, logger=None, hooks: Optional[IModelHooks] = None):#, console = None): super().__init__() self.logger = logger self.model_name = model_name self.hooks = hooks + #self.console = console try: self.model = OpenAI() except Exception as e: @@ -64,10 +66,11 @@ def _inference( messages = self._build_messages(system_prompt, user_content, history) # Notify hooks of request start - try: + """try: self.hooks.on_request_start() + #self.console.call_from_thread(self.hooks.on_request_start) except Exception as e: - pass + pass""" # Call OpenAI with response_format bound to the desired schema/class try: @@ -83,6 +86,7 @@ def _inference( # Notify hooks of request end try: self.hooks.on_request_end() + #self.console.call_from_thread(self.hooks.on_request_end) except Exception as e: pass From 16097955588596168b00081b6d4bd03cee334284 Mon Sep 17 00:00:00 2001 From: danipiza Date: Mon, 17 Nov 2025 07:28:03 +0100 Subject: [PATCH 09/40] [#23892] Removed 'vulcanai_turtlesim_demo' Signed-off-by: danipiza --- src/vulcanai_turtlesim_demo/package.xml | 19 - .../resource/vulcanai_turtlesim_demo | 0 src/vulcanai_turtlesim_demo/setup.cfg | 4 - src/vulcanai_turtlesim_demo/setup.py | 34 -- .../test/test_copyright.py | 25 -- .../test/test_flake8.py | 25 -- .../test/test_pep257.py | 23 -- .../test/test_xmllint.py | 23 -- .../vulcanai_turtlesim_demo/__init__.py | 0 .../vulcanai_turtlesim_demo/ros2_node.py | 90 ----- .../turtlesim_tools.py | 373 ------------------ 11 files changed, 616 deletions(-) delete mode 100644 src/vulcanai_turtlesim_demo/package.xml delete mode 100644 src/vulcanai_turtlesim_demo/resource/vulcanai_turtlesim_demo delete mode 100644 src/vulcanai_turtlesim_demo/setup.cfg delete mode 100644 src/vulcanai_turtlesim_demo/setup.py delete mode 100644 src/vulcanai_turtlesim_demo/test/test_copyright.py delete mode 100644 
src/vulcanai_turtlesim_demo/test/test_flake8.py delete mode 100644 src/vulcanai_turtlesim_demo/test/test_pep257.py delete mode 100644 src/vulcanai_turtlesim_demo/test/test_xmllint.py delete mode 100644 src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/__init__.py delete mode 100644 src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py delete mode 100644 src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py diff --git a/src/vulcanai_turtlesim_demo/package.xml b/src/vulcanai_turtlesim_demo/package.xml deleted file mode 100644 index 2501110..0000000 --- a/src/vulcanai_turtlesim_demo/package.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - vulcanai_turtlesim_demo - 0.0.0 - TODO: Package description - danipiza - TODO: License declaration - - ament_copyright - ament_flake8 - ament_pep257 - ament_xmllint - python3-pytest - - - ament_python - - diff --git a/src/vulcanai_turtlesim_demo/resource/vulcanai_turtlesim_demo b/src/vulcanai_turtlesim_demo/resource/vulcanai_turtlesim_demo deleted file mode 100644 index e69de29..0000000 diff --git a/src/vulcanai_turtlesim_demo/setup.cfg b/src/vulcanai_turtlesim_demo/setup.cfg deleted file mode 100644 index 5fc02a6..0000000 --- a/src/vulcanai_turtlesim_demo/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/vulcanai_turtlesim_demo -[install] -install_scripts=$base/lib/vulcanai_turtlesim_demo diff --git a/src/vulcanai_turtlesim_demo/setup.py b/src/vulcanai_turtlesim_demo/setup.py deleted file mode 100644 index d38154b..0000000 --- a/src/vulcanai_turtlesim_demo/setup.py +++ /dev/null @@ -1,34 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'vulcanai_turtlesim_demo' - -setup( - name=package_name, - version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools'], - zip_safe=True, - 
maintainer='danipiza', - maintainer_email='dpizarrogallego@gmail.com', - description='TODO: Package description', - license='TODO: License declaration', - extras_require={ - 'test': [ - 'pytest', - ], - }, - entry_points={ - "console_scripts": [ - "vulcanai_turtlesim_demo = vulcanai_turtlesim_demo.ros2_node:main", - ], - "turtle_tools": [ - "turtle_tools = vulcanai_turtlesim_demo.turtlesim_tools", - ], - }, - -) diff --git a/src/vulcanai_turtlesim_demo/test/test_copyright.py b/src/vulcanai_turtlesim_demo/test/test_copyright.py deleted file mode 100644 index 97a3919..0000000 --- a/src/vulcanai_turtlesim_demo/test/test_copyright.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_copyright.main import main -import pytest - - -# Remove the `skip` decorator once the source file(s) have a copyright header -@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') -@pytest.mark.copyright -@pytest.mark.linter -def test_copyright(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found errors' diff --git a/src/vulcanai_turtlesim_demo/test/test_flake8.py b/src/vulcanai_turtlesim_demo/test/test_flake8.py deleted file mode 100644 index 27ee107..0000000 --- a/src/vulcanai_turtlesim_demo/test/test_flake8.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Open Source Robotics Foundation, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_flake8.main import main_with_errors -import pytest - - -@pytest.mark.flake8 -@pytest.mark.linter -def test_flake8(): - rc, errors = main_with_errors(argv=[]) - assert rc == 0, \ - 'Found %d code style errors / warnings:\n' % len(errors) + \ - '\n'.join(errors) diff --git a/src/vulcanai_turtlesim_demo/test/test_pep257.py b/src/vulcanai_turtlesim_demo/test/test_pep257.py deleted file mode 100644 index b234a38..0000000 --- a/src/vulcanai_turtlesim_demo/test/test_pep257.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ament_pep257.main import main -import pytest - - -@pytest.mark.linter -@pytest.mark.pep257 -def test_pep257(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found code style errors / warnings' diff --git a/src/vulcanai_turtlesim_demo/test/test_xmllint.py b/src/vulcanai_turtlesim_demo/test/test_xmllint.py deleted file mode 100644 index 3e08c02..0000000 --- a/src/vulcanai_turtlesim_demo/test/test_xmllint.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ament_xmllint.main import main -import pytest - - -@pytest.mark.linter -@pytest.mark.xmllint -def test_xmllint() -> None: - rc = main(argv=[]) - assert rc == 0, 'Found code style errors / warnings' diff --git a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/__init__.py b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py deleted file mode 100644 index e81ef68..0000000 --- a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/ros2_node.py +++ /dev/null @@ -1,90 +0,0 @@ -import threading - -import rclpy -from rclpy.node import Node -from rclpy.task import Future - -from vulcanai import VulcanConsole - -class SharedNode(Node): - def __init__(self, name: str = "vulcanai_shared_node"): - super().__init__(name) - # Dictionary to store created clients - self._vulcan_clients = {} - # Dictionary to store created publishers - self._vulcan_publishers = {} - - # Ensure entities creation is thread-safe. - self.node_lock = threading.Lock() - - def get_client(self, srv_type, srv_name): - """ - Get a cached client for the specified service type and name or - create a new one if it doesn't exist. - """ - key = (srv_type, srv_name) - with self.node_lock: - if key not in self._vulcan_clients: - client = self.create_client(srv_type, srv_name) - self._vulcan_clients[key] = client - self.get_logger().info(f"Created new client for {srv_name}") - return self._vulcan_clients[key] - - def get_publisher(self, msg_type, topic_name): - """ - Get a cached publisher for the specified message type and topic name or - create a new one if it doesn't exist. 
- """ - key = (msg_type, topic_name) - with self.node_lock: - if key not in self._vulcan_publishers: - publisher = self.create_publisher(msg_type, topic_name, 10) - self._vulcan_publishers[key] = publisher - self.get_logger().info(f"Created new publisher for {topic_name}") - return self._vulcan_publishers[key] - - def wait_for_message(self, msg_type, topic: str, timeout_sec: float = None): - """ - Block until a message is received or timeout expires. - Subscriptions are created on demand and destroyed after use to avoid - handling spins and callbacks in a separate thread. - """ - future = Future() - - def callback(msg): - if not future.done(): - future.set_result(msg) - - sub = self.create_subscription(msg_type, topic, callback, 10) - - rclpy.spin_until_future_complete(self, future, timeout_sec=timeout_sec) - self.destroy_subscription(sub) - - if future.done(): - return future.result() - return None - -def main(args=None): - # Create a ROS 2 node that will be used by the tools to avoid - # recurrent creation and destruction of DDS Participants - rclpy.init(args=args) - node = SharedNode(name="vulcanai_shared_node") - - user_context = """\ -You are controlling the turtlesim simulation from ROS 2. 
-The simulation has one or more turtles that can move around, drawing on the screen as they go.""" - - """console = VulcanConsole() - console.manager.register_tools_from_entry_points("turtle_tools") - console.manager.add_user_context(user_context) - # Add the shared node to the console manager blackboard to be used by tools - console.manager.bb["main_node"] = node - console.run()""" - - - console = VulcanConsole("turtle_tools", user_context, node) - console.run() - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py b/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py deleted file mode 100644 index 4a926ba..0000000 --- a/src/vulcanai_turtlesim_demo/vulcanai_turtlesim_demo/turtlesim_tools.py +++ /dev/null @@ -1,373 +0,0 @@ -""" -This file contains example tools to interact with ROS 2 turtlesim and -demonstrate how to create custom tools compatible with VulcanAI. -""" - -from math import cos, sin, pi -import time - -import rclpy -from geometry_msgs.msg import Twist -from std_srvs.srv import Empty -from turtlesim_msgs.srv import Kill, SetPen, Spawn, TeleportAbsolute, TeleportRelative -from turtlesim_msgs.msg import Pose - -from vulcanai import AtomicTool, CompositeTool, vulcanai_tool - -@vulcanai_tool -class SpawnTurtleTool(AtomicTool): - name = "spawn_turtle" - description = "Spawn a new turtle in turtlesim. 'spawned' indicates success." 
- tags = ["turtlesim", "spawn", "add", "create", "service"] - input_schema = [ - ("x", "float"), - ("y", "float"), - ("theta", "float"), - ("name", "string"), - ] - output_schema = {"name": "string", "spawned": "bool"} - - def run(self, **kwargs): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - - client = node.get_client(Spawn, "spawn") - if not client.wait_for_service(timeout_sec=5.0): - raise Exception("Service not available, aborting...") - - req = Spawn.Request() - req.x = float(kwargs.get("x", 5.54)) - req.y = float(kwargs.get("y", 5.54)) - req.theta = float(kwargs.get("theta", 0.0)) - req.name = kwargs.get("name", "") - - with node.node_lock: - future = client.call_async(req) - rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) - result = future.result() - if result is None: - raise Exception("Service call failed timeout, aborting...") - - return {"name": result.name, "spawned": True} - -from std_msgs.msg import String - -@vulcanai_tool -class KillTurtleTool(AtomicTool): - name = "kill_turtle" - description = "Kill a turtle 'name' in turtlesim. 'killed' indicates success." 
- tags = ["turtlesim", "kill", "remove", "delete", "service"] - input_schema = [ - ("name", "string"), - ] - output_schema = {"name": "string", "killed": "bool"} - - def run(self, name: str = ""): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - if not name: - print("No turtle name provided, aborting...") - return {"name": "", "killed": False} - - client = node.get_client(Kill, "kill") - if not client.wait_for_service(timeout_sec=5.0): - raise Exception("Service not available, aborting...") - - req = Kill.Request() - req.name = name - with node.node_lock: - future = client.call_async(req) - rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) - result = future.result() - if result is None: - raise Exception("Service call failed timeout, aborting...") - - return {"name": req.name, "killed": True} - - -@vulcanai_tool -class ResetTurtleSimTool(AtomicTool): - name = "reset_turtlesim" - description = "Reset the turtlesim environment. 'reset' indicates success." - tags = ["turtlesim", "reset", "environment", "service"] - input_schema = [] - output_schema = {"reset": "bool"} - - def run(self): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - - client = node.get_client(Empty, "reset") - if not client.wait_for_service(timeout_sec=5.0): - raise Exception("Service not available, aborting...") - - req = Empty.Request() - with node.node_lock: - future = client.call_async(req) - rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) - result = future.result() - if result is None: - raise Exception("Service call failed timeout, aborting...") - - return {"reset": True} - - -@vulcanai_tool -class ClearTurtleSimTool(AtomicTool): - name = "clear_turtlesim" - description = "Clear the turtlesim environment, meaning it erases every turtle trail. 'cleared' indicates success." 
- tags = ["turtlesim", "clear", "erase", "service"] - input_schema = [] - output_schema = {"cleared": "bool"} - - def run(self): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - - client = node.get_client(Empty, "clear") - if not client.wait_for_service(timeout_sec=5.0): - raise Exception("Service not available, aborting...") - - req = Empty.Request() - with node.node_lock: - future = client.call_async(req) - rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) - result = future.result() - if result is None: - raise Exception("Service call failed timeout, aborting...") - - return {"cleared": True} - - -@vulcanai_tool -class AbsoluteTeleportTurtleTool(AtomicTool): - name = "absolute_teleport_turtle" - description = "Teleport a turtle 'name' to an absolute position in turtlesim. 'teleported' indicates success." - tags = ["turtlesim", "teleport", "absolute", "service"] - input_schema = [ - ("name", "string"), - ("x", "float"), - ("y", "float"), - ("theta", "float"), - ] - output_schema = {"name": "string", "teleported": "bool"} - - def run(self, **kwargs): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - if not kwargs.get("name", ""): - print("No turtle name provided, aborting...") - return {"name": "", "teleported": False} - - name = kwargs.get("name", "") - tp_srv_name = f"/{name}/teleport_absolute" - pen_srv_name = f"/{name}/set_pen" - - try: - # Try to disable the pen before teleporting to avoid drawing a line - # Do not fail if the pen service is not available - pen_result = None - pen_client = node.get_client(SetPen, pen_srv_name) - pen_req = SetPen.Request(r=179, g=184, b=255, width=3, off=1) - with node.node_lock: - pen_future = pen_client.call_async(pen_req) - rclpy.spin_until_future_complete(node, pen_future, timeout_sec=3.0) - pen_result = pen_future.result() - - client = 
node.get_client(TeleportAbsolute, tp_srv_name) - if not client.wait_for_service(timeout_sec=5.0): - raise Exception("Service not available, aborting...") - - req = TeleportAbsolute.Request() - req.x = kwargs.get("x", 0.0) - req.y = kwargs.get("y", 0.0) - req.theta = kwargs.get("theta", 0.0) - - with node.node_lock: - future = client.call_async(req) - rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) - result = future.result() - if result is None: - raise Exception("Service call failed timeout, aborting...") - finally: - if pen_result is not None: - # Restore the pen state after teleporting - pen_req.off = 0 - with node.node_lock: - pen_future = pen_client.call_async(pen_req) - rclpy.spin_until_future_complete(node, pen_future, timeout_sec=3.0) - pen_future.result() - - return {"name": name, "teleported": True} - - -@vulcanai_tool -class RelativeTeleportTurtleTool(AtomicTool): - name = "relative_teleport_turtle" - description = "Teleport a turtle 'name' to a relative position in turtlesim. 'teleported' indicates success." 
- tags = ["turtlesim", "teleport", "relative", "service"] - input_schema = [ - ("name", "string"), - ("linear", "float"), - ("angular", "float"), - ] - output_schema = {"name": "string", "teleported": "bool"} - - def run(self, **kwargs): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - if not kwargs.get("name", ""): - print("No turtle name provided, aborting...") - return {"name": "", "teleported": False} - - name = kwargs.get("name", "") - client_name = f"/{name}/teleport_relative" - - client = node.get_client(TeleportRelative, client_name) - if not client.wait_for_service(timeout_sec=5.0): - raise Exception("Service not available, aborting...") - - req = TeleportRelative.Request() - req.linear = kwargs.get("linear", 0.0) - req.angular = kwargs.get("angular", 0.0) - - with node.node_lock: - future = client.call_async(req) - rclpy.spin_until_future_complete(node, future, timeout_sec=5.0) - result = future.result() - if result is None: - raise Exception("Service call failed timeout, aborting...") - - return {"name": name, "teleported": True} - - -@vulcanai_tool -class GetTurtlePose(AtomicTool): - name = "get_turtle_pose" - description = "Get the current pose of a turtle 'name' in turtlesim. Fails if something goes wrong." 
- tags = ["turtlesim", "pose", "position", "location"] - input_schema = [ - ("name", "string"), - ] - output_schema = { - "name": "string", - "x": "float", - "y": "float", - "theta": "float", - } - - def run(self, name: str = ""): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - name = name or "turtle1" - topic = f"/{name}/pose" - - msg = node.wait_for_message(Pose, topic, timeout_sec=5.0) - if msg is None: - print(f"Could not get pose for turtle '{name}', aborting...") - raise Exception("No pose message received") - - return {"name": name, "x": msg.x, "y": msg.y, "theta": msg.theta} - - -@vulcanai_tool -class MoveTurtleTool(AtomicTool): - name = "move_turtle" - description = "Move the turtle 'name' with 'linear' and 'angular' velocity by publishing the message 'duration' times (seconds). Use zero velocity to stop. 'success' indicates if the command was sent correctly." - tags = ["turtlesim", "move", "velocity", "cmd_vel", "stop", "draw"] - input_schema = [ - ("name", "string"), - ("linear", "float"), - ("angular", "float"), - ("duration", "int"), - ] - output_schema = [{"success": "bool"}] - - def run(self, **kwargs): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - if not kwargs.get("name", ""): - print("No turtle name provided, aborting...") - return {"success": False} - - name = kwargs.get('name', "") - pub = node.get_publisher(Twist, f"/{name}/cmd_vel") - msg = Twist() - msg.linear.x = float(kwargs.get("linear", 0.0)) - msg.angular.z = float(kwargs.get("angular", 0.0)) - for idx in range(int(kwargs.get("duration", 1.0))): - node.get_logger().info(f"Publishing message {idx + 1} to topic /{name}/cmd_vel: linear={msg.linear.x}, angular={msg.angular.z}") - pub.publish(msg) - time.sleep(1) - return {"success": True} - - -@vulcanai_tool -class DrawRectangleTool(CompositeTool): - name = "draw_rectangle" - description = 
"Move the turtle 'name' in a rectangular shape where 'size' is the length of the shortest sides. 'success' indicates if rectangle was drawn successfully." - tags = ["turtlesim", "draw", "rectangle", "move", "cmd_vel"] - input_schema = [ - ("name", "string"), - ("size", "float"), - ] - output_schema = {"success": "bool"} - dependencies = ["move_turtle", "relative_teleport_turtle"] - - def run(self, name: str = "", size: float = 2.0): - node = self.bb.get("main_node", None) - if node is None: - raise Exception("Could not find shared node, aborting...") - if not name: - print("No turtle name provided, aborting...") - return {"success": False} - - # Access the instances of the dependent tools and set their blackboards - move_tool = self.resolved_deps.get("move_turtle", None) - tp_relative_tool = self.resolved_deps.get("relative_teleport_turtle", None) - move_tool.bb = self.bb - tp_relative_tool.bb = self.bb - - name = name or "turtle1" - size = size - linear_speed = 1.0 - angular_turn = pi / 2 - - # Arguments are passed as dictionaries when calling directly other tools - side_1_args = { - "name": name, - "linear": linear_speed, - "angular": 0.0, - "duration": size / linear_speed, - } - side_2_args = { - "name": name, - "linear": linear_speed, - "angular": 0.0, - "duration": (size + 1) / linear_speed, - } - turn_args = { - "name": name, - "linear": 0.0, - "angular": angular_turn, - } - - # Pass arguments as kwargs - move_tool.run(**side_1_args) - tp_relative_tool.run(**turn_args) - move_tool.run(**side_2_args) - tp_relative_tool.run(**turn_args) - move_tool.run(**side_1_args) - tp_relative_tool.run(**turn_args) - move_tool.run(**side_2_args) - tp_relative_tool.run(**turn_args) - - return {"success": True} \ No newline at end of file From 9a4e2667c2130a3fe8f7196572b26db54b60a6d1 Mon Sep 17 00:00:00 2001 From: danipiza Date: Tue, 18 Nov 2025 10:14:09 +0100 Subject: [PATCH 10/40] [#23892] Added stdout/stderr/ros print wrappers Signed-off-by: danipiza --- 
src/vulcanai/console/console.py | 66 ++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 1b68a78..6f4c468 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -41,6 +41,33 @@ from textual.timer import Timer +import sys + + +class StreamToTextual: + """ + Class used to redirect the stdout/stderr streams in the textual terminal + """ + + def __init__(self, app, stream_name: str = "stdout"): + self.app = app + self._real_stream = getattr(sys, stream_name) + + def write(self, data: str): + if not data: + return + + # optional: still write to real stdout/stderr + self._real_stream.write(data) + self._real_stream.flush() + + if data.strip(): + # Ensure update happens on the app thread + self.app.call_from_thread(self.app.append_log_text, data) + + def flush(self): + self._real_stream.flush() + class Prompt(Static): @@ -289,6 +316,8 @@ def __init__(self, tools_from_entrypoints: str = "", user_context: str = "", mai async def on_mount(self) -> None: self.set_input_enabled(False) + sys.stdout = StreamToTextual(self, "stdout") + sys.stderr = StreamToTextual(self, "stderr") self._log("Starting VulcanAI...", log_color=1) @@ -312,6 +341,39 @@ def compose(self) -> ComposeResult: yield Static("", id="logcontent") yield Input(placeholder="> ", id="cmd") + def append_log_text(self, text: str) -> None: + """Append text to the logcontent Static.""" + text = text.rstrip("\n") + if not text: + return + + self._log_lines.append(text) + content = "\n".join(self._log_lines) + + log_static = self.query_one("#logcontent", Static) + log_static.update(content) + + + def attach_ros_logger_to_console(self, node): + logger = node.get_logger() + + def info_hook(msg, *args, **kwargs): + self.call_from_thread(self._log, f"[gray]\[ROS] \[INFO] {msg}[/gray]") + #return original_info(msg, *args, **kwargs) + + def warn_hook(msg, *args, **kwargs): + 
self.call_from_thread(self._log, f"[gray]\[ROS] \[WARN] {msg}[/gray]") + #return original_warn(msg, *args, **kwargs) + + def error_hook(msg, *args, **kwargs): + self.call_from_thread(self._log, f"[gray]\[ROS] \[ERROR] {msg}[/gray]") + #return original_error(msg, *args, **kwargs) + + logger.info = info_hook + logger.warning = warn_hook + logger.error = error_hook + + def log_cb(self, msg: str) -> None: """ Print the msg while executing a function @@ -324,7 +386,7 @@ async def bootstrap(self, user_input: str="") -> None: """ def worker(log_cb: Callable[[str], None], user_input: str="") -> None: - + if user_input == "": self.init_manager(log_cb) @@ -370,6 +432,8 @@ def worker(log_cb: Callable[[str], None], user_input: str="") -> None: # Add the shared node to the console manager blackboard to be used by tools if self.main_node != None: self.manager.bb["main_node"] = self.main_node + self.attach_ros_logger_to_console(self.main_node) + #attach_ros_logger_to_console(self, self.main_node) else: self.set_input_enabled(False) From b1dde58cce5c6e930cc30e3866964744dddb0c8d Mon Sep 17 00:00:00 2001 From: danipiza Date: Tue, 18 Nov 2025 16:17:48 +0100 Subject: [PATCH 11/40] [#23892] Updated terminal style (part 1) Signed-off-by: danipiza --- src/vulcanai/console/console.py | 264 +++++++++++++++++----------- src/vulcanai/core/agent.py | 5 +- src/vulcanai/core/executor.py | 33 +++- src/vulcanai/core/manager.py | 2 +- src/vulcanai/core/plan_types.py | 12 +- src/vulcanai/models/openai.py | 16 +- src/vulcanai/tools/tool_registry.py | 32 ++-- 7 files changed, 240 insertions(+), 124 deletions(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 6f4c468..4c734eb 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -100,8 +100,8 @@ def on_request_start(self, text: str = "Querying LLM...") -> None: Create the spinner line at the end of the log and start updating it. 
""" - self.running_color = "blue" - self.end_color = "bold green" + self.color = "#0d87c0" + self.update_color = "#15B606" self.text = text if self.spinner_timer is not None: @@ -109,7 +109,7 @@ def on_request_start(self, text: str = "Querying LLM...") -> None: # Add a new line for the spinner and remember its index self.spinner_line_index = len(self.console._log_lines) - self.console._log(f"[{self.running_color}]{text}[/{self.running_color}]") + self.console._log(f"[{self.color}]{text}[/{self.color}]") #self.console._log_lines.append(f"[{self.running_color}]{text}[/{self.running_color}]") self.spinner_frame_index = 0 @@ -135,7 +135,7 @@ def update_spinner(self) -> None: self.spinner_frame_index = (self.spinner_frame_index + 1) % len(self.spinner_frames) # Update that specific line only - self.console._log_lines[self.spinner_line_index] = f"[{self.running_color}]{self.text} {frame} [/{self.running_color}]" + self.console._log_lines[self.spinner_line_index] = f"[{self.update_color}]{frame}[/{self.update_color}] [{self.color}]{self.text}[/{self.color}]" #self.console._log(f"[{self.running_color}] Sleeping {frame} [/{self.running_color}]") self.console.render_log() @@ -149,7 +149,7 @@ def on_request_end(self) -> None: self.spinner_timer = None if self.spinner_line_index is not None: - self.console._log_lines[self.spinner_line_index] = f"[{self.end_color}]Query finished.[/{self.end_color}]" + self.console._log_lines[self.spinner_line_index] += f"[{self.update_color}] Query finished![/{self.update_color}]" self.spinner_line_index = None self.console.render_log() @@ -319,7 +319,73 @@ async def on_mount(self) -> None: sys.stdout = StreamToTextual(self, "stdout") sys.stderr = StreamToTextual(self, "stderr") - self._log("Starting VulcanAI...", log_color=1) + # TODO + # https://patorjk.com/software/taag/#p=display&f=Small+Slant&t=VulcanAI&x=none&v=4&h=4&w=80&we=false +#Standard + vulcanai_tittle_std = \ +""" + __ __ _ _ ___ + \ \ / / _| | ___ __ _ _ __ / \ |_ _| + \ \ / / | 
| | |/ __/ _` | '_ \ / _ \ | | + \ V /| |_| | | (_| (_| | | | |/ ___ \ | | + \_/ \__,_|_|\___\__,_|_| |_/_/ \_\___| +""" + +#slant + vulcanai_tittle_slant = \ +""" + _ __ __ ___ ____ +| | / /_ __/ /________ ____ / | / _/ +| | / / / / / / ___/ __ `/ __ \/ /| | / / +| |/ / /_/ / / /__/ /_/ / / / / ___ |_/ / +|___/\__,_/_/\___/\__,_/_/ /_/_/ |_/___/ +""" +#small slant + vulcanai_tittle_small_slant = \ +""" + _ __ __ ___ ____ + | | / /_ __/ /______ ____ / _ | / _/ + | |/ / // / / __/ _ `/ _ \/ __ |_/ / + |___/\_,_/_/\__/\_,_/_//_/_/ |_/___/ +""" + +#Doom + vulcanai_tittle_doom = \ +""" + _ _ _ ___ _____ +| | | | | | / _ \|_ _| +| | | |_ _| | ___ __ _ _ __ / /_\ \ | | +| | | | | | | |/ __/ _` | '_ \| _ | | | +\ \_/ / |_| | | (_| (_| | | | | | | |_| |_ + \___/ \__,_|_|\___\__,_|_| |_\_| |_/\___/ +""" +# Small Block + vulcanai_tittle_block = \ +""" +▌ ▌ ▜ ▞▀▖▜▘ +▚▗▘▌ ▌▐ ▞▀▖▝▀▖▛▀▖▙▄▌▐ +▝▞ ▌ ▌▐ ▌ ▖▞▀▌▌ ▌▌ ▌▐ + ▘ ▝▀▘ ▘▝▀ ▝▀▘▘ ▘▘ ▘▀▘ +""" +#Small + vulcanai_tittle_small = \ +""" + __ __ _ _ ___ + \ \ / / _| |__ __ _ _ _ /_\ |_ _| + \ V / || | / _/ _` | ' \ / _ \ | | + \_/ \_,_|_\__\__,_|_||_/_/ \_\___| +""" + + #self._log(f"{vulcanai_tittle_std}", log_color=1) + #self._log(f"") + self._log(f"{vulcanai_tittle_slant}", log_color=1) + #self._log(f"") + #self._log(f"{vulcanai_tittle_small_slant}", log_color=1) + #self._log(f"") + #self._log(f"{vulcanai_tittle_block}", log_color=1) + #self._log(f"") + #self._log(f"{vulcanai_tittle_small}", log_color=1) + await asyncio.sleep(0) asyncio.create_task(self.bootstrap()) @@ -374,21 +440,15 @@ def error_hook(msg, *args, **kwargs): logger.error = error_hook - def log_cb(self, msg: str) -> None: - """ - Print the msg while executing a function - """ - self.call_from_thread(self._log, msg) - async def bootstrap(self, user_input: str="") -> None: """ Function used to print information in runtime execution of a function """ - def worker(log_cb: Callable[[str], None], user_input: str="") -> None: + def worker(user_input: str="") -> None: if 
user_input == "": - self.init_manager(log_cb) + self.init_manager() # add the commands # command registry: name -> handler @@ -406,7 +466,6 @@ def worker(log_cb: Callable[[str], None], user_input: str="") -> None: "/exit": self.cmd_quit, } - #log_cb("Added commands.") # cycling through tab matches self._tab_matches = [] @@ -418,16 +477,13 @@ def worker(log_cb: Callable[[str], None], user_input: str="") -> None: except Exception: pass - #log_cb("Added hooks.") if self.tools_from_entrypoints != "": self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) - #log_cb("Added tools.") self.manager.add_user_context(self.user_context) - #log_cb("Added user_context.") # Add the shared node to the console manager blackboard to be used by tools if self.main_node != None: @@ -454,7 +510,7 @@ def worker(log_cb: Callable[[str], None], user_input: str="") -> None: self.last_bb = result.get("blackboard", None) #self.print(f"Output of plan: {result.get('blackboard', {None})}") - self._log(f"Output of plan: {result.get('blackboard', {None})}") + self._log(f"Output of plan: {result.get('blackboard', {None})}", log_color=2) except KeyboardInterrupt: #console.print("[yellow]Exiting...[/yellow]") @@ -465,64 +521,21 @@ def worker(log_cb: Callable[[str], None], user_input: str="") -> None: self._log("[yellow]Exiting...[/yellow]") return - def handle_user_query(self, user_input) -> None: - """ - Function used in '/edit_tools' command. - It creates a dialog with all the tools. - """ - # create the checklist dialog - # Check for image input. Must be always at the end of the input - #user_input = "move trutle 1 unit forward." 
- - try: - self.set_input_enabled(False) - - images = [] - if "--image=" in user_input: - images = self.get_images(user_input) - - # Handle user request - try: - result = self.manager.handle_user_request(user_input, context={"images": images}) - except Exception as e: - #self.print(f"[error]Error handling request:[/error] {e}") - self._log(f"[error]Error handling request:[/error] {e}") - return - - self.last_plan = result.get("plan", None) - self.last_bb = result.get("blackboard", None) - - #self.print(f"Output of plan: {result.get('blackboard', {None})}") - self._log(f"Output of plan: {result.get('blackboard', {None})}") - - except KeyboardInterrupt: - #console.print("[yellow]Exiting...[/yellow]") - self._log("[yellow]Exiting...[/yellow]") - return - except EOFError: - #console.print("[yellow]Exiting...[/yellow]") - self._log("[yellow]Exiting...[/yellow]") - return - if user_input != "": self.hooks.on_request_start() loop = asyncio.get_running_loop() - await loop.run_in_executor(None, lambda: worker(self.log_cb, user_input)) + await loop.run_in_executor(None, lambda: worker(user_input)) if user_input == "": - #await loop.run_in_executor(None, lambda: worker(log_cb, user_input)) self._is_ready = True self.set_input_enabled(True) self._log("VulcanAI Interactive Console", log_color=2) - self._log("Type 'exit' to quit.\n", log_color=2) + self._log("Type [bold]'exit'[/bold] to quit.\n", log_color=2) else: self.set_input_enabled(True) - #await loop.run_in_executor(None, lambda: worker(log_cb)) - #await loop.run_in_executor(None, lambda: handle_user_query()) - # region Utilities """@property @@ -557,15 +570,14 @@ async def open_checklist(self, tools_list: list[str], active_tools_num: int) -> elif not selected: self._log("No items selected.", log_color=3) else: - self._log("Submitting selected lines:", log_color=1) for tool_tmp in tools_list: tool = tool_tmp[2:] # remove "- " if tool_tmp in selected: self.manager.registry.activate_tool(tool) else: - 
self.manager.registry.deactivate_tool(tool) - self._log(f"Deactivated tool '{tool}'", log_color=2) + if self.manager.registry.deactivate_tool(tool): + self._log(f"Deactivated tool [bold]'{tool}'[/bold]", log_color=2) # endregion @@ -574,19 +586,20 @@ async def open_checklist(self, tools_list: list[str], active_tools_num: int) -> def cmd_help(self, _) -> None: table = "\n".join( [ - "Available commands:\n" - "/help - Show this help message\n" - "/tools - List available tools\n" - "/edit_tools - Edit the list of available tools\n" - "/change_k - Change the 'k' value for the top_k algorithm selection or show the current value if no is provided\n" - "/history - Change the history depth or show the current value if no is provided\n" - "/show_history - Show the current history\n" - "/plan - Show the last generated plan\n" - "/rerun - Rerun the last plan\n" - "/bb - Show the last blackboard state\n" - "/clear - Clear the console screen\n" - "/exit - Exit the console\n" - "Query any other text to process it with the LLM and execute the plan generated.\n" + "[bold]Available commands:[/bold]\n" + "‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\n" + "/[bold]help[/bold] - Show this help message\n" + "/[bold]tools[/bold] - List available tools\n" + "/[bold]edit_tools[/bold] - Edit the list of available tools\n" + "/[bold]change_k [/bold] - Change the 'k' value for the top_k algorithm selection or show the current value if no is provided\n" + "/[bold]history [/bold] - Change the history depth or show the current value if no is provided\n" + "/[bold]show_history[/bold] - Show the current history\n" + "/[bold]plan[/bold] - Show the last generated plan\n" + "/[bold]rerun[/bold] - Rerun the last plan\n" + "/[bold]bb[/bold] - Show the last blackboard state\n" + "/[bold]clear[/bold] - Clear the console screen\n" + "/[bold]exit[/bold] - Exit the console\n" + "[bold]Query any other text[/bold] to process it with the LLM and execute the plan generated.\n\n" "Add --image= to include images in the query. 
It can be used multiple times to add more images.\n" "Example: ' --image=/path/to/image1 --image=/path/to/image2'" ] @@ -594,10 +607,13 @@ def cmd_help(self, _) -> None: self._log(table, log_color=2) def cmd_tools(self, _) -> None: - help_msg = f"Available tools (current index k={self.manager.k}):\n" + tmp_msg = f"Available tools (current index k={self.manager.k}):" + tool_msg = f"[bold]{tmp_msg}[/bold]\n" + tool_msg += "‾" * len(tmp_msg) +'\n' + for tool in self.manager.registry.tools.values(): - help_msg += f"- {tool.name}: {tool.description}\n" - self._log(help_msg, log_color=2) + tool_msg += f"- [bold]{tool.name}:[/bold] {tool.description}\n" + self._log(tool_msg, log_color=2) def cmd_edit_tools(self, _) -> None: tools_list = [] @@ -642,11 +658,11 @@ def cmd_show_history(self, _) -> None: self._log("No history available.", log_color=2) return - help_msg = "\nCurrent history (oldest first):\n" + history_msg = "\nCurrent history (oldest first):\n" for i, (user_text, plan_summary) in enumerate(self.manager.history): - help_msg += f"{i+1}. User: {user_text}\n Plan summary: {plan_summary}\n" + history_msg += f"{i+1}. 
User: {user_text}\n Plan summary: {plan_summary}\n" - self._log(help_msg, log_color=2) + self._log(history_msg, log_color=2) def cmd_plan(self, _) -> None: if self.last_plan: @@ -697,26 +713,70 @@ def render_log(self) -> None: # Allow Rich markup for colors and styles log.update("\n".join(self._log_lines), markup=True)""" - def _log(self, line: str, log_type: str = "", log_color: int = -1) -> None: + def _log(self, line: str, log_type: str = "", log_color: int = -1, print_args_idx: int=-1) -> None: msg = "" color_type = "" if log_type == "register": - msg = "[bold cyan]\[REGISTRY][/bold cyan] " + color_tmp = "#068399" + msg = f"[bold {color_tmp}]\[REGISTRY][/bold {color_tmp}] " elif log_type == "manager": - msg = "[bold blue]\[MANAGER][/bold blue] " + color_tmp = "#0d87c0" + msg = f"[bold {color_tmp}]\[MANAGER][/bold {color_tmp}] " elif log_type == "executor": - msg = "[bold green]\[EXECUTOR][/bold green] " + color_tmp = "#15B606" + msg = f"[bold {color_tmp}]\[EXECUTOR][/bold {color_tmp}] " elif log_type == "validator": msg = "[bold orange_red1]\[VALIDATOR][/bold orange_red1] " elif log_type == "error": msg = "[bold red]\[ERROR][/bold red] " + + """if print_args_idx > 0: + msg += line[:print_args_idx] + i = print_args_idx + n = len(line) + + color_1 = "#C49C00" + color_2 = "#069899" + msg += f"[{color_1}]" + + while i < n: + c = line[i] + if c == '=' or c == ':': + msg += f"[/{color_1}]" + msg += line[i] + msg += f"[{color_2}]" + elif c == ',': + msg += f"[/{color_2}]" + msg += line[i] + msg += f"[{color_1}]" + elif c == '}' or c == ')': + msg += f"[/{color_2}]" + msg+=line[i] + break + else: + msg += line[i] + + i+=1 + + if i < n: + msg += line[i:] + + + + self._log_lines.append(msg) + self.render_log() + return""" + + + + if log_color == 0: color_type = "#FF0000" elif log_color == 1: - color_type = "#4E9A06" + color_type = "#56AA08" elif log_color == 2: color_type = "#8F6296" elif log_color == 3: @@ -724,7 +784,10 @@ def _log(self, line: str, log_type: str = "", 
log_color: int = -1) -> None: elif log_color == 4: color_type = "#069899" else: - color_type = "#FFFFFF" + msg += f"{line}" + self._log_lines.append(msg) + self.render_log() + return msg += f"[{color_type}]{line}[/{color_type}]" self._log_lines.append(msg) @@ -820,7 +883,8 @@ async def on_input_submitted(self, event: Input.Submitted) -> None: self._history_index = None # echo what the user typed (keep this if you like the prompt arrow) - self._log(f"\[USER] >>> {cmd}") + color_user = "#91DD16" + self._log(f"[bold {color_user}]\[USER] >>>[/bold {color_user}] {cmd}") # If it doesn't start with '/', just print it as output and stop here if user_input.startswith("/"): @@ -1064,19 +1128,21 @@ def run_console(self) -> None: console.print("[yellow]Exiting...[/yellow]") break""" - def init_manager(self, log_cb: Callable[[str], None]) -> None: + def init_manager(self) -> None: if self.iterative: from vulcanai.core.manager_iterator import IterativeManager as ConsoleManager else: from vulcanai.core.manager_plan import PlanManager as ConsoleManager - # TODO. use log_cb to print the log information - #console.print(f"[console]Initializing Manager '{ConsoleManager.__name__}'...[/console]") - self._log(f"Initializing Manager '{ConsoleManager.__name__}'...", log_color=2) + # Print in textual terminal: + # Initializing Manager '' ... 
+ self._log(f"Initializing Manager [bold]'{ConsoleManager.__name__}'[/bold] ...", log_color=2) self.manager = ConsoleManager(model=self.model, k=self.k, logger=self._log) - #self.print(f"Manager initialized with model '{self.model}'.") - self._log(f"Manager initialized with model '{self.model}'", log_color=2) + + # Print in textual terminal: + # Manager initialized with model '' + self._log(f"Manager initialized with model [bold]'{self.model}[/bold]'", log_color=2) def print(self, msg: str) -> None: diff --git a/src/vulcanai/core/agent.py b/src/vulcanai/core/agent.py index d71829a..2745b37 100644 --- a/src/vulcanai/core/agent.py +++ b/src/vulcanai/core/agent.py @@ -25,6 +25,9 @@ class Brand(str, Enum): class Agent: + + class_color = "#0d87c0" + """Interface to operate the LLM.""" def __init__(self, model_name: str, logger=None): self.brand, name = self._detect_brand(model_name) @@ -133,7 +136,7 @@ def _detect_brand(self, model_name: str) -> tuple[Brand, str]: def _load_model(self, model_name: str): if self.brand == Brand.gpt: from vulcanai.models.openai import OpenAIModel - self.logger(f"Using OpenAI API with model: {model_name}", log_type="manager") + self.logger(f"Using OpenAI API with model: [{self.class_color}]{model_name}[/{self.class_color}]", log_type="manager") self.model = OpenAIModel(model_name, self.logger) elif self.brand == Brand.gemini: diff --git a/src/vulcanai/core/executor.py b/src/vulcanai/core/executor.py index 9507cbd..39b1074 100644 --- a/src/vulcanai/core/executor.py +++ b/src/vulcanai/core/executor.py @@ -57,6 +57,11 @@ def text_snapshot(self, keys: Optional[List[str]] = None) -> str: class PlanExecutor: """Executes a validated GlobalPlan with blackboard and execution control parameters.""" + class_color = "#15B606" + color_variable = "#C49C00" + color_value = "#069899" + color_error = "#CC0C0C" + def __init__(self, registry, logger=None): self.registry = registry self.logger = logger #or VulcanAILogger.log_executor @@ -87,7 +92,8 @@ def 
_run_plan_node(self, node: PlanBase, bb: Blackboard) -> bool: for i in range(attempts): ok = self._execute_plan_node_with_timeout(node, bb) if ok and self._check_success(node, bb): - self.logger(f"PlanNode {node.kind} succeeded on attempt {i+1}/{attempts}", log_type="executor") + self.logger(f"PlanNode [{self.color_value}]{node.kind}[/{self.color_value}] " + \ + f"[{self.class_color}]succeeded[/{self.class_color}] on attempt {i+1}/{attempts}", log_type="executor") return True self.logger(f"PlanNode {node.kind} failed on attempt {i+1}/{attempts}", log_type="executor", log_color=0) # error @@ -250,8 +256,22 @@ def _call_tool(self, arg_dict = {a.key: a.val for a in args} tool.bb = bb + first = True + + msg = f"Invoking [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] with args: [italic]" + msg += "'{" + for key, value in arg_dict.items(): + if first: + msg += f"[{self.color_variable}]'{key}'[/{self.color_variable}]: [{self.color_value}]'{value}'[/{self.color_value}]" + else: + msg += f", [{self.color_variable}]'{key}'[/{self.color_variable}]: [{self.color_value}]'{value}'[/{self.color_value}]" + first = False + msg+="}''" + self.logger(msg, log_type="executor") + #self.logger(f"Invoking [italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]", log_type="executor", print_args_idx=49+1+len(tool_name)) + #self.logger(f"Invoking [italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]", log_type="executor", print_args_idx=25+len(tool_name)) + start = time.time() - self.logger(f"Invoking [italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]", log_type="executor") tool_log = "" try: if timeout_ms: @@ -276,11 +296,14 @@ def _call_tool(self, if tool_log: self.logger(f"{tool_log}: {tool_name} TODO. 
danip", log_type="executor")#, tool_name=tool_name) # TODO danip elapsed = (time.time() - start) * 1000 - self.logger(f"Executed [italic]'{tool_name}'[/italic] in {elapsed:.1f} ms with result: {result}", log_type="executor") + self.logger(f"Executed [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] in [{self.color_value}]{elapsed:.1f} ms[/{self.color_value}] " + \ + f"with result: [bold][{self.class_color}]{result}[/{self.class_color}][/bold]", log_type="executor") return True, result except concurrent.futures.TimeoutError: - self.logger(f"Execution of [italic]'{tool_name}'[/italic] timed out after {timeout_ms} ms", log_type="executor") + self.logger(f"Execution of [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] [{self.color_error}]timed out[/{self.color_error}] " + \ + f"after [{self.color_value}]{timeout_ms}[/{self.color_value}] ms", log_type="executor") return False, None except Exception as e: - self.logger(f"Execution failed for [italic]'{tool_name}'[/italic]: {e}", log_type="executor") + self.logger(f"Execution [bold][{self.color_error}]failed[\{self.color_error}][/bold] for " + \ + f"[italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic]: {e}", log_type="executor") return False, None diff --git a/src/vulcanai/core/manager.py b/src/vulcanai/core/manager.py index 2f34383..4c06f95 100644 --- a/src/vulcanai/core/manager.py +++ b/src/vulcanai/core/manager.py @@ -97,7 +97,7 @@ def handle_user_request(self, user_text: str, context: Dict[str, Any]) -> Dict[s try: self.validator.validate(plan) except Exception as e: - self.logger(f"Plan validation error: {e}", log_type="validator") # error + self.logger(f"Plan validation error: {e}", log_type="validator", log_color=0) # error raise e # Execute plan ret = self.execute_plan(plan) diff --git a/src/vulcanai/core/plan_types.py b/src/vulcanai/core/plan_types.py index e426957..917ebcf 100644 --- a/src/vulcanai/core/plan_types.py +++ 
b/src/vulcanai/core/plan_types.py @@ -80,8 +80,12 @@ def __str__(self) -> str: if self.summary: lines.append(f"- [bold]Plan Summary[/bold]: {self.summary}\n") + color_tool = "#15B606" + color_variable = "#C49C00" + color_value = "#069899" + for i, node in enumerate(self.plan, 1): - lines.append(f"- PlanNode {i}: kind={node.kind}") + lines.append(f"- PlanNode {i}: [{color_variable}]kind[/{color_variable}]=[{color_value}]{node.kind}[/{color_value}]") if node.condition: lines.append(f" Condition: {node.condition}") if node.retry: @@ -93,8 +97,10 @@ def __str__(self) -> str: if node.on_fail: lines.append(f" On Fail: {node.on_fail.kind} with {len(node.on_fail.steps)} steps") for j, step in enumerate(node.steps, 1): - arg_str = ", ".join([f"{a.key}={a.val}" for a in step.args]) if step.args else "no args" - lines.append(f" Step {j}: {step.tool}({arg_str})") + #arg_str: =, ..., = + arg_str = ", ".join([f"[{color_variable}]{a.key}[/{color_variable}]=[{color_value}]{a.val}[/{color_value}]" for a in step.args]) if step.args else f"[{color_value}]no args[/{color_value}]" + # Step : () + lines.append(f" Step {j}: [{color_tool}]{step.tool}[/{color_tool}]({arg_str})") if step.condition: lines.append(f" Condition: {step.condition}") if step.retry: diff --git a/src/vulcanai/models/openai.py b/src/vulcanai/models/openai.py index 2f2598e..db4e359 100644 --- a/src/vulcanai/models/openai.py +++ b/src/vulcanai/models/openai.py @@ -26,6 +26,9 @@ class OpenAIModel(IModel): + + color = "#0d87c0" + """ Wrapper for OpenAI models. 
""" def __init__(self, model_name: str, logger=None, hooks: Optional[IModelHooks] = None):#, console = None): super().__init__() @@ -80,7 +83,7 @@ def _inference( response_format=response_cls, ) except Exception as e: - self.logger(f"OpenAI API error: {e}", error=True) + self.logger(f"OpenAI API error: {e}", log_type="manager", error=True) return None finally: # Notify hooks of request end @@ -95,14 +98,17 @@ def _inference( try: parsed = completion.choices[0].message.parsed except Exception as e: - self.logger(f"Failed to parse response into {response_cls.__name__}: {e}", error=True) + self.logger(f"Failed to parse response into {response_cls.__name__}: {e}", log_type="manager", error=True) end = time.time() - self.logger(f"GPT response time: {end - start:.3f} seconds") + self.logger(f"GPT response time: [{self.color}]{end - start:.3f} seconds[/{self.color}]", log_type="manager") try: input_tokens = completion.usage.prompt_tokens output_tokens = completion.usage.completion_tokens - self.logger(f"Prompt tokens: {input_tokens}, Completion tokens: {output_tokens}") + # Print in textual terminal: + # [MANAGER] Prompt tokens: , Completion tokens: + self.logger(f"Prompt tokens: [{self.color}]{input_tokens}[/{self.color}], " + \ + f"Completion tokens: [{self.color}]{output_tokens}[/{self.color}]", log_type="manager") except Exception: pass @@ -125,7 +131,7 @@ def _build_user_content(self, user_text: str, images: Optional[Iterable[str]]) - }) except Exception as e: # Fail soft on a single bad image but continue with others - self.logger(f"Image '{image_path}' could not be encoded: {e}", error=True) + self.logger(f"Image '{image_path}' could not be encoded: {e}", log_type="manager", error=True) return content def _build_messages( diff --git a/src/vulcanai/tools/tool_registry.py b/src/vulcanai/tools/tool_registry.py index 9be404f..78487e5 100644 --- a/src/vulcanai/tools/tool_registry.py +++ b/src/vulcanai/tools/tool_registry.py @@ -72,6 +72,9 @@ def run(self, **kwargs): 
class ToolRegistry: + + class_color = "#068399" + """Holds all known tools and performs vector search over metadata.""" def __init__(self, embedder=None, logger=None): # Logging function @@ -92,31 +95,36 @@ def __init__(self, embedder=None, logger=None): # Validation tools list to retrieve validation tools separately self.validation_tools: List[str] = [] - def register_tool(self, tool: ITool, solve_deps: bool = True): + def register_tool(self, tool: ITool, solve_deps: bool = True) -> bool: """Register a single tool instance.""" # Avoid duplicates if tool.name in self.tools: - return + return false + self.tools[tool.name] = tool if tool.is_validation_tool: self.validation_tools.append(tool.name) emb = self.embedder.embed(self._doc(tool)) self._index.append((tool.name, emb)) - self.logger(f"Registered tool: {tool.name}", log_type="register") + self.logger(f"Registered tool: [{self.class_color}]{tool.name}[/{self.class_color}]", log_type="register") self.help_tool.available_tools = self.tools if solve_deps: # Get class of tool if issubclass(type(tool), CompositeTool): self._resolve_dependencies(tool) - def activate_tool(self, tool_name): + return True + + def activate_tool(self, tool_name) -> bool: # check if the tool is already active if tool_name in self.tools: - return + return False # check if the tool is deactivated if tool_name not in self.deactivated_tools: - self.logger(f"Tool '{tool_name}' not found in the deactivated tools list.", error=True) - return + self.logger(f"Tool [{self.class_color}]'{tool_name}'[/{self.class_color}] not found in the deactivated tools list.", error=True) + return False + + self.logger(f"Tool [{self.class_color}]'{tool_name}'[/{self.class_color}] not found in the deactivated tools list.", error=True) # add the tool to the active tools self.tools[tool_name] = self.deactivated_tools[tool_name] @@ -124,14 +132,16 @@ def activate_tool(self, tool_name): # removed the tool from the deactivated tools del self.deactivated_tools[tool_name] - 
def deactivate_tool(self, tool_name): + return True + + def deactivate_tool(self, tool_name) -> bool: # check if the tool is already deactivated if tool_name in self.deactivated_tools: - return + return False # check if the tool is active if tool_name not in self.tools: self.logger(f"Tool '{tool_name}' not found int the active tools list.", error=True) - return + return False # add the tool to the deactivated tools self.deactivated_tools[tool_name] = self.tools[tool_name] @@ -139,6 +149,8 @@ def deactivate_tool(self, tool_name): # removed the tool from the active tools del self.tools[tool_name] + return True + def register(self): """Register all loaded classes marked with @vulcanai_tool.""" composite_classes = [] From 2fb9c822a227057c72814a7f677188933f1de8f8 Mon Sep 17 00:00:00 2001 From: danipiza Date: Wed, 19 Nov 2025 07:54:26 +0100 Subject: [PATCH 12/40] [#23892] Added 'ctrl+v' and keep latest terminal input for history navigation Signed-off-by: danipiza --- pyproject.toml | 1 + src/vulcanai/console/console.py | 34 ++++++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 64d19d4..6e0441d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ dependencies = [ "textual-dev==1.8.0", "textual-serve==1.1.3", "typeguard==2.13", + "pyperclip==1.11.0", ] [project.optional-dependencies] diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 4c734eb..5cecfa1 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -43,6 +43,9 @@ import sys +# library used to paste the clipboard into the terminal +import pyperclip + class StreamToTextual: """ @@ -934,6 +937,7 @@ async def on_key(self, event: events.Key) -> None: if key in ("up", "down"): + # Only handle history navigation if input is focused if self.focused is not cmd_input: return @@ -945,6 +949,10 @@ async def on_key(self, event: events.Key) -> None: if not hasattr(self, 
"_history_index") or self._history_index is None: self._history_index = len(self.history) + # store the command input if it is new + if self._history_index == len(self.history): + self.terminal_input = cmd_input.value + if key == "up" and self._history_index > 0: self._history_index -= 1 elif key == "down" and self._history_index < len(self.history): @@ -952,11 +960,13 @@ async def on_key(self, event: events.Key) -> None: else: return # ignore if out of range + self._log(f"Historyindex = {self._history_index}") + # Update input value based on history if 0 <= self._history_index < len(self.history): cmd_input.value = self.history[self._history_index] else: - cmd_input.value = "" + cmd_input.value = self.terminal_input # Move cursor to end cmd_input.cursor_position = len(cmd_input.value) @@ -1047,6 +1057,28 @@ async def on_key(self, event: events.Key) -> None: event.stop() return + if key == "ctrl+v": + try: + paste_text = pyperclip.paste() or "" + except Exception as e: + self._log(f"Clipboard error: {e}", log_color=0) + return + + if not paste_text: + return + + value = cmd_input.value + cursor = cmd_input.cursor_position + + # Insert clipboard text at cursor + cmd_input.value = value[:cursor] + paste_text + value[cursor:] + cmd_input.cursor_position = cursor + len(paste_text) + + cmd_input.focus() + event.prevent_default() + event.stop() + return + # Any other keypress resets tab cycle if the prefix changes if len(key) == 1 or key in ("backspace", "delete"): self._tab_matches = [] From 290eaa4ba014696332e30f041b2a273618dc8473 Mon Sep 17 00:00:00 2001 From: danipiza Date: Thu, 20 Nov 2025 07:30:20 +0100 Subject: [PATCH 13/40] [#23892] Cleaned code and updated terminal style Signed-off-by: danipiza --- src/vulcanai/console/console.py | 372 +++++--------------------- src/vulcanai/core/agent.py | 29 +- src/vulcanai/core/executor.py | 98 +++++-- src/vulcanai/core/manager.py | 26 +- src/vulcanai/core/manager_iterator.py | 2 +- src/vulcanai/core/plan_types.py | 48 +++- 
src/vulcanai/models/gemini.py | 79 ++++-- src/vulcanai/models/ollama_model.py | 36 ++- src/vulcanai/models/openai.py | 42 +-- src/vulcanai/tools/tool_registry.py | 66 +++-- 10 files changed, 370 insertions(+), 428 deletions(-) diff --git a/src/vulcanai/console/console.py b/src/vulcanai/console/console.py index 5cecfa1..29c3632 100644 --- a/src/vulcanai/console/console.py +++ b/src/vulcanai/console/console.py @@ -71,33 +71,23 @@ def write(self, data: str): def flush(self): self._real_stream.flush() - - -class Prompt(Static): - """A tiny prompt label shown to the left of the input.""" - - def __init__(self, text: str = "> "): - super().__init__(text, id="prompt") - class SpinnerHook: + """ + Single entrant spinner controller for console. + - Starts the spinner on the LLM request. + - Stops the spinner when LLM request is over. + """ def __init__(self, console): self.console = console - # spinner states + # Spinner states self.spinner_timer: Timer | None = None self.spinner_frames = ["⠋","⠙","⠹","⠸","⠼","⠴","⠦","⠧","⠇","⠏"] self.spinner_frame_index = 0 self.spinner_line_index: int | None = None - - #self.running_query = False - #self.create_spinner_timer() - - async def create_spinner_timer(self): - self.spinner_timer = self.console.set_interval(0.1, self.update_spinner) - def on_request_start(self, text: str = "Querying LLM...") -> None: """ Create the spinner line at the end of the log and start updating it. 
@@ -107,30 +97,26 @@ def on_request_start(self, text: str = "Querying LLM...") -> None: self.update_color = "#15B606" self.text = text + # Check if it is already running if self.spinner_timer is not None: - return # already running + return - # Add a new line for the spinner and remember its index + # Initialized the class variables self.spinner_line_index = len(self.console._log_lines) self.console._log(f"[{self.color}]{text}[/{self.color}]") - #self.console._log_lines.append(f"[{self.running_color}]{text}[/{self.running_color}]") self.spinner_frame_index = 0 # Update every 0.1s self.spinner_timer = self.console.set_interval(0.1, self.update_spinner) - #self.call_from_thread(self.create_spinner_timer) - #self.running_query = True - + # Update the terminal self.console.render_log() def update_spinner(self) -> None: """ Timer callback. Rotate the spinner frame on the stored last log line. """ - #if self.running_query == False: - # return - + # Check if the spinner is not running if self.spinner_line_index is None: return @@ -138,8 +124,11 @@ def update_spinner(self) -> None: self.spinner_frame_index = (self.spinner_frame_index + 1) % len(self.spinner_frames) # Update that specific line only - self.console._log_lines[self.spinner_line_index] = f"[{self.update_color}]{frame}[/{self.update_color}] [{self.color}]{self.text}[/{self.color}]" - #self.console._log(f"[{self.running_color}] Sleeping {frame} [/{self.running_color}]") + self.console._log_lines[self.spinner_line_index] = \ + f"[{self.update_color}]{frame}[/{self.update_color}] " + \ + f"[{self.color}]{self.text}[/{self.color}]" + + # Update the terminal self.console.render_log() def on_request_end(self) -> None: @@ -147,21 +136,25 @@ def on_request_end(self) -> None: Stop the spinner. 
Optional, replace the line with final_text.""" + # Check if the spinner is running if self.spinner_timer is not None: self.spinner_timer.stop() self.spinner_timer = None + # Update the spinner message line if self.spinner_line_index is not None: self.console._log_lines[self.spinner_line_index] += f"[{self.update_color}] Query finished![/{self.update_color}]" self.spinner_line_index = None self.console.render_log() -# ---------- Modal checklist ---------- + class CheckListScreen(ModalScreen[list[str] | None]): - """A modal screen with the tools checkboxes and Submit/Cancel buttons.""" + """ + A modal screen with the tools checkboxes and Submit/Cancel buttons. + """ - DEFAULT_CSS = """ + CSS = """ CheckListScreen { align: center middle; } @@ -192,10 +185,11 @@ def __init__(self, lines: Iterable[str], active_tools_num: int=0) -> None: def compose(self) -> ComposeResult: with Vertical(classes="dialog"): - yield Label("Pick the lines you want to print", classes="title") - # Make one checkbox per provided line + yield Label("Pick tools you want to enable", classes="title") + for i, line in enumerate(self._lines, start=1): - yield Checkbox(line, value=i<=self.active_tools_num, id=f"cb{i}")#True, id=f"cb{i}") + # The checkbox value is True = tool activated. 
False otherwise + yield Checkbox(line, value=i<=self.active_tools_num, id=f"cb{i}") with Horizontal(classes="btns"): yield Button("Cancel", variant="default", id="cancel") yield Button("Submit", variant="primary", id="submit") @@ -205,7 +199,7 @@ def on_button_pressed(self, event: Button.Pressed) -> None: boxes = list(self.query(Checkbox)) # Use the original strings instead of Checkbox.label (renderable) selected = [self._lines[i] for i, cb in enumerate(boxes) if cb.value] - self.dismiss(selected) # -> list[str] + self.dismiss(selected) elif event.button.id == "cancel": self.dismiss(None) @@ -216,78 +210,14 @@ def on_mount(self) -> None: class VulcanConsole(App): - # Terminal style - """CSS = - Screen { - layout: vertical; - } - - # Header at top, then the log grows, then input row at bottom - # A small prompt sits to the left of the input for terminal vibes - - # Output area - # Make it fill remaining space and look terminal-ish - # Log already has a good style; just ensure it expands - # and wraps nicely. - Log#log { - height: 1fr; - border: solid rgb(0, 205, 0); - background: $boost; - padding: 1 2; - overflow-y: auto; - } - - .input-row { - height: auto; - layout: horizontal; - padding: 0 1; - dock: bottom; - background: $panel; - } - - # Prompt label - # Slightly dim to look like a shell prompt - # Input stretches to fill the row - # The Input gets a monospace look by default under Textual - - # Prompt label style - # (Using Static for a simple label avoids extra dependencies.) 
- - # Make the input larger for comfortable typing - Input#cmd { - width: 1fr; - padding: 0 1; - } - - Static#prompt { - width: auto; - color: $text-muted; - content-align: left middle; - } - - Static#hint { - height: auto; - color: $text-muted; - padding: 0 2; - } - """ - CSS = """ #log { height: 1fr; } #cmd { dock: bottom; } """ - """BINDINGS = [ - ("ctrl+q", "app_quit", "Quit"), - ("ctrl+p", "clear", "Clear"), - ("f1", "help", "Help"), - ("ctrl+alt+c", "copy", "Copy Selection"), # new: mirrors terminal habit - ]""" - BINDINGS = [ ("ctrl+c", "copy_to_clipboard", "Copy log to clipboard"), ("y", "copy_log", "Copy log to clipboard"), - #("ctrl+l", "clear", "Clear the terminal"), ] def __init__(self, tools_from_entrypoints: str = "", user_context: str = "", main_node = None, @@ -309,15 +239,15 @@ def __init__(self, tools_from_entrypoints: str = "", user_context: str = "", mai self.main_node = main_node self.commands = None - self._tab_matches = [] - self._tab_index = 0 + self.tab_matches = [] + self.tab_index = 0 self._log_lines = [] # terminal qol self.history = [] async def on_mount(self) -> None: - + # Disable terminal input self.set_input_enabled(False) sys.stdout = StreamToTextual(self, "stdout") sys.stderr = StreamToTextual(self, "stderr") @@ -394,18 +324,6 @@ async def on_mount(self) -> None: asyncio.create_task(self.bootstrap()) def compose(self) -> ComposeResult: - """yield Header(show_clock=True) - #yield Log(id="term", highlight=True)#, markup=True) - yield Log(id="log", highlight=True) - yield Static("Press [b]F1[/b] for help • [b]Ctrl+C[/b] to quit • [b]Tab[/b] to autocomplete", id="hint") - with Static(classes="input-row"): - yield Prompt("> ") - yield Input(placeholder="Type a command and press Enter…", id="cmd") - yield Footer()""" - - """yield Static("", id="log") - yield Input(placeholder="> ", id="cmd")""" - with VerticalScroll(id="logview"): yield Static("", id="logcontent") yield Input(placeholder="> ", id="cmd") @@ -424,19 +342,19 @@ def 
append_log_text(self, text: str) -> None: def attach_ros_logger_to_console(self, node): + """ + Function that remove ROS node overlaping prints in the terminal + """ logger = node.get_logger() def info_hook(msg, *args, **kwargs): self.call_from_thread(self._log, f"[gray]\[ROS] \[INFO] {msg}[/gray]") - #return original_info(msg, *args, **kwargs) def warn_hook(msg, *args, **kwargs): self.call_from_thread(self._log, f"[gray]\[ROS] \[WARN] {msg}[/gray]") - #return original_warn(msg, *args, **kwargs) def error_hook(msg, *args, **kwargs): self.call_from_thread(self._log, f"[gray]\[ROS] \[ERROR] {msg}[/gray]") - #return original_error(msg, *args, **kwargs) logger.info = info_hook logger.warning = warn_hook @@ -450,6 +368,7 @@ async def bootstrap(self, user_input: str="") -> None: def worker(user_input: str="") -> None: + # INITIALIZE CODE if user_input == "": self.init_manager() @@ -469,10 +388,9 @@ def worker(user_input: str="") -> None: "/exit": self.cmd_quit, } - # cycling through tab matches - self._tab_matches = [] - self._tab_index = 0 + self.tab_matches = [] + self.tab_index = 0 # Override hooks with spinner controller try: @@ -480,24 +398,29 @@ def worker(user_input: str="") -> None: except Exception: pass - if self.tools_from_entrypoints != "": self.manager.register_tools_from_entry_points(self.tools_from_entrypoints) - + else: + self._log("WARNING. No tools added", log_color=3) self.manager.add_user_context(self.user_context) - # Add the shared node to the console manager blackboard to be used by tools if self.main_node != None: self.manager.bb["main_node"] = self.main_node self.attach_ros_logger_to_console(self.main_node) - #attach_ros_logger_to_console(self, self.main_node) + else: + self._log("WARNING. 
No ROS node added", log_color=3) + + # MAIN FUNCTION (Handle commands or queries) else: + # Disable terminal input self.set_input_enabled(False) try: images = [] + + # Add the images if "--image=" in user_input: images = self.get_images(user_input) @@ -505,25 +428,23 @@ def worker(user_input: str="") -> None: try: result = self.manager.handle_user_request(user_input, context={"images": images}) except Exception as e: - #self.print(f"[error]Error handling request:[/error] {e}") + self._log(f"[error]Error handling request:[/error] {e}") return self.last_plan = result.get("plan", None) self.last_bb = result.get("blackboard", None) - #self.print(f"Output of plan: {result.get('blackboard', {None})}") self._log(f"Output of plan: {result.get('blackboard', {None})}", log_color=2) except KeyboardInterrupt: - #console.print("[yellow]Exiting...[/yellow]") self._log("[yellow]Exiting...[/yellow]") return except EOFError: - #console.print("[yellow]Exiting...[/yellow]") self._log("[yellow]Exiting...[/yellow]") return + # This is the main thread, here is where the hook is started for queries if user_input != "": self.hooks.on_request_start() @@ -531,40 +452,22 @@ def worker(user_input: str="") -> None: await loop.run_in_executor(None, lambda: worker(user_input)) if user_input == "": - - self._is_ready = True - self.set_input_enabled(True) + self.is_ready = True self._log("VulcanAI Interactive Console", log_color=2) self._log("Type [bold]'exit'[/bold] to quit.\n", log_color=2) - else: - self.set_input_enabled(True) - # region Utilities + # Activate the terminal input + self.set_input_enabled(True) - """@property - def term(self): - return self.query_one("#log", Log) - - @property - def cmd_input(self): - return self.query_one("#cmd", Input)#, id="term") - - def print_system(self, message: str): - #self.term.write(f"[bold cyan]system[/]: {message}") - self.term.write(f"{message}") - - #self.term.write(Text.from_markup(f"[{green_hex}]system[/]: {escape(message)}")) - 
#self.term.write(Text.from_markup(f"{escape(message)}")) - - def print_output(self, message: str): - self.term.write(message)""" + # region Utilities - @work # runs in a worker so waiting won't freeze the UI + @work # runs in a worker. waiting won't freeze the UI async def open_checklist(self, tools_list: list[str], active_tools_num: int) -> None: """ Function used in '/edit_tools' command. It creates a dialog with all the tools. """ + # create the checklist dialog selected = await self.push_screen_wait(CheckListScreen(tools_list, active_tools_num)) @@ -575,7 +478,8 @@ async def open_checklist(self, tools_list: list[str], active_tools_num: int) -> else: for tool_tmp in tools_list: - tool = tool_tmp[2:] # remove "- " + # remove "- " + tool = tool_tmp[2:] if tool_tmp in selected: self.manager.registry.activate_tool(tool) else: @@ -621,7 +525,7 @@ def cmd_tools(self, _) -> None: def cmd_edit_tools(self, _) -> None: tools_list = [] for tool in self.manager.registry.tools.values(): - tools_list.append(f"- {tool.name}")#: {tool.description}") + tools_list.append(f"- {tool.name}") active_tools_num = len(tools_list) @@ -705,17 +609,11 @@ def cmd_echo(self, args) -> None: # region Logging def render_log(self) -> None: - """self.query_one("#log", Static).update("\n".join(self._log_lines))""" log_static = self.query_one("#logcontent", Static) log_static.update("\n".join(self._log_lines)) self.query_one("#logview", VerticalScroll).scroll_end(animate=False) - """def render_log(self): - log = self.query_one("#log", Static) - # Allow Rich markup for colors and styles - log.update("\n".join(self._log_lines), markup=True)""" - def _log(self, line: str, log_type: str = "", log_color: int = -1, print_args_idx: int=-1) -> None: msg = "" @@ -736,46 +634,6 @@ def _log(self, line: str, log_type: str = "", log_color: int = -1, print_args_id msg = "[bold red]\[ERROR][/bold red] " - """if print_args_idx > 0: - msg += line[:print_args_idx] - i = print_args_idx - n = len(line) - - color_1 = 
"#C49C00" - color_2 = "#069899" - msg += f"[{color_1}]" - - while i < n: - c = line[i] - if c == '=' or c == ':': - msg += f"[/{color_1}]" - msg += line[i] - msg += f"[{color_2}]" - elif c == ',': - msg += f"[/{color_2}]" - msg += line[i] - msg += f"[{color_1}]" - elif c == '}' or c == ')': - msg += f"[/{color_2}]" - msg+=line[i] - break - else: - msg += line[i] - - i+=1 - - if i < n: - msg += line[i:] - - - - self._log_lines.append(msg) - self.render_log() - return""" - - - - if log_color == 0: color_type = "#FF0000" elif log_color == 1: @@ -796,16 +654,6 @@ def _log(self, line: str, log_type: str = "", log_color: int = -1, print_args_id self._log_lines.append(msg) self.render_log() - """def _log(self, line: str): - if "error" in line.lower(): - line = f"[red]{line}[/red]" - elif "warn" in line.lower(): - line = f"[yellow]{line}[/yellow]" - elif "success" in line.lower(): - line = f"[green]{line}[/green]" - self._log_lines.append(line) - self.render_log()""" - # endregion # region Input @@ -816,50 +664,14 @@ def set_input_enabled(self, enabled: bool) -> None: if enabled: self.set_focus(cmd) - """@work # runs in a worker so waiting won't freeze the UI - async def handle_user_query(self, user_input) -> None: - #"" - Function used in '/edit_tools' command. - It creates a dialog with all the tools. - #"" - # create the checklist dialog - # Check for image input. 
Must be always at the end of the input - - try: - images = [] - if "--image=" in user_input: - images = self.get_images(user_input) - - # Handle user request - try: - result = self.manager.handle_user_request(user_input, context={"images": images}) - except Exception as e: - #self.print(f"[error]Error handling request:[/error] {e}") - self._log(f"[error]Error handling request:[/error] {e}") - return - - self.last_plan = result.get("plan", None) - self.last_bb = result.get("blackboard", None) - - #self.print(f"Output of plan: {result.get('blackboard', {None})}") - self._log(f"Output of plan: {result.get('blackboard', {None})}") - - except KeyboardInterrupt: - #console.print("[yellow]Exiting...[/yellow]") - self._log("[yellow]Exiting...[/yellow]") - return - except EOFError: - #console.print("[yellow]Exiting...[/yellow]") - self._log("[yellow]Exiting...[/yellow]") - return""" - async def on_input_submitted(self, event: Input.Submitted) -> None: """ - Enter key + Function for entering a key """ - if not self._is_ready: + if not self.is_ready: return + cmd = event.value.strip() if not cmd: return @@ -875,8 +687,8 @@ async def on_input_submitted(self, event: Input.Submitted) -> None: event.input.focus() # reset tab state - self._tab_matches = [] - self._tab_index = 0 + self.tab_matches = [] + self.tab_index = 0 if not user_input: self.cmd_input.focus() @@ -894,26 +706,16 @@ async def on_input_submitted(self, event: Input.Submitted) -> None: self.handle_command(user_input) return - #self.handle_user_query(user_input) await asyncio.sleep(0) asyncio.create_task(self.bootstrap(user_input)) except KeyboardInterrupt: - #console.print("[yellow]Exiting...[/yellow]") self._log("[yellow]Exiting...[/yellow]") return except EOFError: - #console.print("[yellow]Exiting...[/yellow]") self._log("[yellow]Exiting...[/yellow]") return - """ - self.print_output(user_input) - event.input.value = "" - event.input.focus() - return - """ - def handle_command(self, user_input: str) -> None: # 
Otherwise, parse as a command parts = user_input.split() @@ -993,10 +795,10 @@ async def on_key(self, event: events.Key) -> None: if not all_cmds: return - self._tab_matches = [c for c in all_cmds if c.startswith(head)] if head else all_cmds - self._tab_index = 0 + self.tab_matches = [c for c in all_cmds if c.startswith(head)] if head else all_cmds + self.tab_index = 0 - matches = self._tab_matches + matches = self.tab_matches if not matches: cmd_input.focus() event.prevent_default() @@ -1081,8 +883,8 @@ async def on_key(self, event: events.Key) -> None: # Any other keypress resets tab cycle if the prefix changes if len(key) == 1 or key in ("backspace", "delete"): - self._tab_matches = [] - self._tab_index = 0 + self.tab_matches = [] + self.tab_index = 0 def common_prefix(self, strings: str) -> str: if not strings: @@ -1116,49 +918,17 @@ def common_prefix(self, strings: str) -> str: # region Actions (key bindings) + # TODO # endregion def run_console(self) -> None: - #self.print("VulcanAI Interactive Console") - #self.print("Type 'exit' to quit.\n") + """ + Run function for the VulcanAI + """ self.run() - """while True: - try: - user_input = self.session.prompt("[USER] >>> ") - if user_input.strip().lower() in ("exit", "quit"): - break - - # Internal commands start with / - if user_input.startswith("/"): - self.handle_command(user_input) - continue - - # Check for image input. 
Must be always at the end of the input - images = [] - if "--image=" in user_input: - images = self.get_images(user_input) - - # Handle user request - try: - result = self.manager.handle_user_request(user_input, context={"images": images}) - except Exception as e: - self.print(f"[error]Error handling request:[/error] {e}") - continue - - self.last_plan = result.get("plan", None) - self.last_bb = result.get("blackboard", None) - - self.print(f"Output of plan: {result.get('blackboard', {None})}") - - except KeyboardInterrupt: - console.print("[yellow]Exiting...[/yellow]") - break - except EOFError: - console.print("[yellow]Exiting...[/yellow]") - break""" def init_manager(self) -> None: if self.iterative: diff --git a/src/vulcanai/core/agent.py b/src/vulcanai/core/agent.py index 2745b37..59b7da1 100644 --- a/src/vulcanai/core/agent.py +++ b/src/vulcanai/core/agent.py @@ -32,7 +32,7 @@ class Agent: def __init__(self, model_name: str, logger=None): self.brand, name = self._detect_brand(model_name) self.model = None - self.logger = logger #or VulcanAILogger.log_manager + self.logger = logger self._load_model(name) def inference_plan( @@ -136,17 +136,26 @@ def _detect_brand(self, model_name: str) -> tuple[Brand, str]: def _load_model(self, model_name: str): if self.brand == Brand.gpt: from vulcanai.models.openai import OpenAIModel - self.logger(f"Using OpenAI API with model: [{self.class_color}]{model_name}[/{self.class_color}]", log_type="manager") + # Print in textual terminal: + # [MANAGER] Using OpenAI API with model: + self.logger(f"Using OpenAI API with model: " + \ + f"[{self.class_color}]{model_name}[/{self.class_color}]", log_type="manager") self.model = OpenAIModel(model_name, self.logger) elif self.brand == Brand.gemini: from vulcanai.models.gemini import GeminiModel - self.logger(f"Using Gemini API with model: {model_name}", log_type="manager") + # Print in textual terminal: + # [MANAGER] Using Gemini API with model: + self.logger(f"Using Gemini API with 
model: " + \ + f"[{self.class_color}]{model_name}[/{self.class_color}]", log_type="manager") self.model = GeminiModel(model_name, self.logger) elif self.brand == Brand.ollama: from vulcanai.models.ollama_model import OllamaModel - self.logger(f"Using Ollama API with model: {model_name}", log_type="manager") + # Print in textual terminal: + # [MANAGER] Using Ollama API with model: + self.logger(f"Using Ollama API with model: " + \ + f"[{self.class_color}]{model_name}[/{self.class_color}]", log_type="manager") self.model = OllamaModel(model_name, self.logger) else: @@ -157,8 +166,16 @@ def set_hooks(self, hooks) -> None: if self.model: try: self.model.hooks = hooks + # Print in textual terminal: + # [MANAGER] LLM hooks set. self.logger("LLM hooks set.", log_type="manager") except Exception as e: - self.logger(f"Failed to set LLM hooks: {e}", log_type="manager", log_color=0) # error + # Print in textual terminal: + # [MANAGER] ERROR. Failed to set LLM hooks: + self.logger(f"ERROR. Failed to set LLM hooks: {e}", + log_type="manager", log_color=0) else: - self.logger("LLM model not initialized, cannot set hooks.", log_type="manager", log_color=0) # error + # Print in textual terminal: + # [MANAGER] ERROR. LLM model not initialized, cannot set hooks. + self.logger("ERROR. 
LLM model not initialized, cannot set hooks.", + log_type="manager", log_color=0) diff --git a/src/vulcanai/core/executor.py b/src/vulcanai/core/executor.py index 39b1074..1a4bd85 100644 --- a/src/vulcanai/core/executor.py +++ b/src/vulcanai/core/executor.py @@ -64,7 +64,7 @@ class PlanExecutor: def __init__(self, registry, logger=None): self.registry = registry - self.logger = logger #or VulcanAILogger.log_executor + self.logger = logger def run(self, plan: GlobalPlan, bb: Blackboard) -> Dict[str, Any]: """ @@ -85,20 +85,32 @@ def _run_plan_node(self, node: PlanBase, bb: Blackboard) -> bool: """Run a PlanNode with execution control parameters.""" # Evaluate PlanNode-level condition if node.condition and not self.safe_eval(node.condition, bb): - self.logger(f"Skipping PlanNode {node.kind} due to not fulfilled condition={node.condition}", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Skipping PlanNode due to not fulfilled condition= + self.logger(f"Skipping PlanNode {node.kind} due to not fulfilled " + \ + f"condition={node.condition}", log_type="executor") return True attempts = node.retry + 1 if node.retry else 1 for i in range(attempts): ok = self._execute_plan_node_with_timeout(node, bb) if ok and self._check_success(node, bb): + # Print in textual terminal: + # [EXECUTOR] PlanNode succeeded on attempt / self.logger(f"PlanNode [{self.color_value}]{node.kind}[/{self.color_value}] " + \ - f"[{self.class_color}]succeeded[/{self.class_color}] on attempt {i+1}/{attempts}", log_type="executor") + f"[{self.class_color}]succeeded[/{self.class_color}] " + \ + f"on attempt {i+1}/{attempts}", log_type="executor") return True - self.logger(f"PlanNode {node.kind} failed on attempt {i+1}/{attempts}", log_type="executor", log_color=0) # error + # Print in textual terminal: + # [EXECUTOR] PlanNode failed on attempt /" + self.logger(f"PlanNode {node.kind} failed on attempt [bold]{i+1}/{attempts}[/bold]", + log_type="executor", log_color=0) if node.on_fail: - 
self.logger(f"Executing on_fail branch for PlanNode {node.kind}", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Executing on_fail branch for PlanNode + self.logger(f"Executing on_fail branch for PlanNode " + \ + f"[{self.color_value}]{node.kind}[/{self.color_value}]", log_type="executor") # Execute the on_fail branch but ignore its result and return False self._run_plan_node(node.on_fail, bb) @@ -112,7 +124,10 @@ def _execute_plan_node_with_timeout(self, node: PlanBase, bb: Blackboard) -> boo future = executor.submit(self._execute_plan_node, node, bb) return future.result(timeout=node.timeout_ms / 1000.0) except concurrent.futures.TimeoutError: - self.logger(f"PlanNode {node.kind} timed out after {node.timeout_ms} ms", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] PlanNode timed out after ms + self.logger(f"PlanNode {node.kind} timed out after [bold]{node.timeout_ms} ms[/bold]", + log_type="executor", log_color=0) return False else: return self._execute_plan_node(node, bb) @@ -132,13 +147,20 @@ def _execute_plan_node(self, node: PlanBase, bb: Blackboard) -> bool: return all(results) # Pydantic should have validated this already - self.logger(f"Unknown PlanNode kind {node.kind}, skipping", log_type="executor", log_color=0)# error + + # Print in textual terminal: + # [EXECUTOR] Unknown PlanNode kind , skipping + self.logger(f"Unknown PlanNode kind {node.kind}, skipping", log_type="executor", log_color=0) return True def _run_step(self, step: Step, bb: Blackboard, parallel: bool = False) -> bool: # Evaluate Step-level condition if step.condition and not self.safe_eval(step.condition, bb): - self.logger(f"Skipping step [italic]'{step.tool}'[/italic] due to condition={step.condition}", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Skipping step '' due to condition= + self.logger(f"Skipping step [italic]'{step.tool}'[/italic] " + \ + f"due to 
condition=[{self.class_color}]{step.condition}[/{self.class_color}]", + log_type="executor") return True # Bind args with blackboard placeholders @@ -156,7 +178,10 @@ def _run_step(self, step: Step, bb: Blackboard, parallel: bool = False) -> bool: if ok and self._check_success(step, bb, is_step=True): return True else: - self.logger(f"Step [italic]'{step.tool}'[/italic] attempt {i+1}/{attempts} failed", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Step '' attempt / failed + self.logger(f"Step [italic]'{step.tool}'[/italic] " + \ + f"attempt {i+1}/{attempts} failed", log_type="executor") return False @@ -167,10 +192,16 @@ def _check_success(self, entity: Step | PlanBase, bb: Blackboard, is_step: bool return True log_value = entity.tool if is_step else entity.kind if self.safe_eval(entity.success_criteria, bb): - self.logger(f"Entity '{log_value}' succeeded with criteria={entity.success_criteria}", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Entity '' succeeded with criteria= + self.logger(f"Entity '{log_value}' [{self.class_color}]succeeded[/{self.class_color}] " + \ + f"with criteria={entity.success_criteria}", log_type="executor") return True else: - self.logger(f"Entity '{log_value}' failed with criteria={entity.success_criteria}", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Entity '' failed with criteria= + self.logger(f"Entity '{log_value}' [{self.color_error}]failed[/{self.color_error}] " + \ + f"with criteria={entity.success_criteria}", log_type="executor") return False def safe_eval(self, expr: str, bb: Blackboard) -> bool: @@ -185,7 +216,10 @@ def safe_eval(self, expr: str, bb: Blackboard) -> bool: # Eval does not correctly evaluate dot notation with nested dicts return bool(eval(sub_expr)) except Exception as e: - self.logger(f"Condition evaluation failed: {expr} ({e})") + # Print in textual terminal: + # [EXECUTOR] Condition evaluation failed: () + self.logger(f"Condition evaluation 
failed: {expr} ({e})", + log_type="executor", log_color=0) return False def _make_bb_subs(self, expr: str, bb: Blackboard) -> str: @@ -198,7 +232,10 @@ def _make_bb_subs(self, expr: str, bb: Blackboard) -> str: expr = expr.replace(f"{{{{{match}}}}}", str(val)) return expr except Exception as e: - self.logger(f"Blackboard substitution failed: {expr} ({e})", log_type="executor", log_color=0)# error + # Print in textual terminal: + # [EXECUTOR] Blackboard substitution failed: () + self.logger(f"Blackboard substitution failed: {expr} ({e})", + log_type="executor", log_color=0) return expr def _bind_args(self, args: List[ArgValue], schema: List[Tuple[str, str]], bb: Blackboard) -> List[ArgValue]: @@ -249,7 +286,10 @@ def _call_tool(self, """Invoke a registered tool.""" tool = self.registry.tools.get(tool_name) if not tool: - self.logger(f"Tool [italic]'{tool_name}'[/italic] not found", log_type="executor", log_color=0) # error + # Print in textual terminal: + # [EXECUTOR] Tool '' not found + self.logger(f"Tool [italic]'{tool_name}'[/italic] not found", + log_type="executor", log_color=0) return False, None # Convert args list to dict @@ -262,14 +302,14 @@ def _call_tool(self, msg += "'{" for key, value in arg_dict.items(): if first: - msg += f"[{self.color_variable}]'{key}'[/{self.color_variable}]: [{self.color_value}]'{value}'[/{self.color_value}]" + msg += f"[{self.color_variable}]'{key}'[/{self.color_variable}]: " + \ + f"[{self.color_value}]'{value}'[/{self.color_value}]" else: - msg += f", [{self.color_variable}]'{key}'[/{self.color_variable}]: [{self.color_value}]'{value}'[/{self.color_value}]" + msg += f", [{self.color_variable}]'{key}'[/{self.color_variable}]: " + \ + f"[{self.color_value}]'{value}'[/{self.color_value}]" first = False msg+="}''" self.logger(msg, log_type="executor") - #self.logger(f"Invoking [italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]", log_type="executor", print_args_idx=49+1+len(tool_name)) - #self.logger(f"Invoking 
[italic]'{tool_name}'[/italic] with args: [italic]'{arg_dict}'[/italic]", log_type="executor", print_args_idx=25+len(tool_name)) start = time.time() tool_log = "" @@ -294,16 +334,28 @@ def _call_tool(self, result = tool.run(**arg_dict) tool_log = buff.getvalue().strip() if tool_log: - self.logger(f"{tool_log}: {tool_name} TODO. danip", log_type="executor")#, tool_name=tool_name) # TODO danip + # Print in textual terminal: + # [EXECUTOR] : + self.logger(f"{tool_log}: {tool_name}", log_type="executor") elapsed = (time.time() - start) * 1000 - self.logger(f"Executed [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] in [{self.color_value}]{elapsed:.1f} ms[/{self.color_value}] " + \ - f"with result: [bold][{self.class_color}]{result}[/{self.class_color}][/bold]", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Executed '' in ms with result: + self.logger(f"Executed [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] " + \ + f"in [{self.color_value}]{elapsed:.1f} ms[/{self.color_value}] " + \ + f"with result: [bold][{self.class_color}]{result}[/{self.class_color}][/bold]", + log_type="executor") return True, result except concurrent.futures.TimeoutError: - self.logger(f"Execution of [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] [{self.color_error}]timed out[/{self.color_error}] " + \ - f"after [{self.color_value}]{timeout_ms}[/{self.color_value}] ms", log_type="executor") + # Print in textual terminal: + # [EXECUTOR] Execution of '' timed out after ms + self.logger(f"Execution of [italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic] " + \ + f"[{self.color_error}]timed out[/{self.color_error}] " + \ + f"after [{self.color_value}]{timeout_ms}[/{self.color_value}] ms", + log_type="executor") return False, None except Exception as e: + # Print in textual terminal: + # [EXECUTOR] Execution failed for '': self.logger(f"Execution 
[bold][{self.color_error}]failed[/{self.color_error}][/bold] for " + \ f"[italic][{self.class_color}]'{tool_name}'[/{self.class_color}][/italic]: {e}", log_type="executor") return False, None diff --git a/src/vulcanai/core/manager.py b/src/vulcanai/core/manager.py index 4c06f95..3ba22f5 100644 --- a/src/vulcanai/core/manager.py +++ b/src/vulcanai/core/manager.py @@ -33,12 +33,12 @@ def __init__( hist_depth: int = 3, logger=None ): - self.logger = logger #or VulcanAILogger.log_manager + self.logger = logger self.llm = Agent(model, self.logger) self.k = k - self.registry = registry or ToolRegistry(logger=(logger))# or VulcanAILogger.log_registry)) + self.registry = registry or ToolRegistry(logger=(logger)) self.validator = validator - self.executor = PlanExecutor(self.registry, logger=(logger))# or VulcanAILogger.log_executor)) + self.executor = PlanExecutor(self.registry, logger=(logger)) self.bb = Blackboard() self.user_context = "" # History is saved as a list of Tuples of user requests and plan summaries @@ -97,12 +97,16 @@ def handle_user_request(self, user_text: str, context: Dict[str, Any]) -> Dict[s try: self.validator.validate(plan) except Exception as e: - self.logger(f"Plan validation error: {e}", log_type="validator", log_color=0) # error + # Print in textual terminal: + # [VALIDATOR] ERROR. Plan validation: + self.logger(f"ERROR. Plan validation: {e}", log_type="validator", log_color=0) raise e # Execute plan ret = self.execute_plan(plan) except Exception as e: - self.logger(f"Error handling user request: {e}", log_type="manager", log_color=0) # error + # Print in textual terminal: + # [MANAGER] ERROR. Handling user request: + self.logger(f"ERROR. 
Handling user request: {e}", log_type="manager", log_color=0) ret = {"error": str(e)} return ret @@ -129,6 +133,9 @@ def get_plan_from_user_request(self, user_text: str, context: Dict[str, Any] = N # Query LLM plan = self.llm.inference_plan(system_prompt, user_prompt, images, self.history) + # Print in textual terminal: + # [MANAGER] Plan received: + # self.logger(f"Plan received:\n{plan}", log_type="manager") # Save to history if plan: @@ -164,7 +171,10 @@ def _build_prompt(self, user_text: str, ctx: Dict[str, Any]) -> Tuple[str, str]: """ tools = self.registry.top_k(user_text, self.k) if not tools: - self.logger("No tools available in the registry.", log_type="manager", log_color=0) # error + # Print in textual terminal: + # [MANAGER] ERROR. No tools available in the registry. + self.logger("ERROR. No tools available in the registry.", + log_type="manager", log_color=0) return "", "" tool_descriptions = [] for tool in tools: @@ -196,6 +206,8 @@ def update_history_depth(self, new_depth: int): :param new_depth: The new history depth. """ self.history_depth = max(0, int(new_depth)) + # Print in textual terminal: + # [MANAGER] Updated history depth to self.logger(f"Updated history depth to {new_depth}", log_type="manager", log_color=2) if len(self.history) > self.history_depth: if self.history_depth <= 0: @@ -210,6 +222,8 @@ def update_k_index(self, new_k: int): :param new_k: The new k index. 
""" self.k = max(1, int(new_k)) + # Print in textual terminal: + # [VALIDATOR] Updated k index to self.logger(f"Updated k index to {new_k}", log_type="manager", log_color=2) def _get_prompt_template(self) -> str: diff --git a/src/vulcanai/core/manager_iterator.py b/src/vulcanai/core/manager_iterator.py index a209006..be2e58b 100644 --- a/src/vulcanai/core/manager_iterator.py +++ b/src/vulcanai/core/manager_iterator.py @@ -521,4 +521,4 @@ def _add_to_history(self, user_text: str, plan_summary: str): if self.history_depth <= 0: self.history = [] else: - self.history = self.history[-self.history_depth:] + self.history = self.history[-self.history_depth:] \ No newline at end of file diff --git a/src/vulcanai/core/plan_types.py b/src/vulcanai/core/plan_types.py index 917ebcf..74d658a 100644 --- a/src/vulcanai/core/plan_types.py +++ b/src/vulcanai/core/plan_types.py @@ -83,32 +83,54 @@ def __str__(self) -> str: color_tool = "#15B606" color_variable = "#C49C00" color_value = "#069899" + color_error = "#CC0C0C" for i, node in enumerate(self.plan, 1): - lines.append(f"- PlanNode {i}: [{color_variable}]kind[/{color_variable}]=[{color_value}]{node.kind}[/{color_value}]") + # - PlanNode : kind= + lines.append(f"- PlanNode {i}: [{color_variable}]kind[/{color_variable}]=" + \ + f"[{color_value}]{node.kind}[/{color_value}]") + if node.condition: - lines.append(f" Condition: {node.condition}") + # Condition: + lines.append(f"\tCondition: [{color_value}]{node.condition}[/{color_value}]") if node.retry: - lines.append(f" Retry: {node.retry}") + # Retry: + lines.append(f"\t[{color_error}]Retry[/{color_error}]: " + \ + f"[{color_value}]{node.retry}[/{color_value}]") if node.timeout_ms: - lines.append(f" Timeout: {node.timeout_ms} ms") + # Timeout: ms + lines.append(f"\t[{color_error}]Timeout[/{color_error}]: " + \ + f"[{color_value}]{node.timeout_ms} ms[/{color_value}]") if node.success_criteria: - lines.append(f" Success Criteria: {node.success_criteria}") + # Succes Criteria: + 
lines.append(f"\[{color_tool}]tSuccess Criteria[/{color_tool}]: " + \ + f"[{color_value}]{node.success_criteria}[/{color_value}]") if node.on_fail: - lines.append(f" On Fail: {node.on_fail.kind} with {len(node.on_fail.steps)} steps") + # On Fail: with steps + lines.append(f"\tOn Fail: [{color_value}]{node.on_fail.kind}[/{color_value}] with " + \ + f"[{color_value}]{len(node.on_fail.steps)} steps[/{color_value}]") for j, step in enumerate(node.steps, 1): #arg_str: =, ..., = - arg_str = ", ".join([f"[{color_variable}]{a.key}[/{color_variable}]=[{color_value}]{a.val}[/{color_value}]" for a in step.args]) if step.args else f"[{color_value}]no args[/{color_value}]" - # Step : () - lines.append(f" Step {j}: [{color_tool}]{step.tool}[/{color_tool}]({arg_str})") + arg_str = ", ".join([f"[{color_variable}]{a.key}[/{color_variable}]=" + \ + f"[{color_value}]{a.val}[/{color_value}]" for a in step.args]) \ + if step.args else f"[{color_value}]no args[/{color_value}]" + # Step : () + lines.append(f"\tStep {j}: [{color_tool}]{step.tool}[/{color_tool}]({arg_str})") if step.condition: - lines.append(f" Condition: {step.condition}") + # Condition: + lines.append(f"\t Condition: [{color_value}]{step.condition}[/{color_value}]") if step.retry: - lines.append(f" Retry: {step.retry}") + # Condition: + lines.append(f"\t [{color_error}]Retry[/{color_error}]: " + \ + f"[{color_value}]{step.retry}[/{color_value}]") if step.timeout_ms: - lines.append(f" Timeout: {step.timeout_ms} ms") + # Timeout: ms + lines.append(f"\t [{color_error}]Timeout[/{color_error}]: " + \ + f"[{color_value}]{step.timeout_ms} ms[/{color_value}]") if step.success_criteria: - lines.append(f" Success Criteria: {step.success_criteria}") + # Success Criteria: + lines.append(f"\t [{color_tool}]Success Criteria[/{color_tool}]: " + \ + f"[{color_value}]{step.success_criteria}[/{color_value}]") return "\n".join(lines) diff --git a/src/vulcanai/models/gemini.py b/src/vulcanai/models/gemini.py index 253795c..cf171b1 100644 
--- a/src/vulcanai/models/gemini.py +++ b/src/vulcanai/models/gemini.py @@ -26,6 +26,10 @@ class GeminiModel(IModel): + + # Color of the class [MANAGER] in the textual terminal + class_color = "#0d87c0" + """ Wrapper for most of Google models, Gemini mainly. """ def __init__(self, model_name:str, logger=None, hooks: Optional[IModelHooks] = None): super().__init__() @@ -35,7 +39,9 @@ def __init__(self, model_name:str, logger=None, hooks: Optional[IModelHooks] = N try: self.model = genai.Client(api_key=os.environ.get("GEMINI_API_KEY")) except Exception as e: - self.logger(f"Missing Gemini API Key: {e}", error=True) + # Print in textual terminal: + # [MANAGER] ERROR. Missing Gemini API Key: + self.logger(f"ERROR. Missing Gemini API Key: {e}", log_color=0) def _inference( self, @@ -65,29 +71,40 @@ def _inference( messages = self._build_messages(user_content, history) # Configuration for content generation - cfg = gtypes.GenerateContentConfig( - response_mime_type="application/json", - response_schema=response_cls, - system_instruction=[system_prompt], - candidate_count=1, - ) - - # Notify hooks of request start - """try: - self.hooks.on_request_start() + try: + cfg = gtypes.GenerateContentConfig( + response_mime_type="application/json", + response_schema=response_cls, + system_instruction=[system_prompt], + candidate_count=1, + ) + + response = self.model.models.generate_content( + model=self.model_name, + contents=messages, + config=cfg, + ) except Exception as e: - pass""" + # Print in textual terminal: + # [MANAGER] ERROR. Gemini API: + self.logger(f"ERROR. 
Gemini API: {e}", log_type="manager", log_color=0) + return None + finally: + # Notify hooks of request end + try: + self.hooks.on_request_end() + except Exception as e: + pass - response = self.model.models.generate_content( - model=self.model_name, - contents=messages, - config=cfg, - ) + # Extract parsed object safely parsed_response: Optional[T] = None try: parsed_response = response.parsed except Exception as e: - self.logger(f"Failed to get parsed goal from Gemini response, falling back to text: {e}", error=True) + # Print in textual terminal: + # [MANAGER] ERROR. Failed to get parsed goal from Gemini response, falling back to text: + self.logger(f"ERROR. Failed to get parsed goal from Gemini response, " + \ + f"falling back to text: {e}", log_color=0) finally: # Notify hooks of request end try: @@ -110,13 +127,22 @@ def _inference( import json parsed_response = GoalSpec(**json.loads(raw)) except Exception as e: - self.logger(f"Failed to parse raw {response_cls.__name__} JSON: {e}", error=True) - + # Print in textual terminal: + # [MANAGER] ERROR. Failed to parse raw JSON: + self.logger(f"ERROR. Failed to parse raw {response_cls.__name__} JSON: {e}", + log_color=0) end = time.time() + # Print in textual terminal: + # [MANAGER] Gemini response time