From 68d1f8ab3c57bb8759506cfc660ab4ec1286cd20 Mon Sep 17 00:00:00 2001 From: Vinh Nguyen <1097578+vinhnx@users.noreply.github.com> Date: Tue, 7 Oct 2025 17:16:31 +0700 Subject: [PATCH 1/3] chore(mcp): expose optional context7 configuration --- README.md | 4 +- config.toml | 5 - docs/guides/mcp-integration.md | 28 +++-- src/agent/runloop/tool_output.rs | 126 +--------------------- vtcode-core/src/config/mcp.rs | 12 +-- vtcode-core/src/core/agent/runner.rs | 5 +- vtcode-core/src/tools/registry/mod.rs | 4 +- vtcode-core/tests/mcp_context7_manual.rs | 35 ------ vtcode-core/tests/mcp_integration_e2e.rs | 14 +-- vtcode-core/tests/mcp_integration_test.rs | 25 +++-- vtcode.toml | 25 +++-- 11 files changed, 72 insertions(+), 211 deletions(-) delete mode 100644 vtcode-core/tests/mcp_context7_manual.rs diff --git a/README.md b/README.md index 76f0bb767..6e884aa18 100644 --- a/README.md +++ b/README.md @@ -88,12 +88,12 @@ The architecture divides into `vtcode-core` (reusable library) and `src/` (CLI e ```rust let client = McpClient::new("ws://localhost:8080"); let docs = client.call("get-library-docs", json!({ - "context7CompatibleLibraryID": "/tokio/docs", + "library_id": "/tokio/docs", "tokens": 5000, "topic": "async runtime" })).await?; ``` - Discovers tools dynamically (e.g., `mcp_resolve-library-id` for Context7 IDs, `mcp_sequentialthinking` for chain-of-thought reasoning with branch/revision support, `mcp_get_current_time` for timezone-aware ops). Connection pooling and failover for multi-provider setups. + Discovers tools dynamically (e.g., `mcp_resolve-library-id` for provider-specific IDs, `mcp_sequentialthinking` for chain-of-thought reasoning with branch/revision support, `mcp_get_current_time` for timezone-aware ops). Connection pooling and failover for multi-provider setups. 
### CLI Execution (`src/`) diff --git a/config.toml b/config.toml index 86551a8b1..ccd59ba34 100644 --- a/config.toml +++ b/config.toml @@ -2,11 +2,6 @@ sse_servers = [ ] shttp_servers = [ ] - [[mcp.stdio_servers]] - name = "context7" - command = "npx" - args = [ "-y", "@upstash/context7-mcp@latest" ] - [[mcp.stdio_servers]] name = "time" command = "uvx" diff --git a/docs/guides/mcp-integration.md b/docs/guides/mcp-integration.md index 4f2c92809..c6f2cf610 100644 --- a/docs/guides/mcp-integration.md +++ b/docs/guides/mcp-integration.md @@ -39,6 +39,18 @@ allowlists control tool access, and how to troubleshoot common configuration iss For HTTP transports, specify the endpoint and headers in place of the stdio fields. The configuration loader automatically deserializes either transport variant. + To keep Context7 available without enabling it by default, add a disabled entry you can toggle + later: + + ```toml + [[mcp.providers]] + name = "context7" + enabled = false + command = "npx" + args = ["-y", "@upstash/context7-mcp"] + max_concurrent_requests = 3 + ``` + ## Allowlist Behaviour MCP access is gated by pattern-based allowlists. The defaults apply to every provider unless the @@ -59,13 +71,13 @@ broader default patterns. [mcp.allowlist.default] resources = ["docs/*"] -[mcp.allowlist.providers.context7] +[mcp.allowlist.providers.knowledge_base] resources = ["journals/*"] ``` In this configuration: -- `context7` can access only `journals/*` resources. +- `knowledge_base` can access only `journals/*` resources. - Other providers continue to match `docs/*` through the default rule. ## Testing the Integration @@ -77,15 +89,15 @@ wiring: cargo test -p vtcode-core mcp -- --nocapture ``` -The suite includes mocked clients and parsing tests so it does not require live MCP servers. 
For -an end-to-end check against the Context7 MCP server, invoke the ignored smoke test which spawns the -official `@upstash/context7-mcp` package on demand: +The suite includes mocked clients and parsing tests so it does not require live MCP servers. For an +end-to-end check against a live provider, enable the ignored time-server smoke test once +`mcp-server-time` is installed locally: ```bash -cargo test -p vtcode-core --test mcp_context7_manual context7_list_tools_smoke -- --ignored --nocapture +cargo test -p vtcode-core --test mcp_integration_e2e test_time_mcp_server_integration -- --ignored --nocapture ``` -Expect the test to take a little longer on the first run while `npx` downloads the server bundle. +Expect the test to take a little longer on the first run while the server binary is downloaded. ## Troubleshooting @@ -93,7 +105,7 @@ Expect the test to take a little longer on the first run while `npx` downloads t allowlist. Provider rules now override defaults, so missing patterns may block tools that defaults would otherwise allow. - **Provider handshake visibility** – VT Code now sends explicit MCP client metadata and - normalizes structured tool responses. Context7 results surface as plain JSON objects in the + normalizes structured tool responses. Provider results surface as plain JSON objects in the tool panel so downstream renderers can display status, metadata, and message lists without additional post-processing. 
- **Stale configuration values** – ensure `max_concurrent_connections`, `request_timeout_seconds`, diff --git a/src/agent/runloop/tool_output.rs b/src/agent/runloop/tool_output.rs index 3cb8132f5..daa2ac0a4 100644 --- a/src/agent/runloop/tool_output.rs +++ b/src/agent/runloop/tool_output.rs @@ -24,9 +24,7 @@ pub(crate) fn render_tool_output( } if let Some(tool) = tool_name { - if tool.starts_with("mcp_context7") { - render_mcp_context7_output(renderer, val)?; - } else if tool.starts_with("mcp_sequentialthinking") { + if tool.starts_with("mcp_sequentialthinking") { render_mcp_sequential_output(renderer, val)?; } } @@ -135,98 +133,6 @@ fn render_plan_update(renderer: &mut AnsiRenderer, val: &Value) -> Result<()> { Ok(()) } -fn render_mcp_context7_output(renderer: &mut AnsiRenderer, val: &Value) -> Result<()> { - let status = val - .get("status") - .and_then(|value| value.as_str()) - .unwrap_or("unknown"); - - let meta = val.get("meta").and_then(|value| value.as_object()); - let provider = val - .get("provider") - .and_then(|value| value.as_str()) - .unwrap_or("context7"); - let tool_used = val - .get("tool") - .and_then(|value| value.as_str()) - .unwrap_or("context7"); - - renderer.line( - MessageStyle::Tool, - &format!("[{}:{}] status: {}", provider, tool_used, status), - )?; - - if let Some(meta) = meta { - if let Some(query) = meta.get("query").and_then(|value| value.as_str()) { - renderer.line( - MessageStyle::ToolDetail, - &format!("┇ query: {}", shorten(query, 160)), - )?; - } - if let Some(scope) = meta.get("scope").and_then(|value| value.as_str()) { - renderer.line(MessageStyle::ToolDetail, &format!("┇ scope: {}", scope))?; - } - if let Some(limit) = meta.get("max_results").and_then(|value| value.as_u64()) { - renderer.line( - MessageStyle::ToolDetail, - &format!("┇ max_results: {}", limit), - )?; - } - } - - if let Some(messages) = val.get("messages").and_then(|value| value.as_array()) - && !messages.is_empty() - { - renderer.line(MessageStyle::ToolDetail, 
"┇ snippets:")?; - for message in messages.iter().take(3) { - if let Some(content) = message.get("content").and_then(|value| value.as_str()) { - renderer.line( - MessageStyle::ToolDetail, - &format!("┇ · {}", shorten(content, 200)), - )?; - } - } - if messages.len() > 3 { - renderer.line( - MessageStyle::ToolDetail, - &format!("┇ · … {} more", messages.len() - 3), - )?; - } - } - - if let Some(errors) = val.get("errors").and_then(|value| value.as_array()) - && !errors.is_empty() - { - renderer.line(MessageStyle::Error, "┇ provider errors:")?; - for err in errors.iter().take(2) { - if let Some(msg) = err.get("message").and_then(|value| value.as_str()) { - renderer.line(MessageStyle::Error, &format!("┇ · {}", shorten(msg, 160)))?; - } - } - if errors.len() > 2 { - renderer.line( - MessageStyle::Error, - &format!("┇ · … {} more", errors.len() - 2), - )?; - } - } - - if let Some(input) = val.get("input").and_then(|value| value.as_object()) - && let Some(name) = input.get("LibraryName").and_then(|value| value.as_str()) - { - let candidate = name.trim(); - if !candidate.is_empty() { - let lowered = candidate.to_lowercase(); - if lowered != "tokio" && levenshtein(&lowered, "tokio") <= 2 { - renderer.line(MessageStyle::Info, "┇ suggestion: did you mean 'tokio'?")?; - } - } - } - - renderer.line(MessageStyle::ToolDetail, "┗ context7 lookup complete")?; - Ok(()) -} - fn render_mcp_sequential_output(renderer: &mut AnsiRenderer, val: &Value) -> Result<()> { let status = val .get("status") @@ -337,34 +243,6 @@ fn shorten(text: &str, max_len: usize) -> String { result } -fn levenshtein(a: &str, b: &str) -> usize { - let a_len = a.chars().count(); - let b_len = b.chars().count(); - if a_len == 0 { - return b_len; - } - if b_len == 0 { - return a_len; - } - - let mut prev: Vec = (0..=b_len).collect(); - let mut current = vec![0; b_len + 1]; - - for (i, a_ch) in a.chars().enumerate() { - current[0] = i + 1; - for (j, b_ch) in b.chars().enumerate() { - let cost = if a_ch == b_ch { 0 
} else { 1 }; - current[j + 1] = std::cmp::min( - std::cmp::min(current[j] + 1, prev[j + 1] + 1), - prev[j] + cost, - ); - } - prev.copy_from_slice(¤t); - } - - prev[b_len] -} - fn render_plan_panel(renderer: &mut AnsiRenderer, plan: &TaskPlan) -> Result<()> { renderer.line( MessageStyle::Tool, @@ -1028,7 +906,7 @@ mod tests { fn non_terminal_tools_do_not_apply_special_styles() { let git = GitStyles::new(); let ls = LsStyles::from_components(HashMap::new(), Vec::new()); - let styled = select_line_style(Some("context7"), "+added", &git, &ls); + let styled = select_line_style(Some("knowledge_base"), "+added", &git, &ls); assert!(styled.is_none()); } diff --git a/vtcode-core/src/config/mcp.rs b/vtcode-core/src/config/mcp.rs index 91ba84633..4039a7a3d 100644 --- a/vtcode-core/src/config/mcp.rs +++ b/vtcode-core/src/config/mcp.rs @@ -577,10 +577,10 @@ mod tests { provider_rules.tools = Some(vec!["list_*".to_string()]); config .providers - .insert("context7".to_string(), provider_rules); + .insert("knowledge".to_string(), provider_rules); - assert!(config.is_tool_allowed("context7", "list_documents")); - assert!(!config.is_tool_allowed("context7", "get_current_time")); + assert!(config.is_tool_allowed("knowledge", "list_documents")); + assert!(!config.is_tool_allowed("knowledge", "get_current_time")); assert!(config.is_tool_allowed("other", "get_timezone")); assert!(!config.is_tool_allowed("other", "list_documents")); } @@ -624,10 +624,10 @@ mod tests { provider_rules.resources = Some(vec!["journals/*".to_string()]); config .providers - .insert("context7".to_string(), provider_rules); + .insert("knowledge".to_string(), provider_rules); - assert!(config.is_resource_allowed("context7", "journals/2024")); - assert!(!config.is_resource_allowed("context7", "docs/manual")); + assert!(config.is_resource_allowed("knowledge", "journals/2024")); + assert!(!config.is_resource_allowed("knowledge", "docs/manual")); assert!(config.is_resource_allowed("other", "docs/reference")); 
assert!(!config.is_resource_allowed("other", "journals/2023")); } diff --git a/vtcode-core/src/core/agent/runner.rs b/vtcode-core/src/core/agent/runner.rs index 217b8d938..9881c3268 100644 --- a/vtcode-core/src/core/agent/runner.rs +++ b/vtcode-core/src/core/agent/runner.rs @@ -344,10 +344,7 @@ impl AgentRunner { parallel_tool_config: Some( crate::llm::provider::ParallelToolConfig::anthropic_optimized(), ), - reasoning_effort: if self - .provider_client - .supports_reasoning_effort(&self.model) - { + reasoning_effort: if self.provider_client.supports_reasoning_effort(&self.model) { self.reasoning_effort } else { None diff --git a/vtcode-core/src/tools/registry/mod.rs b/vtcode-core/src/tools/registry/mod.rs index 23a8daf90..cc01ad8f8 100644 --- a/vtcode-core/src/tools/registry/mod.rs +++ b/vtcode-core/src/tools/registry/mod.rs @@ -788,8 +788,8 @@ mod tests { "sequentialthinking" ); assert_eq!( - normalize_mcp_tool_identifier("Context7.Lookup"), - "context7lookup" + normalize_mcp_tool_identifier("Knowledge.Lookup"), + "knowledgelookup" ); assert_eq!(normalize_mcp_tool_identifier("alpha_beta"), "alphabeta"); } diff --git a/vtcode-core/tests/mcp_context7_manual.rs b/vtcode-core/tests/mcp_context7_manual.rs deleted file mode 100644 index a9d2713c0..000000000 --- a/vtcode-core/tests/mcp_context7_manual.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::collections::HashMap; - -use vtcode_core::config::mcp::{ - McpClientConfig, McpProviderConfig, McpStdioServerConfig, McpTransportConfig, -}; -use vtcode_core::mcp_client::McpClient; - -#[tokio::test] -#[ignore] -async fn context7_list_tools_smoke() { - let provider = McpProviderConfig { - name: "context7".to_string(), - transport: McpTransportConfig::Stdio(McpStdioServerConfig { - command: "npx".to_string(), - args: vec!["-y".to_string(), "@upstash/context7-mcp@latest".to_string()], - working_directory: None, - }), - env: HashMap::new(), - enabled: true, - max_concurrent_requests: 1, - }; - - let mut config = 
McpClientConfig::default(); - config.enabled = true; - config.providers = vec![provider]; - - let mut client = McpClient::new(config); - client.initialize().await.unwrap(); - - let tools = client.list_tools().await.unwrap(); - assert!( - !tools.is_empty(), - "context7 should expose at least one tool" - ); -} diff --git a/vtcode-core/tests/mcp_integration_e2e.rs b/vtcode-core/tests/mcp_integration_e2e.rs index 1c4fa8524..c9454b80d 100644 --- a/vtcode-core/tests/mcp_integration_e2e.rs +++ b/vtcode-core/tests/mcp_integration_e2e.rs @@ -194,10 +194,10 @@ args = ["mcp-server-time"] max_concurrent_requests = 1 [[mcp.providers]] -name = "context7" +name = "knowledge-base" enabled = true command = "npx" -args = ["-y", "@upstash/context7-mcp@latest"] +args = ["-y", "@example/knowledge-mcp@latest"] max_concurrent_requests = 2 [[mcp.providers]] @@ -219,11 +219,11 @@ max_concurrent_requests = 1 assert!(time_provider.enabled); assert_eq!(time_provider.max_concurrent_requests, 1); - // Check second provider (context7) - let context7_provider = &config.mcp.providers[1]; - assert_eq!(context7_provider.name, "context7"); - assert!(context7_provider.enabled); - assert_eq!(context7_provider.max_concurrent_requests, 2); + // Check second provider (knowledge-base) + let knowledge_provider = &config.mcp.providers[1]; + assert_eq!(knowledge_provider.name, "knowledge-base"); + assert!(knowledge_provider.enabled); + assert_eq!(knowledge_provider.max_concurrent_requests, 2); // Check third provider (disabled) let disabled_provider = &config.mcp.providers[2]; diff --git a/vtcode-core/tests/mcp_integration_test.rs b/vtcode-core/tests/mcp_integration_test.rs index 8195cdaa4..7f3c993a9 100644 --- a/vtcode-core/tests/mcp_integration_test.rs +++ b/vtcode-core/tests/mcp_integration_test.rs @@ -92,26 +92,29 @@ max_concurrent_requests = 1 fn test_provider_config_creation() { let stdio_config = McpStdioServerConfig { command: "npx".to_string(), - args: vec!["-y".to_string(), 
"@upstash/context7-mcp@latest".to_string()], + args: vec![ + "-y".to_string(), + "@example/knowledge-mcp@latest".to_string(), + ], working_directory: Some("/tmp".to_string()), }; let provider_config = McpProviderConfig { - name: "context7".to_string(), + name: "knowledge-base".to_string(), transport: McpTransportConfig::Stdio(stdio_config), env: HashMap::new(), enabled: true, max_concurrent_requests: 2, }; - assert_eq!(provider_config.name, "context7"); + assert_eq!(provider_config.name, "knowledge-base"); assert!(provider_config.enabled); assert_eq!(provider_config.max_concurrent_requests, 2); match provider_config.transport { McpTransportConfig::Stdio(ref config) => { assert_eq!(config.command, "npx"); - assert_eq!(config.args, vec!["-y", "@upstash/context7-mcp@latest"]); + assert_eq!(config.args, vec!["-y", "@example/knowledge-mcp@latest"]); assert_eq!(config.working_directory, Some("/tmp".to_string())); } McpTransportConfig::Http(_) => panic!("Expected stdio transport"), @@ -164,10 +167,10 @@ args = ["mcp-server-time"] max_concurrent_requests = 1 [[mcp.providers]] -name = "context7" +name = "knowledge-base" enabled = true command = "npx" -args = ["-y", "@upstash/context7-mcp@latest"] +args = ["-y", "@example/knowledge-mcp@latest"] max_concurrent_requests = 2 [[mcp.providers]] @@ -189,11 +192,11 @@ max_concurrent_requests = 1 assert!(time_provider.enabled); assert_eq!(time_provider.max_concurrent_requests, 1); - // Check second provider (context7) - let context7_provider = &config.mcp.providers[1]; - assert_eq!(context7_provider.name, "context7"); - assert!(context7_provider.enabled); - assert_eq!(context7_provider.max_concurrent_requests, 2); + // Check second provider (knowledge-base) + let knowledge_provider = &config.mcp.providers[1]; + assert_eq!(knowledge_provider.name, "knowledge-base"); + assert!(knowledge_provider.enabled); + assert_eq!(knowledge_provider.max_concurrent_requests, 2); // Check third provider (serena - disabled) let serena_provider = 
&config.mcp.providers[2]; diff --git a/vtcode.toml b/vtcode.toml index 1752f2ec4..54018dc3d 100644 --- a/vtcode.toml +++ b/vtcode.toml @@ -124,6 +124,16 @@ show_timeline_pane = false # Local MCP transports; fine-grained allowlists live in .vtcode/tool-policy.json # Local MCP clients executed via stdio transports enabled = true +# Tune concurrency + reliability defaults to match your environment +max_concurrent_connections = 3 +request_timeout_seconds = 45 +retry_attempts = 2 + +[mcp.ui] +# Inline MCP event rendering controls for the chat surface +mode = "compact" +max_events = 25 +show_provider_names = true [[mcp.providers]] # Official Model Context Protocol time server @@ -134,19 +144,20 @@ args = ["mcp-server-time"] max_concurrent_requests = 3 [[mcp.providers]] -# Upstash Context7 knowledge base over MCP -name = "context7" +# Anthropic sequential thinking planner via MCP +name = "sequential-thinking" enabled = true command = "npx" -args = ["-y", "@upstash/context7-mcp@latest"] +args = ["-y", "@modelcontextprotocol/server-sequential-thinking"] max_concurrent_requests = 3 [[mcp.providers]] -# Anthropic sequential thinking planner via MCP -name = "sequential-thinking" -enabled = true +# Optional Context7 integration for workspace memories and journaling +# Set `enabled = true` after installing the provider locally. 
+name = "context7" +enabled = false command = "npx" -args = ["-y", "@modelcontextprotocol/server-sequential-thinking"] +args = ["-y", "@upstash/context7-mcp"] max_concurrent_requests = 3 # Example HTTP provider configuration From c1bbd65a650e469d93e4d5def016c923daef48a6 Mon Sep 17 00:00:00 2001 From: Vinh Nguyen <1097578+vinhnx@users.noreply.github.com> Date: Tue, 7 Oct 2025 20:06:32 +0700 Subject: [PATCH 2/3] Restore Context7 MCP renderer --- src/agent/runloop/tool_output.rs | 124 ++++++++++++++++++++++++++++++- 1 file changed, 123 insertions(+), 1 deletion(-) diff --git a/src/agent/runloop/tool_output.rs b/src/agent/runloop/tool_output.rs index daa2ac0a4..5ee77e3fb 100644 --- a/src/agent/runloop/tool_output.rs +++ b/src/agent/runloop/tool_output.rs @@ -24,7 +24,9 @@ pub(crate) fn render_tool_output( } if let Some(tool) = tool_name { - if tool.starts_with("mcp_sequentialthinking") { + if tool.starts_with("mcp_context7") { + render_mcp_context7_output(renderer, val)?; + } else if tool.starts_with("mcp_sequentialthinking") { render_mcp_sequential_output(renderer, val)?; } } @@ -133,6 +135,98 @@ fn render_plan_update(renderer: &mut AnsiRenderer, val: &Value) -> Result<()> { Ok(()) } +fn render_mcp_context7_output(renderer: &mut AnsiRenderer, val: &Value) -> Result<()> { + let status = val + .get("status") + .and_then(|value| value.as_str()) + .unwrap_or("unknown"); + + let meta = val.get("meta").and_then(|value| value.as_object()); + let provider = val + .get("provider") + .and_then(|value| value.as_str()) + .unwrap_or("context7"); + let tool_used = val + .get("tool") + .and_then(|value| value.as_str()) + .unwrap_or("context7"); + + renderer.line( + MessageStyle::Tool, + &format!("[{}:{}] status: {}", provider, tool_used, status), + )?; + + if let Some(meta) = meta { + if let Some(query) = meta.get("query").and_then(|value| value.as_str()) { + renderer.line( + MessageStyle::ToolDetail, + &format!("┇ query: {}", shorten(query, 160)), + )?; + } + if let Some(scope) = 
meta.get("scope").and_then(|value| value.as_str()) { + renderer.line(MessageStyle::ToolDetail, &format!("┇ scope: {}", scope))?; + } + if let Some(limit) = meta.get("max_results").and_then(|value| value.as_u64()) { + renderer.line( + MessageStyle::ToolDetail, + &format!("┇ max_results: {}", limit), + )?; + } + } + + if let Some(messages) = val.get("messages").and_then(|value| value.as_array()) + && !messages.is_empty() + { + renderer.line(MessageStyle::ToolDetail, "┇ snippets:")?; + for message in messages.iter().take(3) { + if let Some(content) = message.get("content").and_then(|value| value.as_str()) { + renderer.line( + MessageStyle::ToolDetail, + &format!("┇ · {}", shorten(content, 200)), + )?; + } + } + if messages.len() > 3 { + renderer.line( + MessageStyle::ToolDetail, + &format!("┇ · … {} more", messages.len() - 3), + )?; + } + } + + if let Some(errors) = val.get("errors").and_then(|value| value.as_array()) + && !errors.is_empty() + { + renderer.line(MessageStyle::Error, "┇ provider errors:")?; + for err in errors.iter().take(2) { + if let Some(msg) = err.get("message").and_then(|value| value.as_str()) { + renderer.line(MessageStyle::Error, &format!("┇ · {}", shorten(msg, 160)))?; + } + } + if errors.len() > 2 { + renderer.line( + MessageStyle::Error, + &format!("┇ · … {} more", errors.len() - 2), + )?; + } + } + + if let Some(input) = val.get("input").and_then(|value| value.as_object()) + && let Some(name) = input.get("LibraryName").and_then(|value| value.as_str()) + { + let candidate = name.trim(); + if !candidate.is_empty() { + let lowered = candidate.to_lowercase(); + if lowered != "tokio" && levenshtein(&lowered, "tokio") <= 2 { + renderer.line(MessageStyle::Info, "┇ suggestion: did you mean 'tokio'?")?; + } + } + } + + renderer.line(MessageStyle::ToolDetail, "┗ context7 lookup complete")?; + Ok(()) +} + fn render_mcp_sequential_output(renderer: &mut AnsiRenderer, val: &Value) -> Result<()> { let status = val .get("status") @@ -243,6 +337,34 @@ fn 
shorten(text: &str, max_len: usize) -> String {
     result
 }
 
+fn levenshtein(a: &str, b: &str) -> usize {
+    let a_len = a.chars().count();
+    let b_len = b.chars().count();
+    if a_len == 0 {
+        return b_len;
+    }
+    if b_len == 0 {
+        return a_len;
+    }
+
+    let mut prev: Vec<usize> = (0..=b_len).collect();
+    let mut current = vec![0; b_len + 1];
+
+    for (i, a_ch) in a.chars().enumerate() {
+        current[0] = i + 1;
+        for (j, b_ch) in b.chars().enumerate() {
+            let cost = if a_ch == b_ch { 0 } else { 1 };
+            current[j + 1] = std::cmp::min(
+                std::cmp::min(current[j] + 1, prev[j + 1] + 1),
+                prev[j] + cost,
+            );
+        }
+        prev.copy_from_slice(&current);
+    }
+
+    prev[b_len]
+}
+
 fn render_plan_panel(renderer: &mut AnsiRenderer, plan: &TaskPlan) -> Result<()> {
     renderer.line(
         MessageStyle::Tool,