diff --git a/crates/vite_task/src/session/execute/mod.rs b/crates/vite_task/src/session/execute/mod.rs index 37c0d23c..ed6b9141 100644 --- a/crates/vite_task/src/session/execute/mod.rs +++ b/crates/vite_task/src/session/execute/mod.rs @@ -120,18 +120,21 @@ impl ExecutionContext<'_> { LeafExecutionKind::InProcess(in_process_execution) => { // In-process (built-in) commands: caching is disabled, execute synchronously let mut stdio_config = leaf_reporter - .start(CacheStatus::Disabled(CacheDisabledReason::InProcessExecution)); + .start(CacheStatus::Disabled(CacheDisabledReason::InProcessExecution)) + .await; let execution_output = in_process_execution.execute(); // Write output to the stdout writer from StdioConfig let _ = stdio_config.stdout_writer.write_all(&execution_output.stdout).await; let _ = stdio_config.stdout_writer.flush().await; - leaf_reporter.finish( - None, - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), - None, - ); + leaf_reporter + .finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + None, + ) + .await; } LeafExecutionKind::Spawn(spawn_execution) => { #[expect( @@ -191,11 +194,13 @@ pub async fn execute_spawn( Err(err) => { // Cache lookup error — report through finish. // Note: start() is NOT called because we don't have a valid cache status. - leaf_reporter.finish( - None, - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), - Some(ExecutionError::Cache { kind: CacheErrorKind::Lookup, source: err }), - ); + leaf_reporter + .finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(ExecutionError::Cache { kind: CacheErrorKind::Lookup, source: err }), + ) + .await; return SpawnOutcome::Failed; } } @@ -206,7 +211,7 @@ pub async fn execute_spawn( // 2. Report execution start with the determined cache status. // Returns StdioConfig with the reporter's suggestion and async writers. 
- let mut stdio_config = leaf_reporter.start(cache_status); + let mut stdio_config = leaf_reporter.start(cache_status).await; // 3. If cache hit, replay outputs via the StdioConfig writers and finish early. // No need to actually execute the command — just replay what was cached. @@ -219,11 +224,9 @@ pub async fn execute_spawn( let _ = writer.write_all(&output.content).await; let _ = writer.flush().await; } - leaf_reporter.finish( - None, - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheHit), - None, - ); + leaf_reporter + .finish(None, CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheHit), None) + .await; return SpawnOutcome::CacheHit; } @@ -243,19 +246,23 @@ pub async fn execute_spawn( match spawn_inherited(&spawn_execution.spawn_command).await { Ok(result) => { - leaf_reporter.finish( - Some(result.exit_status), - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), - None, - ); + leaf_reporter + .finish( + Some(result.exit_status), + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + None, + ) + .await; return SpawnOutcome::Spawned(result.exit_status); } Err(err) => { - leaf_reporter.finish( - None, - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), - Some(ExecutionError::Spawn(err)), - ); + leaf_reporter + .finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(ExecutionError::Spawn(err)), + ) + .await; return SpawnOutcome::Failed; } } @@ -280,11 +287,13 @@ pub async fn execute_spawn( { Ok(result) => result, Err(err) => { - leaf_reporter.finish( - None, - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), - Some(ExecutionError::Spawn(err)), - ); + leaf_reporter + .finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(ExecutionError::Spawn(err)), + ) + .await; return SpawnOutcome::Failed; } }; @@ -337,7 +346,7 @@ pub async fn execute_spawn( // 7. 
Finish the leaf execution with the result and optional cache error. // Cache update/fingerprint failures are reported but do not affect the outcome — // the process ran, so we return its actual exit status. - leaf_reporter.finish(Some(result.exit_status), cache_update_status, cache_error); + leaf_reporter.finish(Some(result.exit_status), cache_update_status, cache_error).await; SpawnOutcome::Spawned(result.exit_status) } @@ -409,6 +418,6 @@ impl Session<'_> { // Leaf-level errors and non-zero exit statuses are tracked internally // by the reporter. - reporter.finish() + reporter.finish().await } } diff --git a/crates/vite_task/src/session/mod.rs b/crates/vite_task/src/session/mod.rs index aa90d040..17c5d7e3 100644 --- a/crates/vite_task/src/session/mod.rs +++ b/crates/vite_task/src/session/mod.rs @@ -246,7 +246,10 @@ impl<'a> Session<'a> { .await } Ok(graph) => { - let builder = LabeledReporterBuilder::new(self.workspace_path()); + let builder = LabeledReporterBuilder::new( + self.workspace_path(), + Box::new(tokio::io::stdout()), + ); Ok(self .execute_graph(graph, Box::new(builder)) .await @@ -391,7 +394,8 @@ impl<'a> Session<'a> { let cwd = Arc::clone(&self.cwd); let graph = self.plan_from_cli_run(cwd, run_command).await?; - let builder = LabeledReporterBuilder::new(self.workspace_path()); + let builder = + LabeledReporterBuilder::new(self.workspace_path(), Box::new(tokio::io::stdout())); Ok(self.execute_graph(graph, Box::new(builder)).await.err().unwrap_or(ExitStatus::SUCCESS)) } @@ -467,7 +471,8 @@ impl<'a> Session<'a> { let cache = self.cache()?; // Create a plain (standalone) reporter — no graph awareness, no summary - let plain_reporter = reporter::PlainReporter::new(silent_if_cache_hit); + let plain_reporter = + reporter::PlainReporter::new(silent_if_cache_hit, Box::new(tokio::io::stdout())); // Execute the spawn directly using the free function, bypassing the graph pipeline match execute::execute_spawn( diff --git 
a/crates/vite_task/src/session/reporter/labeled.rs b/crates/vite_task/src/session/reporter/labeled.rs index 3671b27d..8f3ef1d9 100644 --- a/crates/vite_task/src/session/reporter/labeled.rs +++ b/crates/vite_task/src/session/reporter/labeled.rs @@ -6,12 +6,10 @@ //! Tracks statistics across multiple leaf executions, prints command lines with cache //! status indicators, and renders a summary with per-task details at the end. -use std::{ - cell::RefCell, io::Write, process::ExitStatus as StdExitStatus, rc::Rc, sync::Arc, - time::Duration, -}; +use std::{cell::RefCell, process::ExitStatus as StdExitStatus, rc::Rc, sync::Arc, time::Duration}; use owo_colors::Style; +use tokio::io::{AsyncWrite, AsyncWriteExt as _}; use vite_path::AbsolutePath; use vite_str::Str; use vite_task_plan::{ExecutionGraph, ExecutionItemDisplay, ExecutionItemKind, LeafExecutionKind}; @@ -19,8 +17,8 @@ use vite_task_plan::{ExecutionGraph, ExecutionItemDisplay, ExecutionItemKind, Le use super::{ CACHE_MISS_STYLE, COMMAND_STYLE, ColorizeExt, ExitStatus, GraphExecutionReporter, GraphExecutionReporterBuilder, LeafExecutionPath, LeafExecutionReporter, StdioConfig, - StdioSuggestion, format_command_display, write_cache_hit_message, - write_command_with_cache_status, write_error_message, + StdioSuggestion, format_cache_hit_message, format_command_display, + format_command_with_cache_status, format_error_message, }; use crate::session::{ cache::format_cache_status_summary, @@ -99,26 +97,30 @@ pub(super) fn count_spawn_leaves(graph: &ExecutionGraph) -> usize { /// - Results in clean output showing just the command's stdout/stderr pub struct LabeledReporterBuilder { workspace_path: Arc, + writer: Box, } impl LabeledReporterBuilder { /// Create a new labeled reporter builder. /// /// - `workspace_path`: The workspace root, used to compute relative cwds in display. - pub const fn new(workspace_path: Arc) -> Self { - Self { workspace_path } + /// - `writer`: Async writer for reporter display output. 
+ pub fn new(workspace_path: Arc, writer: Box) -> Self { + Self { workspace_path, writer } } } impl GraphExecutionReporterBuilder for LabeledReporterBuilder { fn build(self: Box, graph: &Arc) -> Box { let spawn_leaf_count = count_spawn_leaves(graph); + let writer = Rc::new(RefCell::new(self.writer)); Box::new(LabeledGraphReporter { shared: Rc::new(RefCell::new(SharedReporterState { executions: Vec::new(), stats: ExecutionStats::default(), spawn_leaf_count, })), + writer, graph: Arc::clone(graph), workspace_path: self.workspace_path, }) @@ -131,16 +133,24 @@ impl GraphExecutionReporterBuilder for LabeledReporterBuilder { /// share mutable state with this reporter via `Rc>`. pub struct LabeledGraphReporter { shared: Rc>, + writer: Rc>>, graph: Arc, workspace_path: Arc, } +#[async_trait::async_trait(?Send)] +#[expect( + clippy::await_holding_refcell_ref, + reason = "writer RefCell borrow across await is safe: reporter is !Send, single-threaded, \ + and finish() is called once after all leaf reporters are dropped" +)] impl GraphExecutionReporter for LabeledGraphReporter { fn new_leaf_execution(&mut self, path: &LeafExecutionPath) -> Box { // Look up display info from the graph using the path let display = path.resolve_display(&self.graph).cloned(); Box::new(LabeledLeafReporter { shared: Rc::clone(&self.shared), + writer: Rc::clone(&self.writer), display, workspace_path: Arc::clone(&self.workspace_path), started: false, @@ -148,63 +158,78 @@ impl GraphExecutionReporter for LabeledGraphReporter { }) } - fn finish(self: Box) -> Result<(), ExitStatus> { - let shared = self.shared.borrow(); - - // Print summary. - // Special case: single execution without display info (e.g., synthetic via nested expansion) - // → skip summary since there's nothing meaningful to show. 
- let is_single_displayless = - shared.executions.len() == 1 && shared.executions[0].display.is_none(); - if !is_single_displayless { - print_summary( - &mut std::io::stdout(), - &shared.executions, - &shared.stats, - &self.workspace_path, - ); - } + async fn finish(self: Box) -> Result<(), ExitStatus> { + // Borrow shared state synchronously to build the summary buffer and compute + // the exit result. The borrow is dropped before any async writes. + let (summary_buf, result) = { + let shared = self.shared.borrow(); + + // Print summary. + // Special case: single execution without display info (e.g., synthetic via + // nested expansion) → skip summary since there's nothing meaningful to show. + let is_single_displayless = + shared.executions.len() == 1 && shared.executions[0].display.is_none(); + let summary_buf = if is_single_displayless { + None + } else { + Some(format_summary(&shared.executions, &shared.stats, &self.workspace_path)) + }; + + // Determine exit code based on failed tasks and infrastructure errors: + // - Infrastructure errors (cache lookup, spawn failure) have error_message set + // but no meaningful exit_status. + // - Process failures have a non-zero exit_status. + // + // Rules: + // 1. No failures at all → Ok(()) + // 2. Exactly one process failure, no infra errors → use that task's exit code + // 3. 
Any infra errors, or multiple failures → Err(1) + let has_infra_errors = + shared.executions.iter().any(|exec| exec.error_message.is_some()); + + let failed_exit_codes: Vec = shared + .executions + .iter() + .filter_map(|exec| exec.exit_status.as_ref()) + .filter(|status| !status.success()) + .map(|status| exit_status_to_code(*status)) + .collect(); + + let result = match (has_infra_errors, failed_exit_codes.as_slice()) { + (false, []) => Ok(()), + (false, [code]) => { + // Return the single failed task's exit code (clamped to u8 range) + #[expect( + clippy::cast_sign_loss, + reason = "value is clamped to 1..=255, always positive" + )] + Err(ExitStatus((*code).clamp(1, 255) as u8)) + } + _ => Err(ExitStatus::FAILURE), + }; + + (summary_buf, result) + }; + // shared borrow dropped here - // Determine exit code based on failed tasks and infrastructure errors: - // - Infrastructure errors (cache lookup, spawn failure) have error_message set - // but no meaningful exit_status. - // - Process failures have a non-zero exit_status. - // - // Rules: - // 1. No failures at all → Ok(()) - // 2. Exactly one process failure, no infra errors → use that task's exit code - // 3. 
Any infra errors, or multiple failures → Err(1) - let has_infra_errors = shared.executions.iter().any(|exec| exec.error_message.is_some()); - - let failed_exit_codes: Vec = shared - .executions - .iter() - .filter_map(|exec| exec.exit_status.as_ref()) - .filter(|status| !status.success()) - .map(|status| exit_status_to_code(*status)) - .collect(); - - match (has_infra_errors, failed_exit_codes.as_slice()) { - (false, []) => Ok(()), - (false, [code]) => { - // Return the single failed task's exit code (clamped to u8 range) - #[expect( - clippy::cast_sign_loss, - reason = "value is clamped to 1..=255, always positive" - )] - Err(ExitStatus((*code).clamp(1, 255) as u8)) - } - _ => Err(ExitStatus::FAILURE), + // Write the summary buffer asynchronously (if any) + if let Some(buf) = summary_buf { + let mut writer = self.writer.borrow_mut(); + let _ = writer.write_all(&buf).await; + let _ = writer.flush().await; } + + result } } /// Leaf-level reporter created by [`LabeledGraphReporter::new_leaf_execution`]. /// -/// Writes display output in real-time to stdout and updates shared stats/errors -/// via `Rc>`. +/// Writes display output in real-time to the shared async writer and updates shared +/// stats/errors via `Rc>`. struct LabeledLeafReporter { shared: Rc>, + writer: Rc>>, /// Display info for this execution, looked up from the graph via the path. 
display: Option, workspace_path: Arc, @@ -215,45 +240,58 @@ struct LabeledLeafReporter { is_cache_hit: bool, } +#[async_trait::async_trait(?Send)] +#[expect( + clippy::await_holding_refcell_ref, + reason = "writer RefCell borrow across await is safe: reporter is !Send, single-threaded, \ + and only one leaf is active at a time (no re-entrant access during write_all)" +)] impl LeafExecutionReporter for LabeledLeafReporter { - fn start(&mut self, cache_status: CacheStatus) -> StdioConfig { + async fn start(&mut self, cache_status: CacheStatus) -> StdioConfig { self.started = true; self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. }); - let mut shared = self.shared.borrow_mut(); + // Update shared state synchronously, then drop the borrow before any async writes. + let suggestion = { + let mut shared = self.shared.borrow_mut(); - // Update statistics based on cache status - match &cache_status { - CacheStatus::Hit { .. } => shared.stats.cache_hits += 1, - CacheStatus::Miss(_) => shared.stats.cache_misses += 1, - CacheStatus::Disabled(_) => shared.stats.cache_disabled += 1, - } + // Update statistics based on cache status + match &cache_status { + CacheStatus::Hit { .. } => shared.stats.cache_hits += 1, + CacheStatus::Miss(_) => shared.stats.cache_misses += 1, + CacheStatus::Disabled(_) => shared.stats.cache_disabled += 1, + } - // Print command line with cache status (if display info is available) + // Store execution info for the summary + shared.executions.push(ExecutionInfo { + display: self.display.clone(), + cache_status, + exit_status: None, + error_message: None, + }); + + // Determine stdio suggestion: inherited only when exactly one spawn leaf + if shared.spawn_leaf_count == 1 { + StdioSuggestion::Inherited + } else { + StdioSuggestion::Piped + } + }; + // shared borrow dropped here + + // Format command line with cache status (sync), then write asynchronously. + // The shared borrow to read cache_status is brief and dropped before the await. 
if let Some(ref display) = self.display { - write_command_with_cache_status( - &mut std::io::stdout(), - display, - &self.workspace_path, - &cache_status, - ); + let line = { + let shared = self.shared.borrow(); + let cache_status = &shared.executions.last().unwrap().cache_status; + format_command_with_cache_status(display, &self.workspace_path, cache_status) + }; + let mut writer = self.writer.borrow_mut(); + let _ = writer.write_all(line.as_bytes()).await; + let _ = writer.flush().await; } - // Store execution info for the summary - shared.executions.push(ExecutionInfo { - display: self.display.clone(), - cache_status, - exit_status: None, - error_message: None, - }); - - // Determine stdio suggestion: inherited only when exactly one spawn leaf - let suggestion = if shared.spawn_leaf_count == 1 { - StdioSuggestion::Inherited - } else { - StdioSuggestion::Piped - }; - StdioConfig { suggestion, stdout_writer: Box::new(tokio::io::stdout()), @@ -261,58 +299,76 @@ impl LeafExecutionReporter for LabeledLeafReporter { } } - fn finish( + async fn finish( self: Box, status: Option, _cache_update_status: CacheUpdateStatus, error: Option, ) { - let mut shared = self.shared.borrow_mut(); - let mut stdout = std::io::stdout(); - - // Handle errors — format the full error chain using anyhow's `{:#}` formatter - // (joins cause chain with `: ` separators). - let has_error = error.is_some(); - if let Some(error) = error { - let message: Str = vite_str::format!("{:#}", anyhow::Error::from(error)); - write_error_message(&mut stdout, &message); - - // Update the execution info if start() was called (an entry was pushed). - // Without the `self.started` guard, `last_mut()` would return a - // *different* execution's entry, corrupting its error_message. 
+ // Format error message up front (before borrowing shared state) + let error_message: Option = + error.map(|e| vite_str::format!("{:#}", anyhow::Error::from(e))); + let has_error = error_message.is_some(); + + // Update shared state synchronously, then drop the borrow before any async writes. + { + let mut shared = self.shared.borrow_mut(); + + // Handle errors — update execution info and stats. + // Error message is formatted using anyhow's `{:#}` formatter + // (joins cause chain with `: ` separators). + if let Some(ref message) = error_message { + // Update the execution info if start() was called (an entry was pushed). + // Without the `self.started` guard, `last_mut()` would return a + // *different* execution's entry, corrupting its error_message. + if self.started + && let Some(exec) = shared.executions.last_mut() + { + exec.error_message = Some(message.clone()); + } + + shared.stats.failed += 1; + } + + // Update failure statistics for non-zero exit status (not an error, just a failed task) + // None means success (cache hit or in-process), Some checks the actual exit status + if !has_error && status.is_some_and(|s| !s.success()) { + shared.stats.failed += 1; + } + + // Update execution info with exit status (if start() was called and an entry exists) if self.started && let Some(exec) = shared.executions.last_mut() { - exec.error_message = Some(message); + exec.exit_status = status; } - - shared.stats.failed += 1; } + // shared borrow dropped here - // Update failure statistics for non-zero exit status (not an error, just a failed task) - // None means success (cache hit or in-process), Some checks the actual exit status - if !has_error && status.is_some_and(|s| !s.success()) { - shared.stats.failed += 1; - } + // Build all display output into a buffer (sync), then write once asynchronously. 
+ let mut buf = Vec::new(); - // Update execution info with exit status (if start() was called and an entry exists) - if self.started - && let Some(exec) = shared.executions.last_mut() - { - exec.exit_status = status; + if let Some(ref message) = error_message { + buf.extend_from_slice(format_error_message(message).as_bytes()); } // For executions without display info (synthetics via nested expansion) that are // cache hits, print the cache hit message if self.started && self.display.is_none() && self.is_cache_hit { - write_cache_hit_message(&mut stdout); + buf.extend_from_slice(format_cache_hit_message().as_bytes()); } // Add a trailing newline after each task's output for readability. // Skip if start() was never called (e.g. cache lookup failure) — there's // no task output to separate. if self.started { - let _ = writeln!(stdout); + buf.push(b'\n'); + } + + if !buf.is_empty() { + let mut writer = self.writer.borrow_mut(); + let _ = writer.write_all(&buf).await; + let _ = writer.flush().await; } } } @@ -321,20 +377,25 @@ impl LeafExecutionReporter for LabeledLeafReporter { // Summary printing // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -/// Print the full execution summary with statistics, performance, and per-task details. +/// Format the full execution summary into a byte buffer. /// /// Called by [`LabeledGraphReporter::finish`] after all tasks have executed. -/// Infrastructure errors and task failures are included in the summary. +/// The caller writes the returned buffer to the async writer. +/// +/// Building the summary synchronously into a `Vec` buffer avoids holding +/// `RefCell` borrows across async write points, and ensures atomic output. 
#[expect( clippy::too_many_lines, reason = "summary formatting is inherently verbose with many write calls" )] -fn print_summary( - writer: &mut impl Write, +fn format_summary( executions: &[ExecutionInfo], stats: &ExecutionStats, workspace_path: &AbsolutePath, -) { +) -> Vec { + use std::io::Write; + let mut buf = Vec::new(); + let total = executions.len(); let cache_hits = stats.cache_hits; let cache_misses = stats.cache_misses; @@ -344,23 +405,23 @@ fn print_summary( // Print summary header with decorative line // Note: leaf finish already adds a trailing newline after each task's output // Add an extra blank line before the summary for visual separation - let _ = writeln!(writer); + let _ = writeln!(buf); let _ = writeln!( - writer, + buf, "{}", "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) ); let _ = writeln!( - writer, + buf, "{}", " Vite+ Task Runner • Execution Summary".style(Style::new().bold().bright_white()) ); let _ = writeln!( - writer, + buf, "{}", "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) ); - let _ = writeln!(writer); + let _ = writeln!(buf); // Print statistics let cache_disabled_str = if cache_disabled > 0 { @@ -381,7 +442,7 @@ fn print_summary( // Build statistics line, only including non-empty parts let _ = write!( - writer, + buf, "{} {} {} {}", "Statistics:".style(Style::new().bold()), vite_str::format!(" {total} tasks").style(Style::new().bright_white()), @@ -389,12 +450,12 @@ fn print_summary( vite_str::format!("• {cache_misses} cache misses").style(CACHE_MISS_STYLE), ); if !cache_disabled_str.is_empty() { - let _ = write!(writer, " {cache_disabled_str}"); + let _ = write!(buf, " {cache_disabled_str}"); } if !failed_str.is_empty() { - let _ = write!(writer, " {failed_str}"); + let _ = write!(buf, " {failed_str}"); } - let _ = writeln!(writer); + let _ = writeln!(buf); // Calculate cache hit rate let cache_rate = if total > 0 { @@ -427,7 +488,7 @@ fn 
print_summary( .sum(); let _ = write!( - writer, + buf, "{} {} cache hit rate", "Performance:".style(Style::new().bold()), format_args!("{cache_rate}%").style(if cache_rate >= 75 { @@ -440,19 +501,16 @@ fn print_summary( ); if total_saved > Duration::ZERO { - let _ = write!( - writer, - ", {:.2?} saved in total", - total_saved.style(Style::new().green().bold()) - ); + let _ = + write!(buf, ", {:.2?} saved in total", total_saved.style(Style::new().green().bold())); } - let _ = writeln!(writer); - let _ = writeln!(writer); + let _ = writeln!(buf); + let _ = writeln!(buf); // Detailed task results - let _ = writeln!(writer, "{}", "Task Details:".style(Style::new().bold())); + let _ = writeln!(buf, "{}", "Task Details:".style(Style::new().bold())); let _ = writeln!( - writer, + buf, "{}", "────────────────────────────────────────────────".style(Style::new().bright_black()) ); @@ -467,7 +525,7 @@ fn print_summary( // Task name and index let _ = write!( - writer, + buf, " {} {}", vite_str::format!("[{}]", idx + 1).style(Style::new().bright_black()), task_display.to_string().style(Style::new().bright_white().bold()) @@ -475,28 +533,28 @@ fn print_summary( // Command with cwd prefix let command_display = format_command_display(display, workspace_path); - let _ = write!(writer, ": {}", command_display.style(COMMAND_STYLE)); + let _ = write!(buf, ": {}", command_display.style(COMMAND_STYLE)); // Execution result icon // None means success (cache hit or in-process), Some checks actual status match &exec.exit_status { None => { - let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); + let _ = write!(buf, " {}", "✓".style(Style::new().green().bold())); } Some(exit_status) if exit_status.success() => { - let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); + let _ = write!(buf, " {}", "✓".style(Style::new().green().bold())); } Some(exit_status) => { let code = exit_status_to_code(*exit_status); let _ = write!( - writer, + buf, " {} {}", 
"✗".style(Style::new().red().bold()), vite_str::format!("(exit code: {code})").style(Style::new().red()) ); } } - let _ = writeln!(writer); + let _ = writeln!(buf); // Cache status details — use display module for plain text, apply styling here let cache_summary = format_cache_status_summary(&exec.cache_status); @@ -505,12 +563,12 @@ fn print_summary( CacheStatus::Miss(_) => cache_summary.style(CACHE_MISS_STYLE), CacheStatus::Disabled(_) => cache_summary.style(Style::new().bright_black()), }; - let _ = writeln!(writer, " {styled_summary}"); + let _ = writeln!(buf, " {styled_summary}"); // Error message if present if let Some(ref error_msg) = exec.error_message { let _ = writeln!( - writer, + buf, " {} {}", "✗ Error:".style(Style::new().red().bold()), error_msg.style(Style::new().red()) @@ -520,7 +578,7 @@ fn print_summary( // Add spacing between tasks except for the last one if idx < executions.len() - 1 { let _ = writeln!( - writer, + buf, " {}", "·······················································" .style(Style::new().bright_black()) @@ -529,8 +587,168 @@ fn print_summary( } let _ = writeln!( - writer, + buf, "{}", "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) ); + + buf +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use vite_task_plan::ExecutionGraph; + + use super::*; + use crate::session::{ + event::CacheDisabledReason, + reporter::{ + LeafExecutionPath, LeafExecutionReporter, StdioSuggestion, + test_fixtures::{expanded_task, in_process_task, spawn_task, test_path}, + }, + }; + + // ──────────────────────────────────────────────────────────── + // count_spawn_leaves tests + // ──────────────────────────────────────────────────────────── + + #[test] + fn count_spawn_leaves_empty_graph() { + let graph = ExecutionGraph::default(); + assert_eq!(count_spawn_leaves(&graph), 0); + } + + #[test] + fn count_spawn_leaves_single_spawn() { + let graph = ExecutionGraph::from_node_list([spawn_task("build")]); + 
assert_eq!(count_spawn_leaves(&graph), 1); + } + + #[test] + fn count_spawn_leaves_multiple_spawns() { + let graph = ExecutionGraph::from_node_list([ + spawn_task("build"), + spawn_task("test"), + spawn_task("lint"), + ]); + assert_eq!(count_spawn_leaves(&graph), 3); + } + + #[test] + fn count_spawn_leaves_in_process_not_counted() { + let graph = ExecutionGraph::from_node_list([in_process_task("echo")]); + assert_eq!(count_spawn_leaves(&graph), 0); + } + + #[test] + fn count_spawn_leaves_mixed_spawn_and_in_process() { + let graph = ExecutionGraph::from_node_list([spawn_task("build"), in_process_task("echo")]); + assert_eq!(count_spawn_leaves(&graph), 1); + } + + #[test] + fn count_spawn_leaves_nested_expanded() { + // Build a nested graph containing two spawns + let nested = + ExecutionGraph::from_node_list([spawn_task("nested-build"), spawn_task("nested-test")]); + + // Outer graph has one expanded item pointing to the nested graph + let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); + assert_eq!(count_spawn_leaves(&graph), 2); + } + + #[test] + fn count_spawn_leaves_nested_with_top_level() { + // Nested graph with one spawn + let nested = ExecutionGraph::from_node_list([spawn_task("nested-lint")]); + + // Top-level graph has one spawn + one expanded + let graph = + ExecutionGraph::from_node_list([spawn_task("build"), expanded_task("expand", nested)]); + assert_eq!(count_spawn_leaves(&graph), 2); + } + + // ──────────────────────────────────────────────────────────── + // LabeledLeafReporter stdio suggestion tests + // ──────────────────────────────────────────────────────────── + + /// Build a `LabeledGraphReporter` for the given graph and return a leaf reporter + /// for the first node's first item. 
+ fn build_labeled_leaf(graph: ExecutionGraph) -> Box { + let graph_arc = Arc::new(graph); + let builder = + Box::new(LabeledReporterBuilder::new(test_path(), Box::new(tokio::io::sink()))); + let mut reporter = builder.build(&graph_arc); + + // Create a leaf reporter for the first node + let path = LeafExecutionPath::default(); + reporter.new_leaf_execution(&path) + } + + #[tokio::test] + async fn labeled_reporter_single_spawn_suggests_inherited() { + let graph = ExecutionGraph::from_node_list([spawn_task("build")]); + let mut leaf = build_labeled_leaf(graph); + let stdio_config = + leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); + } + + #[tokio::test] + async fn labeled_reporter_multiple_spawns_suggests_piped() { + let graph = ExecutionGraph::from_node_list([spawn_task("build"), spawn_task("test")]); + let mut leaf = build_labeled_leaf(graph); + let stdio_config = + leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Piped); + } + + #[tokio::test] + async fn labeled_reporter_single_in_process_suggests_piped() { + // Zero spawn leaves → spawn_leaf_count == 0, so not == 1 → Piped + // This is correct: in-process executions don't spawn child processes, + // so stdio suggestion doesn't apply to them. 
+ let graph = ExecutionGraph::from_node_list([in_process_task("echo")]); + let mut leaf = build_labeled_leaf(graph); + let stdio_config = + leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Piped); + } + + #[tokio::test] + async fn labeled_reporter_one_spawn_one_in_process_suggests_inherited() { + // One spawn leaf + one in-process → spawn_leaf_count == 1 → Inherited + let graph = ExecutionGraph::from_node_list([spawn_task("build"), in_process_task("echo")]); + let mut leaf = build_labeled_leaf(graph); + let stdio_config = + leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); + } + + #[tokio::test] + async fn labeled_reporter_nested_single_spawn_suggests_inherited() { + // Nested graph with exactly one spawn + let nested = ExecutionGraph::from_node_list([spawn_task("nested-build")]); + + let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); + let mut leaf = build_labeled_leaf(graph); + let stdio_config = + leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); + } + + #[tokio::test] + async fn labeled_reporter_nested_multiple_spawns_suggests_piped() { + // Nested graph with two spawns + let nested = + ExecutionGraph::from_node_list([spawn_task("nested-a"), spawn_task("nested-b")]); + + let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); + let mut leaf = build_labeled_leaf(graph); + let stdio_config = + leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Piped); + } } diff --git a/crates/vite_task/src/session/reporter/mod.rs b/crates/vite_task/src/session/reporter/mod.rs index 3e468845..6a52552e 100644 --- a/crates/vite_task/src/session/reporter/mod.rs +++ 
b/crates/vite_task/src/session/reporter/mod.rs @@ -26,7 +26,6 @@ mod plain; // Re-export the concrete implementations so callers can use `reporter::PlainReporter` // and `reporter::LabeledReporterBuilder` without navigating into child modules. use std::{ - io::Write, process::ExitStatus as StdExitStatus, sync::{Arc, LazyLock}, }; @@ -206,6 +205,7 @@ pub trait GraphExecutionReporterBuilder { /// /// Creates [`LeafExecutionReporter`] instances for individual leaf executions /// and finalizes the session with `finish()`. +#[async_trait::async_trait(?Send)] pub trait GraphExecutionReporter { /// Create a new leaf execution reporter for the leaf identified by `path`. /// @@ -221,7 +221,7 @@ pub trait GraphExecutionReporter { /// /// Returns `Ok(())` if all tasks succeeded, or `Err(ExitStatus)` to indicate the /// process should exit with the given status code. - fn finish(self: Box) -> Result<(), ExitStatus>; + async fn finish(self: Box) -> Result<(), ExitStatus>; } /// Reporter for a single leaf execution (one spawned process or in-process command). @@ -230,6 +230,7 @@ pub trait GraphExecutionReporter { /// /// `start()` may not be called before `finish()` if an error occurs before the cache /// status is determined (e.g., cache lookup failure). +#[async_trait::async_trait(?Send)] pub trait LeafExecutionReporter { /// Report that execution is starting with the given cache status. /// @@ -243,7 +244,7 @@ pub trait LeafExecutionReporter { /// AND whether caching is enabled — inherited stdio is only used when the /// suggestion is [`StdioSuggestion::Inherited`] AND the task has no cache /// metadata (caching disabled). - fn start(&mut self, cache_status: CacheStatus) -> StdioConfig; + async fn start(&mut self, cache_status: CacheStatus) -> StdioConfig; /// Finalize this leaf execution. /// @@ -253,7 +254,7 @@ pub trait LeafExecutionReporter { /// failure, spawn failure, fingerprint creation failure, cache update failure). 
/// /// This method consumes the reporter — no further calls are possible after `finish()`. - fn finish( + async fn finish( self: Box, status: Option, cache_update_status: CacheUpdateStatus, @@ -298,68 +299,61 @@ fn format_command_display(display: &ExecutionItemDisplay, workspace_path: &Absol vite_str::format!("{cwd_str}$ {}", display.command) } -/// Write the command line with optional inline cache status to the writer. +/// Format the command line with optional inline cache status. /// /// This is called during `start()` to show the user what command is being executed -/// and its cache status. -fn write_command_with_cache_status( - writer: &mut impl Write, +/// and its cache status. The caller writes the returned string to the async writer. +fn format_command_with_cache_status( display: &ExecutionItemDisplay, workspace_path: &AbsolutePath, cache_status: &CacheStatus, -) { +) -> Str { let command_str = format_command_display(display, workspace_path); - if let Some(inline_status) = format_cache_status_inline(cache_status) { - // Apply styling based on cache status type - let styled_status = match cache_status { - CacheStatus::Hit { .. } => inline_status.style(Style::new().green().dimmed()), - CacheStatus::Miss(_) => inline_status.style(CACHE_MISS_STYLE.dimmed()), - CacheStatus::Disabled(_) => inline_status.style(Style::new().bright_black()), - }; - let _ = writeln!(writer, "{} {}", command_str.style(COMMAND_STYLE), styled_status); - } else { - let _ = writeln!(writer, "{}", command_str.style(COMMAND_STYLE)); - } + format_cache_status_inline(cache_status).map_or_else( + || vite_str::format!("{}\n", command_str.style(COMMAND_STYLE)), + |inline_status| { + // Apply styling based on cache status type + let styled_status = match cache_status { + CacheStatus::Hit { .. 
} => inline_status.style(Style::new().green().dimmed()), + CacheStatus::Miss(_) => inline_status.style(CACHE_MISS_STYLE.dimmed()), + CacheStatus::Disabled(_) => inline_status.style(Style::new().bright_black()), + }; + vite_str::format!("{} {}\n", command_str.style(COMMAND_STYLE), styled_status) + }, + ) } -/// Write an error message in red with an error icon. -fn write_error_message(writer: &mut impl Write, message: &str) { - let _ = writeln!( - writer, - "{} {}", +/// Format an error message in red with an error icon. +fn format_error_message(message: &str) -> Str { + vite_str::format!( + "{} {}\n", "✗".style(Style::new().red().bold()), message.style(Style::new().red()) - ); + ) } -/// Write the "cache hit, logs replayed" message for synthetic executions without display info. -fn write_cache_hit_message(writer: &mut impl Write) { - let _ = - writeln!(writer, "{}", "✓ cache hit, logs replayed".style(Style::new().green().dimmed())); +/// Format the "cache hit, logs replayed" message for synthetic executions without display info. 
+fn format_cache_hit_message() -> Str { + vite_str::format!("{}\n", "✓ cache hit, logs replayed".style(Style::new().green().dimmed())) } // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -// Tests +// Test fixtures (shared by child module tests) // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ #[cfg(test)] -mod tests { - use std::collections::BTreeMap; +pub mod test_fixtures { + use std::{collections::BTreeMap, sync::Arc}; + use vite_path::AbsolutePath; use vite_task_graph::display::TaskDisplay; use vite_task_plan::{ - ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, InProcessExecution, + ExecutionGraph, ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, InProcessExecution, LeafExecutionKind, SpawnCommand, SpawnExecution, TaskExecution, }; - use super::*; - use crate::session::{ - event::{CacheDisabledReason, CacheStatus}, - reporter::labeled::count_spawn_leaves, - }; - /// Create a dummy `AbsolutePath` for test fixtures. - fn test_path() -> Arc { + pub fn test_path() -> Arc { #[cfg(unix)] { Arc::from(AbsolutePath::new("/test").unwrap()) @@ -371,7 +365,7 @@ mod tests { } /// Create a dummy `TaskDisplay` for test fixtures. - fn test_task_display(name: &str) -> TaskDisplay { + pub fn test_task_display(name: &str) -> TaskDisplay { TaskDisplay { package_name: "pkg".into(), task_name: name.into(), @@ -380,7 +374,7 @@ mod tests { } /// Create a dummy `ExecutionItemDisplay` for test fixtures. - fn test_display(name: &str) -> ExecutionItemDisplay { + pub fn test_display(name: &str) -> ExecutionItemDisplay { ExecutionItemDisplay { task_display: test_task_display(name), command: name.into(), @@ -390,7 +384,7 @@ mod tests { } /// Create a `TaskExecution` with a single spawn leaf. 
- fn spawn_task(name: &str) -> TaskExecution { + pub fn spawn_task(name: &str) -> TaskExecution { TaskExecution { task_display: test_task_display(name), items: vec![ExecutionItem { @@ -409,7 +403,7 @@ mod tests { } /// Create a `TaskExecution` with a single in-process leaf (echo). - fn in_process_task(name: &str) -> TaskExecution { + pub fn in_process_task(name: &str) -> TaskExecution { TaskExecution { task_display: test_task_display(name), items: vec![ExecutionItem { @@ -427,7 +421,7 @@ mod tests { } /// Create a `TaskExecution` with an expanded (nested) subgraph as its item. - fn expanded_task(name: &str, nested_graph: ExecutionGraph) -> TaskExecution { + pub fn expanded_task(name: &str, nested_graph: ExecutionGraph) -> TaskExecution { TaskExecution { task_display: test_task_display(name), items: vec![ExecutionItem { @@ -436,159 +430,4 @@ mod tests { }], } } - - // ──────────────────────────────────────────────────────────── - // count_spawn_leaves tests - // ──────────────────────────────────────────────────────────── - - #[test] - fn count_spawn_leaves_empty_graph() { - let graph = ExecutionGraph::default(); - assert_eq!(count_spawn_leaves(&graph), 0); - } - - #[test] - fn count_spawn_leaves_single_spawn() { - let graph = ExecutionGraph::from_node_list([spawn_task("build")]); - assert_eq!(count_spawn_leaves(&graph), 1); - } - - #[test] - fn count_spawn_leaves_multiple_spawns() { - let graph = ExecutionGraph::from_node_list([ - spawn_task("build"), - spawn_task("test"), - spawn_task("lint"), - ]); - assert_eq!(count_spawn_leaves(&graph), 3); - } - - #[test] - fn count_spawn_leaves_in_process_not_counted() { - let graph = ExecutionGraph::from_node_list([in_process_task("echo")]); - assert_eq!(count_spawn_leaves(&graph), 0); - } - - #[test] - fn count_spawn_leaves_mixed_spawn_and_in_process() { - let graph = ExecutionGraph::from_node_list([spawn_task("build"), in_process_task("echo")]); - assert_eq!(count_spawn_leaves(&graph), 1); - } - - #[test] - fn 
count_spawn_leaves_nested_expanded() { - // Build a nested graph containing two spawns - let nested = - ExecutionGraph::from_node_list([spawn_task("nested-build"), spawn_task("nested-test")]); - - // Outer graph has one expanded item pointing to the nested graph - let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); - assert_eq!(count_spawn_leaves(&graph), 2); - } - - #[test] - fn count_spawn_leaves_nested_with_top_level() { - // Nested graph with one spawn - let nested = ExecutionGraph::from_node_list([spawn_task("nested-lint")]); - - // Top-level graph has one spawn + one expanded - let graph = - ExecutionGraph::from_node_list([spawn_task("build"), expanded_task("expand", nested)]); - assert_eq!(count_spawn_leaves(&graph), 2); - } - - // ──────────────────────────────────────────────────────────── - // PlainReporter stdio suggestion tests - // ──────────────────────────────────────────────────────────── - - #[test] - fn plain_reporter_always_suggests_inherited() { - let mut reporter = PlainReporter::new(false); - let stdio_config = - reporter.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); - } - - #[test] - fn plain_reporter_suggests_inherited_even_when_silent() { - let mut reporter = PlainReporter::new(true); - let stdio_config = - reporter.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); - } - - // ──────────────────────────────────────────────────────────── - // LabeledLeafReporter stdio suggestion tests - // ──────────────────────────────────────────────────────────── - - /// Build a `LabeledGraphReporter` for the given graph and return a leaf reporter - /// for the first node's first item. 
- fn build_labeled_leaf(graph: ExecutionGraph) -> Box<dyn LeafExecutionReporter> { - let graph_arc = Arc::new(graph); - let builder = Box::new(LabeledReporterBuilder::new(test_path())); - let mut reporter = builder.build(&graph_arc); - - // Create a leaf reporter for the first node - let path = LeafExecutionPath::default(); - reporter.new_leaf_execution(&path) - } - - #[test] - fn labeled_reporter_single_spawn_suggests_inherited() { - let graph = ExecutionGraph::from_node_list([spawn_task("build")]); - let mut leaf = build_labeled_leaf(graph); - let stdio_config = leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); - } - - #[test] - fn labeled_reporter_multiple_spawns_suggests_piped() { - let graph = ExecutionGraph::from_node_list([spawn_task("build"), spawn_task("test")]); - let mut leaf = build_labeled_leaf(graph); - let stdio_config = leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Piped); - } - - #[test] - fn labeled_reporter_single_in_process_suggests_piped() { - // Zero spawn leaves → spawn_leaf_count == 0, so not == 1 → Piped - // This is correct: in-process executions don't spawn child processes, - // so stdio suggestion doesn't apply to them.
- let graph = ExecutionGraph::from_node_list([in_process_task("echo")]); - let mut leaf = build_labeled_leaf(graph); - let stdio_config = leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Piped); - } - - #[test] - fn labeled_reporter_one_spawn_one_in_process_suggests_inherited() { - // One spawn leaf + one in-process → spawn_leaf_count == 1 → Inherited - let graph = ExecutionGraph::from_node_list([spawn_task("build"), in_process_task("echo")]); - let mut leaf = build_labeled_leaf(graph); - let stdio_config = leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); - } - - #[test] - fn labeled_reporter_nested_single_spawn_suggests_inherited() { - // Nested graph with exactly one spawn - let nested = ExecutionGraph::from_node_list([spawn_task("nested-build")]); - - let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); - let mut leaf = build_labeled_leaf(graph); - let stdio_config = leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); - } - - #[test] - fn labeled_reporter_nested_multiple_spawns_suggests_piped() { - // Nested graph with two spawns - let nested = - ExecutionGraph::from_node_list([spawn_task("nested-a"), spawn_task("nested-b")]); - - let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); - let mut leaf = build_labeled_leaf(graph); - let stdio_config = leaf.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)); - assert_eq!(stdio_config.suggestion, StdioSuggestion::Piped); - } } diff --git a/crates/vite_task/src/session/reporter/plain.rs b/crates/vite_task/src/session/reporter/plain.rs index 0532019a..94e74ee0 100644 --- a/crates/vite_task/src/session/reporter/plain.rs +++ b/crates/vite_task/src/session/reporter/plain.rs @@ -1,11 +1,13 @@ //! 
Plain reporter — a standalone [`LeafExecutionReporter`] for single-leaf execution. //! //! Used for synthetic executions (e.g., auto-install) where there is no execution graph -//! and no summary is needed. Writes directly to stdout/stderr with no shared state. +//! and no summary is needed. Writes directly to the provided writer with no shared state. + +use tokio::io::{AsyncWrite, AsyncWriteExt as _}; use super::{ - LeafExecutionReporter, StdioConfig, StdioSuggestion, write_cache_hit_message, - write_error_message, + LeafExecutionReporter, StdioConfig, StdioSuggestion, format_cache_hit_message, + format_error_message, }; use crate::session::event::{CacheStatus, CacheUpdateStatus, ExecutionError}; @@ -13,7 +15,7 @@ use crate::session::event::{CacheStatus, CacheUpdateStatus, ExecutionError}; /// (e.g., `execute_synthetic`). /// /// This reporter: -/// - Writes display output (errors, cache-hit messages) directly to stdout +/// - Writes display output (errors, cache-hit messages) to the provided async writer /// - Has no display info (synthetic executions have no task display) /// - Does not track stats or print summaries /// - Supports `silent_if_cache_hit` to suppress output for cached executions @@ -21,6 +23,8 @@ use crate::session::event::{CacheStatus, CacheUpdateStatus, ExecutionError}; /// The exit status is determined by the caller from the `execute_spawn` return value, /// not from the reporter. pub struct PlainReporter { + /// Async writer for reporter display output (errors, cache-hit messages). + writer: Box<dyn AsyncWrite + Unpin>, /// When true, suppresses all output (command line, process output, cache hit message) /// for executions that are cache hits. silent_if_cache_hit: bool, @@ -32,8 +36,9 @@ impl PlainReporter { /// Create a new plain reporter. /// /// - `silent_if_cache_hit`: If true, suppress all output when the execution is a cache hit.
- pub const fn new(silent_if_cache_hit: bool) -> Self { - Self { silent_if_cache_hit, is_cache_hit: false } + /// - `writer`: Async writer for reporter display output. + pub fn new(silent_if_cache_hit: bool, writer: Box<dyn AsyncWrite + Unpin>) -> Self { + Self { writer, silent_if_cache_hit, is_cache_hit: false } } /// Returns true if output should be suppressed for this execution. @@ -42,8 +47,9 @@ impl PlainReporter { } } +#[async_trait::async_trait(?Send)] impl LeafExecutionReporter for PlainReporter { - fn start(&mut self, cache_status: CacheStatus) -> StdioConfig { + async fn start(&mut self, cache_status: CacheStatus) -> StdioConfig { self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. }); // PlainReporter is used for single-leaf synthetic executions (e.g., auto-install). // Always suggest inherited stdio so the spawned process can be interactive. @@ -69,24 +75,48 @@ impl LeafExecutionReporter for PlainReporter { } } - fn finish( - self: Box<Self>, + async fn finish( + mut self: Box<Self>, _status: Option<StdExitStatus>, _cache_update_status: CacheUpdateStatus, error: Option<ExecutionError>, ) { - let mut stdout = std::io::stdout(); - // Handle errors — format the full error chain and print inline.
if let Some(error) = error { let message = vite_str::format!("{:#}", anyhow::Error::from(error)); - write_error_message(&mut stdout, &message); + let line = format_error_message(&message); + let _ = self.writer.write_all(line.as_bytes()).await; + let _ = self.writer.flush().await; return; } // For cache hits, print the "cache hit" message (unless silent) if self.is_cache_hit && !self.is_silent() { - write_cache_hit_message(&mut stdout); + let line = format_cache_hit_message(); + let _ = self.writer.write_all(line.as_bytes()).await; + let _ = self.writer.flush().await; } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::session::event::CacheDisabledReason; + + #[tokio::test] + async fn plain_reporter_always_suggests_inherited() { + let mut reporter = PlainReporter::new(false, Box::new(tokio::io::sink())); + let stdio_config = + reporter.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); + } + + #[tokio::test] + async fn plain_reporter_suggests_inherited_even_when_silent() { + let mut reporter = PlainReporter::new(true, Box::new(tokio::io::sink())); + let stdio_config = + reporter.start(CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata)).await; + assert_eq!(stdio_config.suggestion, StdioSuggestion::Inherited); + } +}