From e503a8bb21ce2663d331f5f77e581ea705fc8651 Mon Sep 17 00:00:00 2001 From: branchseer Date: Fri, 13 Feb 2026 16:21:51 +0800 Subject: [PATCH 01/11] refactor: extract ExecutionPlan::plan_query returning ExecutionGraph Extract the query planning logic from ExecutionPlan::plan into a new plan_query method that accepts QueryPlanRequest and returns Result directly. Session::plan_from_cli now returns ExecutionGraph instead of ExecutionPlan, and callers wrap it via the new ExecutionPlan::from_execution_graph constructor. Also narrows RunCommand::into_plan_request to into_query_plan_request, since it always produced a Query variant. --- crates/vite_task/src/cli/mod.rs | 12 ++-- crates/vite_task/src/session/mod.rs | 41 ++++++------- crates/vite_task_plan/src/lib.rs | 57 +++++++++++++++---- .../tests/plan_snapshots/main.rs | 3 +- 4 files changed, 76 insertions(+), 37 deletions(-) diff --git a/crates/vite_task/src/cli/mod.rs b/crates/vite_task/src/cli/mod.rs index 45bc76d5..7e6a76d4 100644 --- a/crates/vite_task/src/cli/mod.rs +++ b/crates/vite_task/src/cli/mod.rs @@ -4,7 +4,7 @@ use clap::Parser; use vite_path::AbsolutePath; use vite_str::Str; use vite_task_graph::{TaskSpecifier, query::TaskQueryKind}; -use vite_task_plan::plan_request::{PlanOptions, PlanRequest, QueryPlanRequest}; +use vite_task_plan::plan_request::{PlanOptions, QueryPlanRequest}; #[derive(Debug, Clone, clap::Subcommand)] pub enum CacheSubcommand { @@ -70,16 +70,16 @@ pub enum CLITaskQueryError { } impl RunCommand { - /// Convert to `PlanRequest`, or return an error if invalid. + /// Convert to `QueryPlanRequest`, or return an error if invalid. /// /// # Errors /// /// Returns an error if `--recursive` and `--transitive` are both set, /// or if a package name is specified with `--recursive`. 
- pub fn into_plan_request( + pub fn into_query_plan_request( self, cwd: &Arc, - ) -> Result { + ) -> Result { let Self { task_specifier, flags: RunFlags { recursive, transitive, ignore_depends_on }, @@ -110,9 +110,9 @@ impl RunCommand { include_topological_deps: transitive, } }; - Ok(PlanRequest::Query(QueryPlanRequest { + Ok(QueryPlanRequest { query: vite_task_graph::query::TaskQuery { kind: query_kind, include_explicit_deps }, plan_options: PlanOptions { extra_args: additional_args.into() }, - })) + }) } } diff --git a/crates/vite_task/src/session/mod.rs b/crates/vite_task/src/session/mod.rs index 69f407ec..dc25ac28 100644 --- a/crates/vite_task/src/session/mod.rs +++ b/crates/vite_task/src/session/mod.rs @@ -21,7 +21,7 @@ use vite_task_graph::{ loader::UserConfigLoader, }; use vite_task_plan::{ - ExecutionPlan, TaskGraphLoader, + ExecutionGraph, ExecutionPlan, TaskGraphLoader, TaskPlanErrorKind, plan_request::{PlanRequest, ScriptCommand, SyntheticPlanRequest}, prepend_path_env, }; @@ -102,15 +102,17 @@ impl vite_task_plan::PlanRequestParser for PlanRequestParser<'_> { Command::Cache { .. 
} => Ok(Some(PlanRequest::Synthetic( command.to_synthetic_plan_request(UserCacheConfig::disabled()), ))), - Command::Run(run_command) => match run_command.into_plan_request(&command.cwd) { - Ok(plan_request) => Ok(Some(plan_request)), - Err(crate::cli::CLITaskQueryError::MissingTaskSpecifier) => { - Ok(Some(PlanRequest::Synthetic( - command.to_synthetic_plan_request(UserCacheConfig::disabled()), - ))) + Command::Run(run_command) => { + match run_command.into_query_plan_request(&command.cwd) { + Ok(query_plan_request) => Ok(Some(PlanRequest::Query(query_plan_request))), + Err(crate::cli::CLITaskQueryError::MissingTaskSpecifier) => { + Ok(Some(PlanRequest::Synthetic( + command.to_synthetic_plan_request(UserCacheConfig::disabled()), + ))) + } + Err(err) => Err(err.into()), } - Err(err) => Err(err.into()), - }, + } }, HandledCommand::Verbatim => Ok(None), } @@ -238,7 +240,7 @@ impl<'a> Session<'a> { let additional_args = run_command.additional_args.clone(); match self.plan_from_cli(cwd, run_command).await { - Ok(plan) if plan.is_empty() => { + Ok(ref graph) if graph.node_count() == 0 => { // No tasks matched the query — show task selector / "did you mean" self.handle_no_task( is_interactive, @@ -248,7 +250,8 @@ impl<'a> Session<'a> { ) .await } - Ok(plan) => { + Ok(graph) => { + let plan = ExecutionPlan::from_execution_graph(graph); let reporter = LabeledReporter::new(std::io::stdout(), self.workspace_path()); Ok(self @@ -398,7 +401,8 @@ impl<'a> Session<'a> { RunCommand { task_specifier: Some(task_specifier), flags, additional_args }; let cwd = Arc::clone(&self.cwd); - let plan = self.plan_from_cli(cwd, run_command).await?; + let graph = self.plan_from_cli(cwd, run_command).await?; + let plan = ExecutionPlan::from_execution_graph(graph); let reporter = LabeledReporter::new(std::io::stdout(), self.workspace_path()); Ok(self.execute(plan, Box::new(reporter)).await.err().unwrap_or(ExitStatus::SUCCESS)) } @@ -480,9 +484,9 @@ impl<'a> Session<'a> { &mut self, cwd: Arc, 
command: RunCommand, - ) -> Result { - let plan_request = match command.into_plan_request(&cwd) { - Ok(plan_request) => plan_request, + ) -> Result { + let query_plan_request = match command.into_query_plan_request(&cwd) { + Ok(query_plan_request) => query_plan_request, Err(crate::cli::CLITaskQueryError::MissingTaskSpecifier) => { return Err(vite_task_plan::Error::MissingTaskSpecifier); } @@ -495,15 +499,14 @@ impl<'a> Session<'a> { }); } }; - let plan = ExecutionPlan::plan( - plan_request, + ExecutionPlan::plan_query( + query_plan_request, &self.workspace_path, &cwd, &self.envs, &mut self.plan_request_parser, &mut self.lazy_task_graph, ) - .await?; - Ok(plan) + .await } } diff --git a/crates/vite_task_plan/src/lib.rs b/crates/vite_task_plan/src/lib.rs index 756f8069..03fc7ea5 100644 --- a/crates/vite_task_plan/src/lib.rs +++ b/crates/vite_task_plan/src/lib.rs @@ -11,12 +11,13 @@ pub mod plan_request; use std::{collections::BTreeMap, ffi::OsStr, fmt::Debug, sync::Arc}; use context::PlanContext; -pub use error::Error; -use execution_graph::ExecutionGraph; +use error::TaskPlanErrorKindResultExt; +pub use error::{Error, TaskPlanErrorKind}; +pub use execution_graph::ExecutionGraph; use in_process::InProcessExecution; pub use path_env::{get_path_env, prepend_path_env}; use plan::{ParentCacheConfig, plan_query_request, plan_synthetic_request}; -use plan_request::{PlanRequest, SyntheticPlanRequest}; +use plan_request::{PlanRequest, QueryPlanRequest, SyntheticPlanRequest}; use rustc_hash::FxHashMap; use serde::{Serialize, ser::SerializeMap as _}; use vite_graph_ser::serialize_by_key; @@ -200,6 +201,41 @@ impl ExecutionPlan { } } + /// Create an execution plan from an execution graph. + #[must_use] + pub const fn from_execution_graph(execution_graph: ExecutionGraph) -> Self { + Self { root_node: ExecutionItemKind::Expanded(execution_graph) } + } + + /// Plan a query execution: load the task graph, query it, and build the execution graph. 
+ /// + /// # Errors + /// Returns an error if task graph loading, query, or execution planning fails. + #[expect(clippy::future_not_send, reason = "PlanRequestParser and TaskGraphLoader are !Send")] + pub async fn plan_query( + query_plan_request: QueryPlanRequest, + workspace_path: &Arc, + cwd: &Arc, + envs: &FxHashMap, Arc>, + plan_request_parser: &mut (dyn PlanRequestParser + '_), + task_graph_loader: &mut (dyn TaskGraphLoader + '_), + ) -> Result { + let indexed_task_graph = task_graph_loader + .load_task_graph() + .await + .map_err(TaskPlanErrorKind::TaskGraphLoadError) + .with_empty_call_stack()?; + + let context = PlanContext::new( + workspace_path, + Arc::clone(cwd), + envs.clone(), + plan_request_parser, + indexed_task_graph, + ); + plan_query_request(query_plan_request, context).await + } + /// Plan an execution from a plan request. /// /// # Errors @@ -215,16 +251,15 @@ impl ExecutionPlan { ) -> Result { let root_node = match plan_request { PlanRequest::Query(query_plan_request) => { - let indexed_task_graph = task_graph_loader.load_task_graph().await?; - - let context = PlanContext::new( + let execution_graph = Self::plan_query( + query_plan_request, workspace_path, - Arc::clone(cwd), - envs.clone(), + cwd, + envs, plan_request_parser, - indexed_task_graph, - ); - let execution_graph = plan_query_request(query_plan_request, context).await?; + task_graph_loader, + ) + .await?; ExecutionItemKind::Expanded(execution_graph) } PlanRequest::Synthetic(synthetic_plan_request) => { diff --git a/crates/vite_task_plan/tests/plan_snapshots/main.rs b/crates/vite_task_plan/tests/plan_snapshots/main.rs index d19a946f..f47588e6 100644 --- a/crates/vite_task_plan/tests/plan_snapshots/main.rs +++ b/crates/vite_task_plan/tests/plan_snapshots/main.rs @@ -11,6 +11,7 @@ use tokio::runtime::Runtime; use vite_path::{AbsolutePath, AbsolutePathBuf, RelativePathBuf}; use vite_str::Str; use vite_task::{Command, Session}; +use vite_task_plan::ExecutionPlan; use 
vite_workspace::find_workspace_root; /// Local parser wrapper for `BuiltInCommand` @@ -183,7 +184,7 @@ fn run_case_inner( session.plan_from_cli(workspace_root.path.join(plan.cwd).into(), run_command).await; let plan = match plan_result { - Ok(plan) => plan, + Ok(graph) => ExecutionPlan::from_execution_graph(graph), Err(err) => { #[expect( clippy::disallowed_macros, From 80e4090804aa4c6352c803a9a90db110c6c79d49 Mon Sep 17 00:00:00 2001 From: branchseer Date: Fri, 13 Feb 2026 18:39:48 +0800 Subject: [PATCH 02/11] refactor: replace event-based reporter with typestate pattern MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the flat Reporter trait with ExecutionEvent/ExecutionId dispatch with a three-phase typestate hierarchy: GraphExecutionReporterBuilder → GraphExecutionReporter → LeafExecutionReporter. Split into PlainReporter (standalone, for single-leaf synthetic execution) and LabeledReporter family (graph-aware, with summary aggregation and shared state via Rc). Extract execute_spawn as a free function reusable from both graph execution and execute_synthetic. Simplify plan_synthetic to return SpawnExecution directly. Remove ExecutionEvent, ExecutionEventKind, ExecutionId, and the CycleDetected cache variant (cycles are now graph-level errors via ExecutionAborted). 
--- Cargo.lock | 1 + crates/vite_task/Cargo.toml | 1 + crates/vite_task/src/session/cache/display.rs | 2 - crates/vite_task/src/session/event.rs | 49 +- crates/vite_task/src/session/execute/mod.rs | 590 ++++---- crates/vite_task/src/session/mod.rs | 80 +- crates/vite_task/src/session/reporter.rs | 1226 ++++++++++------- crates/vite_task_bin/src/main.rs | 7 +- .../snapshots/cycle dependency error.snap | 1 - .../exec-api/snapshots/exec caching.snap | 2 - crates/vite_task_plan/src/lib.rs | 13 +- .../tests/plan_snapshots/main.rs | 5 +- 12 files changed, 1115 insertions(+), 862 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cab5bbb8..5932fe33 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3884,6 +3884,7 @@ dependencies = [ "rustc-hash", "serde", "serde_json", + "smallvec 2.0.0-alpha.12", "thiserror 2.0.18", "tokio", "tracing", diff --git a/crates/vite_task/Cargo.toml b/crates/vite_task/Cargo.toml index d485a683..131fcbb7 100644 --- a/crates/vite_task/Cargo.toml +++ b/crates/vite_task/Cargo.toml @@ -30,6 +30,7 @@ rusqlite = { workspace = true, features = ["bundled"] } rustc-hash = { workspace = true } serde = { workspace = true, features = ["derive", "rc"] } serde_json = { workspace = true } +smallvec.workspace = true thiserror = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "io-std", "macros", "sync"] } tracing = { workspace = true } diff --git a/crates/vite_task/src/session/cache/display.rs b/crates/vite_task/src/session/cache/display.rs index e2e4b573..0db234db 100644 --- a/crates/vite_task/src/session/cache/display.rs +++ b/crates/vite_task/src/session/cache/display.rs @@ -185,7 +185,6 @@ pub fn format_cache_status_inline(cache_status: &CacheStatus) -> Option { let message = match reason { CacheDisabledReason::InProcessExecution => "cache disabled: built-in command", CacheDisabledReason::NoCacheMetadata => "cache disabled: no cache config", - CacheDisabledReason::CycleDetected => "cache disabled: cycle detected", }; 
Some(vite_str::format!("⊘ {message}")) } @@ -278,7 +277,6 @@ pub fn format_cache_status_summary(cache_status: &CacheStatus) -> Str { let message = match reason { CacheDisabledReason::InProcessExecution => "Cache disabled for built-in command", CacheDisabledReason::NoCacheMetadata => "Cache disabled in task configuration", - CacheDisabledReason::CycleDetected => "Cache disabled: cycle detected", }; vite_str::format!("→ {message}") } diff --git a/crates/vite_task/src/session/event.rs b/crates/vite_task/src/session/event.rs index e8e47604..ded050ff 100644 --- a/crates/vite_task/src/session/event.rs +++ b/crates/vite_task/src/session/event.rs @@ -1,10 +1,5 @@ use std::{process::ExitStatus, time::Duration}; -use bstr::BString; -use vite_str::Str; -// Re-export ExecutionItemDisplay from vite_task_plan since it's the canonical definition -pub use vite_task_plan::ExecutionItemDisplay; - use super::cache::CacheMiss; #[derive(Debug)] @@ -17,7 +12,6 @@ pub enum OutputKind { pub enum CacheDisabledReason { InProcessExecution, NoCacheMetadata, - CycleDetected, } #[derive(Debug)] @@ -34,8 +28,16 @@ pub enum CacheNotUpdatedReason { pub enum CacheUpdateStatus { /// Cache was successfully updated with new fingerprint and outputs Updated, - /// Cache was not updated (with reason) - NotUpdated(CacheNotUpdatedReason), + /// Cache was not updated (with reason). + /// The reason is part of the `LeafExecutionReporter` trait contract — reporters + /// can use it for detailed logging, even if current implementations don't. 
+ NotUpdated( + #[expect( + dead_code, + reason = "part of LeafExecutionReporter trait contract; reporters may use for detailed logging" + )] + CacheNotUpdatedReason, + ), } #[derive(Debug)] @@ -66,34 +68,3 @@ pub fn exit_status_to_code(status: ExitStatus) -> i32 { status.code().unwrap_or(1) } } - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct ExecutionId(u32); - -impl ExecutionId { - pub(crate) const fn zero() -> Self { - Self(0) - } - - pub(crate) const fn next(self) -> Self { - Self(self.0.checked_add(1).expect("ExecutionId overflow")) - } -} - -#[derive(Debug)] -pub struct ExecutionEvent { - pub execution_id: ExecutionId, - pub kind: ExecutionEventKind, -} - -#[derive(Debug)] -#[expect( - clippy::large_enum_variant, - reason = "event variants are consumed once and not stored in collections" -)] -pub enum ExecutionEventKind { - Start { display: Option, cache_status: CacheStatus }, - Output { kind: OutputKind, content: BString }, - Error { message: Str }, - Finish { status: Option, cache_update_status: CacheUpdateStatus }, -} diff --git a/crates/vite_task/src/session/execute/mod.rs b/crates/vite_task/src/session/execute/mod.rs index e9abd806..1505d336 100644 --- a/crates/vite_task/src/session/execute/mod.rs +++ b/crates/vite_task/src/session/execute/mod.rs @@ -6,8 +6,9 @@ use std::sync::Arc; use futures_util::FutureExt; use petgraph::{algo::toposort, stable_graph::StableGraph}; use vite_path::AbsolutePath; +use vite_str::Str; use vite_task_plan::{ - ExecutionItemKind, ExecutionPlan, LeafExecutionKind, SpawnExecution, TaskExecution, + ExecutionGraph, ExecutionItemKind, LeafExecutionKind, SpawnExecution, TaskExecution, execution_graph::ExecutionIx, }; @@ -18,401 +19,368 @@ use self::{ use super::{ cache::{CommandCacheValue, ExecutionCache}, event::{ - CacheDisabledReason, CacheNotUpdatedReason, CacheStatus, CacheUpdateStatus, ExecutionEvent, - ExecutionEventKind, ExecutionId, ExecutionItemDisplay, OutputKind, + CacheDisabledReason, 
CacheNotUpdatedReason, CacheStatus, CacheUpdateStatus, OutputKind, + }, + reporter::{ + ExitStatus, GraphExecutionReporter, GraphExecutionReporterBuilder, LeafExecutionPath, + LeafExecutionReporter, }, - reporter::{ExitStatus, Reporter}, }; use crate::{Session, session::execute::spawn::SpawnTrackResult}; /// Internal error type used to abort execution when errors occur. -/// This error is swallowed in `Session::execute` and never exposed externally. +/// +/// Contains an optional graph-level error message: +/// - `None`: A leaf-level error occurred and was already reported through +/// `LeafExecutionReporter::finish()` +/// - `Some(message)`: A graph-level error occurred (e.g., cycle detection) +/// that needs to be passed to `GraphExecutionReporter::finish()` #[derive(Debug)] -struct ExecutionAborted; +pub struct ExecutionAborted(Option); +/// Holds mutable references needed during graph execution. +/// +/// The `reporter` field is used to create leaf reporters for individual executions. +/// Cache fields are passed through to [`execute_spawn`] for cache-aware execution. struct ExecutionContext<'a> { - event_handler: &'a mut dyn Reporter, - current_execution_id: ExecutionId, + /// The graph-level reporter, used to create leaf reporters via `new_leaf_execution()`. + reporter: &'a mut dyn GraphExecutionReporter, + /// The execution cache for looking up and storing cached results. cache: &'a ExecutionCache, - /// All relative paths in cache are relative to this base path + /// Base path for resolving relative paths in cache entries. + /// Typically the workspace root. cache_base_path: &'a Arc, } impl ExecutionContext<'_> { + /// Execute all tasks in an execution graph in topological order. + /// + /// This is the main entry point for graph traversal. It topologically sorts the graph + /// (reversing edges so dependencies execute first), then executes each task's items + /// sequentially. 
+ /// + /// The `path_prefix` tracks our position within nested execution graphs. For the + /// root call this is an empty path; for nested `Expanded` items it carries the + /// path so far. #[expect(clippy::future_not_send, reason = "uses !Send types internally")] - async fn execute_item_kind( + async fn execute_expanded_graph( &mut self, - display: Option<&ExecutionItemDisplay>, - item_kind: &ExecutionItemKind, + graph: &ExecutionGraph, + path_prefix: &LeafExecutionPath, ) -> Result<(), ExecutionAborted> { - match item_kind { - ExecutionItemKind::Expanded(graph) => { - // Use StableGraph to preserve node indices during removal - let mut graph: StableGraph<&TaskExecution, (), _, ExecutionIx> = - graph.map(|_, task_execution| task_execution, |_, ()| ()).into(); + // Use StableGraph to preserve node indices during removal. + // We need stable indices because the LeafExecutionPath references nodes + // by their original index in the graph. + let mut stable_graph: StableGraph<&TaskExecution, (), _, ExecutionIx> = + graph.map(|_, task_execution| task_execution, |_, ()| ()).into(); - // To be consistent with the package graph in vite_package_manager and the dependency graph definition in Wikipedia - // https://en.wikipedia.org/wiki/Dependency_graph, we construct the graph with edges from dependents to dependencies - // e.g. A -> B means A depends on B - // - // For execution we need to reverse the edges first before topological sorting, - // so that tasks without dependencies are executed first - graph.reverse(); // Run tasks without dependencies first + // The graph is constructed with edges from dependents to dependencies + // (e.g., A → B means "A depends on B"). For execution we need the reverse: + // dependencies should execute first. Reversing edges before topological sort + // achieves this. 
+ stable_graph.reverse(); - // Always use topological sort to ensure the correct order of execution - // or the task dependencies declaration is meaningless - let node_indices = match toposort(&graph, None) { - Ok(ok) => ok, - Err(cycle) => { - // Follow standard error pattern: Start event, then Error event - let execution_id = self.current_execution_id; - self.current_execution_id = self.current_execution_id.next(); + // Topological sort ensures tasks execute in dependency order. + // A cycle means the dependency graph is invalid. + let node_indices = match toposort(&stable_graph, None) { + Ok(ok) => ok, + Err(cycle) => { + // Cycle detected — return a graph-level error. + // This will be passed to `GraphExecutionReporter::finish(Some(msg))`. + return Err(ExecutionAborted(Some(vite_str::format!( + "Cycle dependencies detected: {cycle:?}" + )))); + } + }; - // Emit Start event for cycle detection error - // display is None for top-level execution (no parent task) - // display is Some for nested execution (within a parent task) - // Caching is disabled when cycle dependencies are detected - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Start { - display: display.cloned(), - cache_status: CacheStatus::Disabled( - CacheDisabledReason::CycleDetected, - ), - }, - }); + // Execute tasks in topological order. Each task may have multiple items + // (from `&&`-split commands), which are executed sequentially. + for node_ix in node_indices { + // `remove_node` on a StableGraph preserves other node indices. + // The original node index (`node_ix`) is still valid for path construction + // because it corresponds to the same node in the original (non-stable) graph. 
+ let task_execution = stable_graph + .remove_node(node_ix) + .expect("node was returned by toposort so it must exist"); - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Error { - message: vite_str::format!( - "Cycle dependencies detected: {cycle:?}" - ), - }, - }); + for (item_idx, item) in task_execution.items.iter().enumerate() { + // Build the path for this item by appending to the prefix + let mut item_path = path_prefix.clone(); + item_path.push(node_ix, item_idx); - return Err(ExecutionAborted); + match &item.kind { + ExecutionItemKind::Leaf(leaf_kind) => { + self.execute_leaf(&item_path, leaf_kind).boxed_local().await?; } - }; - - let ordered_executions = - node_indices.into_iter().map(|id| graph.remove_node(id).unwrap()); - for task_execution in ordered_executions { - for item in &task_execution.items { - match &item.kind { - ExecutionItemKind::Leaf(leaf_kind) => { - self.execute_leaf(Some(&item.execution_item_display), leaf_kind) - .boxed_local() - .await?; - } - ExecutionItemKind::Expanded(_) => { - self.execute_item_kind( - Some(&item.execution_item_display), - &item.kind, - ) - .boxed_local() - .await?; - } - } + ExecutionItemKind::Expanded(nested_graph) => { + // Recurse into the nested graph, carrying the path prefix forward + self.execute_expanded_graph(nested_graph, &item_path).boxed_local().await?; } } } - ExecutionItemKind::Leaf(leaf_execution_kind) => { - #[expect( - clippy::large_futures, - reason = "recursive execution creates large futures" - )] - self.execute_leaf(display, leaf_execution_kind).await?; - } } Ok(()) } + /// Execute a single leaf item (in-process command or spawned process). + /// + /// Creates a [`LeafExecutionReporter`] from the graph reporter and delegates + /// to the appropriate execution method. 
#[expect(clippy::future_not_send, reason = "uses !Send types internally")] async fn execute_leaf( &mut self, - display: Option<&ExecutionItemDisplay>, + path: &LeafExecutionPath, leaf_execution_kind: &LeafExecutionKind, ) -> Result<(), ExecutionAborted> { - let execution_id = self.current_execution_id; - self.current_execution_id = self.current_execution_id.next(); + let mut leaf_reporter = self.reporter.new_leaf_execution(path); match leaf_execution_kind { LeafExecutionKind::InProcess(in_process_execution) => { - // Emit Start event with cache_status for in-process (built-in) commands - // Caching is disabled for built-in commands - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Start { - display: display.cloned(), - cache_status: CacheStatus::Disabled( - CacheDisabledReason::InProcessExecution, - ), - }, - }); + // In-process (built-in) commands: caching is disabled, execute synchronously + leaf_reporter.start(CacheStatus::Disabled(CacheDisabledReason::InProcessExecution)); - // Execute the in-process command let execution_output = in_process_execution.execute(); - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Output { - kind: OutputKind::Stdout, - content: execution_output.stdout.into(), - }, - }); + leaf_reporter.output(OutputKind::Stdout, execution_output.stdout.into()); - // Emit Finish with CacheDisabled status (in-process executions don't cache) - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Finish { - status: None, - cache_update_status: CacheUpdateStatus::NotUpdated( - CacheNotUpdatedReason::CacheDisabled, - ), - }, - }); + leaf_reporter.finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + None, + ); + Ok(()) } - LeafExecutionKind::Spawn(spawn_execution) => { + LeafExecutionKind::Spawn(spawn_execution) => + { #[expect( clippy::large_futures, reason = "spawn execution with cache 
management creates large futures" )] - self.execute_spawn(execution_id, display, spawn_execution).await?; + execute_spawn(leaf_reporter, spawn_execution, self.cache, self.cache_base_path) + .await + .map(|_status| ()) } } - Ok(()) } +} - #[expect(clippy::future_not_send, reason = "uses !Send types internally")] - #[expect( - clippy::too_many_lines, - reason = "sequential cache check, execute, and update steps are clearer in one function" - )] - async fn execute_spawn( - &mut self, - execution_id: ExecutionId, - display: Option<&ExecutionItemDisplay>, - spawn_execution: &SpawnExecution, - ) -> Result<(), ExecutionAborted> { - let cache_metadata = spawn_execution.cache_metadata.as_ref(); +/// Execute a spawned process with cache-aware lifecycle. +/// +/// This is a free function (not tied to `ExecutionContext`) so it can be reused +/// from both graph-based execution and standalone synthetic execution. +/// +/// The full lifecycle is: +/// 1. Cache lookup (determines cache status) +/// 2. `leaf_reporter.start(cache_status)` +/// 3. If cache hit: replay cached outputs → finish +/// 4. If cache miss/disabled: spawn process → stream output → update cache → finish +/// +/// # Returns +/// +/// - `Ok(None)` — cache hit, no process was spawned +/// - `Ok(Some(exit_status))` — process ran, here's its exit status +/// - `Err(ExecutionAborted(None))` — an error occurred, already reported through +/// `leaf_reporter.finish(..., Some(error_message))` +#[expect(clippy::future_not_send, reason = "uses !Send types internally")] +#[expect( + clippy::too_many_lines, + reason = "sequential cache check, execute, and update steps are clearer in one function" +)] +pub async fn execute_spawn( + mut leaf_reporter: Box, + spawn_execution: &SpawnExecution, + cache: &ExecutionCache, + cache_base_path: &Arc, +) -> Result, ExecutionAborted> { + let cache_metadata = spawn_execution.cache_metadata.as_ref(); - // 1. 
Determine cache status FIRST by trying cache hit - // We need to know the status before emitting Start event so users - // see cache status immediately when execution begins - let (cache_status, cached_value) = if let Some(cache_metadata) = cache_metadata { - match self.cache.try_hit(cache_metadata, self.cache_base_path).await { - Ok(Ok(cached)) => ( - // Cache hit - we can replay the cached outputs - CacheStatus::Hit { replayed_duration: cached.duration }, - Some(cached), - ), - Ok(Err(cache_miss)) => ( - // Cache miss - includes detailed reason (NotFound or FingerprintMismatch) - CacheStatus::Miss(cache_miss), + // 1. Determine cache status FIRST by trying cache hit. + // We need to know the status before calling start() so the reporter + // can display cache status immediately when execution begins. + let (cache_status, cached_value) = if let Some(cache_metadata) = cache_metadata { + match cache.try_hit(cache_metadata, cache_base_path).await { + Ok(Ok(cached)) => ( + // Cache hit — we can replay the cached outputs + CacheStatus::Hit { replayed_duration: cached.duration }, + Some(cached), + ), + Ok(Err(cache_miss)) => ( + // Cache miss — includes detailed reason (NotFound or FingerprintMismatch) + CacheStatus::Miss(cache_miss), + None, + ), + Err(err) => { + // Cache lookup error — report through finish and abort. + // Note: start() is NOT called because we don't have a valid cache status. 
+ leaf_reporter.finish( None, - ), - Err(err) => { - // Cache lookup error - emit error and abort - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Error { - message: vite_str::format!("Cache lookup failed: {err}"), - }, - }); - return Err(ExecutionAborted); - } + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(vite_str::format!("Cache lookup failed: {err}")), + ); + return Err(ExecutionAborted(None)); } - } else { - // No cache metadata provided - caching is disabled for this task - (CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata), None) - }; + } + } else { + // No cache metadata provided — caching is disabled for this task + (CacheStatus::Disabled(CacheDisabledReason::NoCacheMetadata), None) + }; - // 2. NOW emit Start event with cache_status (ALWAYS emit Start) - // This ensures all spawn executions emit Start, including cache hits - // (previously cache hits didn't emit Start at all) - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Start { display: display.cloned(), cache_status }, - }); + // 2. Report execution start with the determined cache status + leaf_reporter.start(cache_status); - // 3. 
If cache hit, replay outputs and return early - // No need to actually execute the command - just replay what was cached - if let Some(cached) = cached_value { - for output in cached.std_outputs.iter() { - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Output { - kind: match output.kind { - SpawnOutputKind::StdOut => OutputKind::Stdout, - SpawnOutputKind::StdErr => OutputKind::Stderr, - }, - content: output.content.clone().into(), - }, - }); - } - // Emit Finish with CacheHit status (no cache update needed) - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Finish { - status: None, - cache_update_status: CacheUpdateStatus::NotUpdated( - CacheNotUpdatedReason::CacheHit, - ), + // 3. If cache hit, replay outputs and finish early. + // No need to actually execute the command — just replay what was cached. + if let Some(cached) = cached_value { + for output in cached.std_outputs.iter() { + leaf_reporter.output( + match output.kind { + SpawnOutputKind::StdOut => OutputKind::Stdout, + SpawnOutputKind::StdErr => OutputKind::Stderr, }, - }); - return Ok(()); + output.content.clone().into(), + ); } + leaf_reporter.finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheHit), + None, + ); + return Ok(None); + } - // 4. Execute spawn (cache miss or disabled) - // Track file system access if caching is enabled (for future cache updates) - let mut track_result_with_cache_metadata = - cache_metadata.map(|cache_metadata| (SpawnTrackResult::default(), cache_metadata)); + // 4. Execute spawn (cache miss or disabled). + // Track file system access if caching is enabled (for future cache updates). 
+ let mut track_result_with_cache_metadata = + cache_metadata.map(|cache_metadata| (SpawnTrackResult::default(), cache_metadata)); - // Execute command with tracking, emitting output events in real-time - #[expect( - clippy::large_futures, - reason = "spawn_with_tracking manages process I/O and creates a large future" - )] - let result = match spawn_with_tracking( - &spawn_execution.spawn_command, - self.cache_base_path, - |kind, content| { - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Output { - kind: match kind { - SpawnOutputKind::StdOut => OutputKind::Stdout, - SpawnOutputKind::StdErr => OutputKind::Stderr, - }, - content, - }, - }); - }, - track_result_with_cache_metadata.as_mut().map(|(track_result, _)| track_result), - ) - .await - { - Ok(result) => result, - Err(err) => { - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Error { - message: vite_str::format!("Failed to spawn process: {err}"), - }, - }); - return Err(ExecutionAborted); - } - }; + // Execute command with tracking, streaming output in real-time via the reporter + #[expect( + clippy::large_futures, + reason = "spawn_with_tracking manages process I/O and creates a large future" + )] + let result = match spawn_with_tracking( + &spawn_execution.spawn_command, + cache_base_path, + |kind, content| { + leaf_reporter.output( + match kind { + SpawnOutputKind::StdOut => OutputKind::Stdout, + SpawnOutputKind::StdErr => OutputKind::Stderr, + }, + content, + ); + }, + track_result_with_cache_metadata.as_mut().map(|(track_result, _)| track_result), + ) + .await + { + Ok(result) => result, + Err(err) => { + leaf_reporter.finish( + None, + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(vite_str::format!("Failed to spawn process: {err}")), + ); + return Err(ExecutionAborted(None)); + } + }; - // 5. 
Update cache if successful and determine cache update status - let cache_update_status = if let Some((track_result, cache_metadata)) = - track_result_with_cache_metadata - { - if result.exit_status.success() { - // Execution succeeded, attempt cache update - let fingerprint_ignores = cache_metadata - .spawn_fingerprint - .fingerprint_ignores() - .map(std::vec::Vec::as_slice); - match PostRunFingerprint::create( - &track_result.path_reads, - self.cache_base_path, - fingerprint_ignores, - ) { - Ok(post_run_fingerprint) => { - let new_cache_value = CommandCacheValue { - post_run_fingerprint, - std_outputs: track_result.std_outputs.clone().into(), - duration: result.duration, - }; - if let Err(err) = self.cache.update(cache_metadata, new_cache_value).await { - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Error { - message: vite_str::format!("Failed to update cache: {err}"), - }, - }); - return Err(ExecutionAborted); - } - CacheUpdateStatus::Updated - } - Err(err) => { - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Error { - message: vite_str::format!( - "Failed to create post-run fingerprint: {err}" - ), - }, - }); - return Err(ExecutionAborted); + // 5. Update cache if successful and determine cache update status. + // Errors during cache update are terminal (reported through finish). 
+ let (cache_update_status, cache_error) = if let Some((track_result, cache_metadata)) = + track_result_with_cache_metadata + { + if result.exit_status.success() { + // Execution succeeded — attempt to create fingerprint and update cache + let fingerprint_ignores = + cache_metadata.spawn_fingerprint.fingerprint_ignores().map(std::vec::Vec::as_slice); + match PostRunFingerprint::create( + &track_result.path_reads, + cache_base_path, + fingerprint_ignores, + ) { + Ok(post_run_fingerprint) => { + let new_cache_value = CommandCacheValue { + post_run_fingerprint, + std_outputs: track_result.std_outputs.clone().into(), + duration: result.duration, + }; + match cache.update(cache_metadata, new_cache_value).await { + Ok(()) => (CacheUpdateStatus::Updated, None), + Err(err) => ( + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(vite_str::format!("Failed to update cache: {err}")), + ), } } - } else { - // Execution failed with non-zero exit status, don't update cache - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::NonZeroExitStatus) + Err(err) => ( + CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), + Some(vite_str::format!("Failed to create post-run fingerprint: {err}")), + ), } } else { - // Caching was disabled for this task - CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled) - }; + // Execution failed with non-zero exit status — don't update cache + (CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::NonZeroExitStatus), None) + } + } else { + // Caching was disabled for this task + (CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), None) + }; - // 6. Emit finish with cache_update_status - self.event_handler.handle_event(ExecutionEvent { - execution_id, - kind: ExecutionEventKind::Finish { - status: Some(result.exit_status), - cache_update_status, - }, - }); + // 6. 
Finish the leaf execution with the result and optional error + let has_error = cache_error.is_some(); + leaf_reporter.finish(Some(result.exit_status), cache_update_status, cache_error); - Ok(()) - } + if has_error { Err(ExecutionAborted(None)) } else { Ok(Some(result.exit_status)) } } impl Session<'_> { - /// Execute an execution plan, reporting events to the provided reporter. + /// Execute an execution graph, reporting events through the provided reporter builder. /// - /// Returns Err(ExitStatus) to suggest the caller to abort and exit the process with the given exit status. + /// The builder is first transitioned to a `GraphExecutionReporter` by providing the graph. + /// Then each task in the graph is executed in topological order, with leaf executions + /// reported through individual `LeafExecutionReporter` instances. /// - /// The return type isn't just `ExitStatus` because we want to distinguish between normal successful execution, - /// and execution that failed and needs to exit with a specific code which can be zero. + /// Returns `Err(ExitStatus)` to indicate the caller should exit with the given status code. + /// Returns `Ok(())` when all tasks succeeded. #[expect(clippy::future_not_send, reason = "uses !Send types internally")] - pub(crate) async fn execute( + pub(crate) async fn execute_graph( &self, - plan: ExecutionPlan, - mut reporter: Box, + execution_graph: ExecutionGraph, + builder: Box, ) -> Result<(), ExitStatus> { + // Wrap the graph in Arc so both the reporter and execution can reference it. + // The reporter clones the Arc internally for display lookups. 
+ let graph = Arc::new(execution_graph); + let mut reporter = builder.build(&graph); + // Lazily initialize the cache on first execution let cache = match self.cache() { Ok(cache) => cache, Err(err) => { - reporter.handle_event(ExecutionEvent { - execution_id: ExecutionId::zero(), - kind: ExecutionEventKind::Error { - message: vite_str::format!("Failed to initialize cache: {err}"), - }, - }); - return Err(ExitStatus(1)); + // Cache initialization failure is a graph-level error — pass to finish() + return reporter + .finish(Some(vite_str::format!("Failed to initialize cache: {err}"))); } }; let mut execution_context = ExecutionContext { - event_handler: &mut *reporter, - current_execution_id: ExecutionId::zero(), + reporter: &mut *reporter, cache, cache_base_path: &self.workspace_path, }; - // Execute and swallow ExecutionAborted error - // display is None for top-level execution - #[expect( - clippy::large_futures, - reason = "top-level execution dispatches the entire task graph" - )] - let _ = execution_context.execute_item_kind(None, plan.root_node()).await; + // Execute the graph. On abort, extract the optional graph-level error message. + let graph_error = match execution_context + .execute_expanded_graph(&graph, &LeafExecutionPath::default()) + .await + { + Ok(()) => None, + Err(ExecutionAborted(error)) => error, + }; - // Always call post_execution, whether execution succeeded or failed - reporter.post_execution() + // Always call finish, whether execution succeeded or was aborted. + // graph_error is None for leaf-level errors (already handled by leaf reporter) + // and Some(msg) for graph-level errors (cycle detection). 
+ reporter.finish(graph_error) } } diff --git a/crates/vite_task/src/session/mod.rs b/crates/vite_task/src/session/mod.rs index dc25ac28..61c9bc9a 100644 --- a/crates/vite_task/src/session/mod.rs +++ b/crates/vite_task/src/session/mod.rs @@ -8,10 +8,9 @@ use std::{ffi::OsStr, fmt::Debug, io::IsTerminal, sync::Arc}; use cache::ExecutionCache; pub use cache::{CacheMiss, FingerprintMismatch}; -pub use event::ExecutionEvent; use once_cell::sync::OnceCell; pub use reporter::ExitStatus; -use reporter::LabeledReporter; +use reporter::LabeledReporterBuilder; use rustc_hash::FxHashMap; use vite_path::{AbsolutePath, AbsolutePathBuf}; use vite_select::SelectItem; @@ -222,10 +221,6 @@ impl<'a> Session<'a> { clippy::future_not_send, reason = "session is single-threaded, futures do not need to be Send" )] - #[expect( - clippy::large_futures, - reason = "execution plan future is large but only awaited once" - )] pub async fn main(mut self, command: Command) -> anyhow::Result { match command { Command::Cache { ref subcmd } => self.handle_cache_command(subcmd), @@ -239,7 +234,7 @@ impl<'a> Session<'a> { let flags = run_command.flags; let additional_args = run_command.additional_args.clone(); - match self.plan_from_cli(cwd, run_command).await { + match self.plan_from_cli_run(cwd, run_command).await { Ok(ref graph) if graph.node_count() == 0 => { // No tasks matched the query — show task selector / "did you mean" self.handle_no_task( @@ -251,11 +246,10 @@ impl<'a> Session<'a> { .await } Ok(graph) => { - let plan = ExecutionPlan::from_execution_graph(graph); - let reporter = - LabeledReporter::new(std::io::stdout(), self.workspace_path()); + let builder = + LabeledReporterBuilder::new(std::io::stdout(), self.workspace_path()); Ok(self - .execute(plan, Box::new(reporter)) + .execute_graph(graph, Box::new(builder)) .await .err() .unwrap_or(ExitStatus::SUCCESS)) @@ -301,10 +295,6 @@ impl<'a> Session<'a> { clippy::future_not_send, reason = "session is single-threaded, futures do not need 
to be Send" )] - #[expect( - clippy::large_futures, - reason = "interactive select future is large but only awaited once" - )] async fn handle_no_task( &mut self, is_interactive: bool, @@ -401,10 +391,9 @@ impl<'a> Session<'a> { RunCommand { task_specifier: Some(task_specifier), flags, additional_args }; let cwd = Arc::clone(&self.cwd); - let graph = self.plan_from_cli(cwd, run_command).await?; - let plan = ExecutionPlan::from_execution_graph(graph); - let reporter = LabeledReporter::new(std::io::stdout(), self.workspace_path()); - Ok(self.execute(plan, Box::new(reporter)).await.err().unwrap_or(ExitStatus::SUCCESS)) + let graph = self.plan_from_cli_run(cwd, run_command).await?; + let builder = LabeledReporterBuilder::new(std::io::stdout(), self.workspace_path()); + Ok(self.execute_graph(graph, Box::new(builder)).await.err().unwrap_or(ExitStatus::SUCCESS)) } /// Lazily initializes and returns the execution cache. @@ -439,8 +428,15 @@ impl<'a> Session<'a> { /// Execute a synthetic command with cache support. /// - /// This is for executing a command with cache before/without the entrypoint [`Session::main`]. - /// In vite-plus, this is used for auto-install. + /// This is for executing a single command with cache before/without the entrypoint + /// [`Session::main`]. In vite-plus, this is used for auto-install. + /// + /// Unlike `execute_graph` which uses the full graph reporter + /// pipeline, this method uses a `PlainReporter` — a lightweight reporter with no + /// summary, no stats tracking, and no graph awareness. + /// + /// The exit status is determined from the `execute_spawn` return value, not from + /// the reporter. 
/// /// # Errors /// @@ -459,16 +455,46 @@ impl<'a> Session<'a> { cache_key: Arc<[Str]>, silent_if_cache_hit: bool, ) -> anyhow::Result { - let plan = ExecutionPlan::plan_synthetic( + // Plan the synthetic execution — returns a SpawnExecution directly + // (synthetic plans are always a single spawned process) + let spawn_execution = ExecutionPlan::plan_synthetic( &self.workspace_path, &self.cwd, synthetic_plan_request, cache_key, )?; - let mut reporter = LabeledReporter::new(std::io::stdout(), self.workspace_path()); - reporter.set_hide_summary(true); - reporter.set_silent_if_cache_hit(silent_if_cache_hit); - Ok(self.execute(plan, Box::new(reporter)).await.err().unwrap_or(ExitStatus::SUCCESS)) + + // Initialize cache (needed for cache-aware execution) + let cache = self.cache()?; + + // Create a plain (standalone) reporter — no graph awareness, no summary + let plain_reporter = reporter::PlainReporter::new(std::io::stdout(), silent_if_cache_hit); + + // Execute the spawn directly using the free function, bypassing the graph pipeline + match execute::execute_spawn( + Box::new(plain_reporter), + &spawn_execution, + cache, + &self.workspace_path, + ) + .await + { + // Cache hit — no process was spawned, success + Ok(None) => Ok(ExitStatus::SUCCESS), + // Process ran successfully + Ok(Some(status)) if status.success() => Ok(ExitStatus::SUCCESS), + // Process ran but exited with non-zero status + Ok(Some(status)) => { + let code = event::exit_status_to_code(status); + #[expect( + clippy::cast_sign_loss, + reason = "value is clamped to 1..=255, always positive" + )] + Ok(ExitStatus(code.clamp(1, 255) as u8)) + } + // Error — already reported through the reporter's finish() + Err(_) => Ok(ExitStatus::FAILURE), + } } /// Plans execution from a CLI run command. 
@@ -480,7 +506,7 @@ impl<'a> Session<'a> { clippy::future_not_send, reason = "session is single-threaded, futures do not need to be Send" )] - pub async fn plan_from_cli( + pub async fn plan_from_cli_run( &mut self, cwd: Arc, command: RunCommand, diff --git a/crates/vite_task/src/session/reporter.rs b/crates/vite_task/src/session/reporter.rs index 3344f26e..495d5177 100644 --- a/crates/vite_task/src/session/reporter.rs +++ b/crates/vite_task/src/session/reporter.rs @@ -1,25 +1,225 @@ -//! `LabeledReporter` event handler for rendering execution events. +//! Reporter traits and implementations for rendering execution events. +//! +//! This module provides a typestate-based reporter system with three phases: +//! +//! 1. [`GraphExecutionReporterBuilder`] — created before the execution graph is known. +//! Transitions to [`GraphExecutionReporter`] when `build()` is called with the graph. +//! +//! 2. [`GraphExecutionReporter`] — knows the execution graph. Creates [`LeafExecutionReporter`] +//! instances for individual leaf executions via `new_leaf_execution()`. Finalized with `finish()`. +//! +//! 3. [`LeafExecutionReporter`] — handles events for a single leaf execution (output streaming, +//! cache status, errors). Finalized with `finish()`. +//! +//! Two concrete implementations are provided: +//! +//! - [`PlainReporter`] — a standalone [`LeafExecutionReporter`] for single-leaf execution +//! (e.g., `execute_synthetic`). Self-contained, no shared state, no summary. +//! +//! - [`LabeledReporterBuilder`] / [`LabeledGraphReporter`] / [`LabeledLeafReporter`] — a full +//! graph-aware reporter family. Tracks stats across multiple leaf executions, prints command +//! lines with cache status, and renders a summary at the end. 
use std::{ + cell::RefCell, io::Write, + ops::Index, process::ExitStatus as StdExitStatus, + rc::Rc, sync::{Arc, LazyLock}, time::Duration, }; +use bstr::BString; use owo_colors::{Style, Styled}; -use rustc_hash::FxHashSet; +use smallvec::SmallVec; use vite_path::AbsolutePath; use vite_str::Str; +use vite_task_plan::{ + ExecutionGraph, ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, + execution_graph::ExecutionNodeIndex, +}; use super::{ cache::{format_cache_status_inline, format_cache_status_summary}, - event::{ - CacheStatus, ExecutionEvent, ExecutionEventKind, ExecutionId, ExecutionItemDisplay, - exit_status_to_code, - }, + event::{CacheStatus, CacheUpdateStatus, OutputKind, exit_status_to_code}, }; +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Exit status type +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Exit status code for task execution. +/// +/// Wraps a `u8` exit code. `0` means success, non-zero means failure. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ExitStatus(pub u8); + +impl ExitStatus { + pub const FAILURE: Self = Self(1); + pub const SUCCESS: Self = Self(0); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Leaf execution path — identifies a leaf within a (potentially nested) execution graph +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// One step in a [`LeafExecutionPath`]: identifies a specific execution item +/// within a single level of the execution graph. +#[derive(Clone, Copy, Debug)] +struct ExecutionPathItem { + /// The node (task) index in the execution graph at this level. + graph_node_ix: ExecutionNodeIndex, + /// The item index within the task's `items` vector. + task_execution_item_index: usize, +} + +/// Indexing a graph by a single `ExecutionPathItem` yields the [`ExecutionItem`] +/// at that position. 
+impl Index for ExecutionGraph { + type Output = ExecutionItem; + + fn index(&self, index: ExecutionPathItem) -> &Self::Output { + &self[index.graph_node_ix].items[index.task_execution_item_index] + } +} + +/// A path through a (potentially nested) execution graph that identifies a specific +/// leaf execution. +/// +/// Each element in the path represents a step deeper into a nested `Expanded` execution +/// graph. The last element identifies the actual leaf item. +/// +/// For example, a path of `[(node_0, item_1), (node_2, item_0)]` means: +/// - In the root graph, node 0, item 1 (which is an `Expanded` containing a nested graph) +/// - In that nested graph, node 2, item 0 (the actual leaf execution) +/// +/// Uses `SmallVec` with inline capacity of 4 since most execution graphs are shallow +/// (typically 1-2 levels of nesting). +#[derive(Clone, Debug, Default)] +pub struct LeafExecutionPath(SmallVec); + +impl LeafExecutionPath { + /// Append a new step to this path, identifying an item at the given node and item indices. + pub fn push(&mut self, graph_node_ix: ExecutionNodeIndex, task_execution_item_index: usize) { + self.0.push(ExecutionPathItem { graph_node_ix, task_execution_item_index }); + } + + /// Look up the [`ExecutionItemDisplay`] for the leaf identified by this path, + /// traversing through nested `Expanded` graphs as needed. + /// + /// Returns `None` if the path is empty. + /// + /// # Panics + /// + /// Panics if an intermediate path element does not point to an `Expanded` item, + /// which indicates a bug in path construction. 
+    fn resolve_display<'a>(
+        &self,
+        root_graph: &'a ExecutionGraph,
+    ) -> Option<&'a ExecutionItemDisplay> {
+        let mut current_graph = root_graph;
+        for (depth, path_item) in self.0.iter().enumerate() {
+            let item = &current_graph[*path_item];
+            let is_last = depth == self.0.len() - 1;
+            if is_last {
+                // Last element — return the display info regardless of Leaf/Expanded
+                return Some(&item.execution_item_display);
+            }
+            // Intermediate element — must be Expanded so we can descend into it
+            match &item.kind {
+                ExecutionItemKind::Expanded(nested_graph) => {
+                    current_graph = nested_graph;
+                }
+                ExecutionItemKind::Leaf(_) => {
+                    // A Leaf in the middle of the path is a bug in path construction.
+                    // This should never happen if the execution engine builds paths correctly.
+                    debug_assert!(
+                        false,
+                        "LeafExecutionPath: intermediate element is a Leaf, expected Expanded"
+                    );
+                    return None;
+                }
+            }
+        }
+        // Empty path
+        None
+    }
+}
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Typestate traits
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+/// Builder for creating a [`GraphExecutionReporter`].
+///
+/// This is the initial state before the execution graph is known. The `build` method
+/// transitions to the next state by providing the graph.
+pub trait GraphExecutionReporterBuilder {
+    /// Create a [`GraphExecutionReporter`] for the given execution graph.
+    ///
+    /// The reporter may clone the `Arc` to retain a reference to the graph
+    /// for looking up display information during execution.
+    fn build(self: Box, graph: &Arc) -> Box;
+}
+
+/// Reporter for an entire graph execution session.
+///
+/// Creates [`LeafExecutionReporter`] instances for individual leaf executions
+/// and finalizes the session with `finish()`.
+pub trait GraphExecutionReporter {
+    /// Create a new leaf execution reporter for the leaf identified by `path`.
+ /// + /// The reporter implementation can look up display info (task name, command, cwd) + /// from the execution graph using the path. + fn new_leaf_execution(&mut self, path: &LeafExecutionPath) -> Box; + + /// Finalize the graph execution session. + /// + /// If `error` is `Some`, a graph-level error occurred (e.g., cycle detection, cache + /// initialization failure). Leaf-level errors are already tracked internally by the + /// reporter via the leaf reporter's `finish()` method. + /// + /// Returns `Ok(())` if all tasks succeeded, or `Err(ExitStatus)` to indicate the + /// process should exit with the given status code. + fn finish(self: Box, error: Option) -> Result<(), ExitStatus>; +} + +/// Reporter for a single leaf execution (one spawned process or in-process command). +/// +/// Lifecycle: `start()` → zero or more `output()` → `finish()`. +/// +/// `start()` may not be called before `finish()` if an error occurs before the cache +/// status is determined (e.g., cache lookup failure). +pub trait LeafExecutionReporter { + /// Report that execution is starting with the given cache status. + /// + /// Called after the cache lookup completes, before any output is produced. + fn start(&mut self, cache_status: CacheStatus); + + /// Report a chunk of output (stdout or stderr) from the executing process. + fn output(&mut self, kind: OutputKind, content: BString); + + /// Finalize this leaf execution. + /// + /// - `status`: The process exit status, or `None` for cache hits and in-process commands. + /// - `cache_update_status`: Whether the cache was updated after execution. + /// - `error`: If `Some`, an error occurred during this leaf's execution (cache lookup + /// failure, spawn failure, fingerprint creation failure, cache update failure). + /// + /// This method consumes the reporter — no further calls are possible after `finish()`. 
+ fn finish( + self: Box, + status: Option, + cache_update_status: CacheUpdateStatus, + error: Option, + ); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Shared display helpers +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + /// Wrap of `OwoColorize` that ignores style if `NO_COLOR` is set. trait ColorizeExt { fn style(&self, style: Style) -> Styled<&Self>; @@ -33,39 +233,158 @@ impl ColorizeExt for T { } } -/// Exit status code for task execution -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct ExitStatus(pub u8); +const COMMAND_STYLE: Style = Style::new().cyan(); +const CACHE_MISS_STYLE: Style = Style::new().purple(); -impl ExitStatus { - pub const FAILURE: Self = Self(1); - pub const SUCCESS: Self = Self(0); +/// Format the display's cwd as a string relative to the workspace root. +/// Returns an empty string if the cwd equals the workspace root. +fn format_cwd_relative(display: &ExecutionItemDisplay, workspace_path: &AbsolutePath) -> Str { + let cwd_relative = if let Ok(Some(rel)) = display.cwd.strip_prefix(workspace_path) { + Str::from(rel.as_str()) + } else { + Str::default() + }; + if cwd_relative.is_empty() { Str::default() } else { vite_str::format!("~/{cwd_relative}") } } -/// Trait for handling execution events and reporting results -pub trait Reporter { - /// Handle an execution event (start, output, error, finish) - fn handle_event(&mut self, event: ExecutionEvent); +/// Format the command string with cwd prefix for display (e.g., `~/packages/lib$ vitest run`). 
+fn format_command_display(display: &ExecutionItemDisplay, workspace_path: &AbsolutePath) -> Str { + let cwd_str = format_cwd_relative(display, workspace_path); + vite_str::format!("{cwd_str}$ {}", display.command) +} - /// Called after execution completes (whether successful or not) - /// Returns Ok(()) on success, or Err(ExitStatus) on failure - fn post_execution(self: Box) -> Result<(), ExitStatus>; +/// Write the command line with optional inline cache status to the writer. +/// +/// This is called during `start()` to show the user what command is being executed +/// and its cache status. +fn write_command_with_cache_status( + writer: &mut impl Write, + display: &ExecutionItemDisplay, + workspace_path: &AbsolutePath, + cache_status: &CacheStatus, +) { + let command_str = format_command_display(display, workspace_path); + if let Some(inline_status) = format_cache_status_inline(cache_status) { + // Apply styling based on cache status type + let styled_status = match cache_status { + CacheStatus::Hit { .. } => inline_status.style(Style::new().green().dimmed()), + CacheStatus::Miss(_) => inline_status.style(CACHE_MISS_STYLE.dimmed()), + CacheStatus::Disabled(_) => inline_status.style(Style::new().bright_black()), + }; + let _ = writeln!(writer, "{} {}", command_str.style(COMMAND_STYLE), styled_status); + } else { + let _ = writeln!(writer, "{}", command_str.style(COMMAND_STYLE)); + } } -const COMMAND_STYLE: Style = Style::new().cyan(); -const CACHE_MISS_STYLE: Style = Style::new().purple(); +/// Write an error message in red with an error icon. +fn write_error_message(writer: &mut impl Write, message: &str) { + let _ = writeln!( + writer, + "{} {}", + "✗".style(Style::new().red().bold()), + message.style(Style::new().red()) + ); +} + +/// Write the "cache hit, logs replayed" message for synthetic executions without display info. 
+fn write_cache_hit_message(writer: &mut impl Write) { + let _ = + writeln!(writer, "{}", "✓ cache hit, logs replayed".style(Style::new().green().dimmed())); +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// PlainReporter — standalone LeafExecutionReporter for single-leaf execution +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// A self-contained [`LeafExecutionReporter`] for single-leaf executions +/// (e.g., `execute_synthetic`). +/// +/// This reporter: +/// - Owns its writer directly (no `Rc` shared state) +/// - Has no display info (synthetic executions have no task display) +/// - Does not track stats or print summaries +/// - Supports `silent_if_cache_hit` to suppress output for cached executions +/// +/// The exit status is determined by the caller from the `execute_spawn` return value, +/// not from the reporter. +pub struct PlainReporter { + writer: W, + /// When true, suppresses all output (command line, process output, cache hit message) + /// for executions that are cache hits. + silent_if_cache_hit: bool, + /// Whether the current execution is a cache hit, set by `start()`. + is_cache_hit: bool, +} -/// Information tracked for each execution +impl PlainReporter { + /// Create a new plain reporter. + /// + /// - `writer`: The output stream (typically `std::io::stdout()`). + /// - `silent_if_cache_hit`: If true, suppress all output when the execution is a cache hit. + pub const fn new(writer: W, silent_if_cache_hit: bool) -> Self { + Self { writer, silent_if_cache_hit, is_cache_hit: false } + } + + /// Returns true if output should be suppressed for this execution. + const fn is_silent(&self) -> bool { + self.silent_if_cache_hit && self.is_cache_hit + } +} + +impl LeafExecutionReporter for PlainReporter { + fn start(&mut self, cache_status: CacheStatus) { + self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. 
}); + // PlainReporter has no display info (synthetic executions), + // so there's no command line to print at start. + } + + fn output(&mut self, _kind: OutputKind, content: BString) { + if self.is_silent() { + return; + } + let _ = self.writer.write_all(&content); + let _ = self.writer.flush(); + } + + fn finish( + mut self: Box, + _status: Option, + _cache_update_status: CacheUpdateStatus, + error: Option, + ) { + // Handle errors — print inline error message + if let Some(ref message) = error { + write_error_message(&mut self.writer, message); + return; + } + + // For cache hits, print the "cache hit" message (unless silent) + if self.is_cache_hit && !self.is_silent() { + write_cache_hit_message(&mut self.writer); + } + } +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// LabeledReporter family — graph-aware reporter with aggregation and summary +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Information tracked for each leaf execution, used in the final summary. #[derive(Debug)] struct ExecutionInfo { + /// Display info for this execution. `None` for displayless executions + /// (e.g., synthetics reached via nested expansion). display: Option, - cache_status: CacheStatus, // Non-optional, determined at Start - /// Exit status from the process. None means no process was spawned (cache hit or in-process). + /// Cache status, determined at `start()`. + cache_status: CacheStatus, + /// Exit status from the process. `None` means no process was spawned (cache hit or in-process). exit_status: Option, + /// Error message, set on error. error_message: Option, } -/// Statistics for the execution summary +/// Running statistics updated as leaf executions complete. #[derive(Default)] struct ExecutionStats { cache_hits: usize, @@ -74,523 +393,494 @@ struct ExecutionStats { failed: usize, } -/// Event handler that renders execution events in labeled format. 
+/// Mutable state shared between [`LabeledGraphReporter`] and its [`LabeledLeafReporter`] instances +/// via `Rc>`. /// -/// # Output Modes +/// This is safe because execution is single-threaded and sequential — only one leaf +/// reporter is active at a time. +struct SharedReporterState { + writer: W, + executions: Vec, + stats: ExecutionStats, + /// The first error encountered during execution. Used to print the abort banner. + first_error: Option, +} + +/// Builder for the labeled graph reporter. /// -/// The reporter has different output modes based on configuration and execution context: +/// Created by the caller before execution, then transitioned to [`LabeledGraphReporter`] +/// by calling `build()` with the execution graph. +/// +/// # Output Modes /// /// ## Normal Mode (default) /// - Prints command lines with cache status indicators during execution /// - Shows full summary with Statistics and Task Details at the end /// -/// ## Silent Cache Hit Mode (`silent_if_cache_hit = true`) -/// - Suppresses command lines and output for cache hit executions -/// - Useful for faster, cleaner output when many tasks are cached -/// -/// ## Hidden Summary Mode (`hide_summary = true`) -/// - Skips printing the execution summary entirely -/// - Useful for programmatic usage or when summary is not needed -/// /// ## Simplified Summary for Single Tasks -/// - When a single task is executed: +/// - When a single task with display info is executed: /// - Skips full summary (no Statistics/Task Details sections) -/// - Shows only cache status (except for "`NotFound`" which is hidden for clean first-run output) +/// - Shows only cache status inline /// - Results in clean output showing just the command's stdout/stderr -pub struct LabeledReporter { +pub struct LabeledReporterBuilder { writer: W, workspace_path: Arc, - executions: Vec, - stats: ExecutionStats, - first_error: Option, +} - /// When true, suppresses command line and output for cache hit executions - 
silent_if_cache_hit: bool, +impl LabeledReporterBuilder { + /// Create a new labeled reporter builder. + /// + /// - `writer`: The output stream (typically `std::io::stdout()`). + /// - `workspace_path`: The workspace root, used to compute relative cwds in display. + pub const fn new(writer: W, workspace_path: Arc) -> Self { + Self { writer, workspace_path } + } +} - /// When true, skips printing the execution summary at the end - hide_summary: bool, +impl GraphExecutionReporterBuilder for LabeledReporterBuilder { + fn build(self: Box, graph: &Arc) -> Box { + Box::new(LabeledGraphReporter { + shared: Rc::new(RefCell::new(SharedReporterState { + writer: self.writer, + executions: Vec::new(), + stats: ExecutionStats::default(), + first_error: None, + })), + graph: Arc::clone(graph), + workspace_path: self.workspace_path, + }) + } +} - /// Tracks which executions are cache hits (for `silent_if_cache_hit` mode) - cache_hit_executions: FxHashSet, +/// Graph-level reporter that tracks multiple leaf executions and prints a summary. +/// +/// Creates [`LabeledLeafReporter`] instances for each leaf execution. The leaf reporters +/// share mutable state with this reporter via `Rc>`. 
+pub struct LabeledGraphReporter { + shared: Rc>>, + graph: Arc, + workspace_path: Arc, } -impl LabeledReporter { - pub fn new(writer: W, workspace_path: Arc) -> Self { - Self { - writer, - workspace_path, - executions: Vec::new(), - stats: ExecutionStats::default(), - first_error: None, - silent_if_cache_hit: false, - hide_summary: false, - cache_hit_executions: FxHashSet::default(), - } +impl GraphExecutionReporter for LabeledGraphReporter { + fn new_leaf_execution(&mut self, path: &LeafExecutionPath) -> Box { + // Look up display info from the graph using the path + let display = path.resolve_display(&self.graph).cloned(); + Box::new(LabeledLeafReporter { + shared: Rc::clone(&self.shared), + display, + workspace_path: Arc::clone(&self.workspace_path), + started: false, + is_cache_hit: false, + }) } - /// Set the `silent_if_cache_hit` option - pub const fn set_silent_if_cache_hit(&mut self, silent_if_cache_hit: bool) { - self.silent_if_cache_hit = silent_if_cache_hit; - } + fn finish(self: Box, error: Option) -> Result<(), ExitStatus> { + let mut shared = self.shared.borrow_mut(); - /// Set the `hide_summary` option - pub const fn set_hide_summary(&mut self, hide_summary: bool) { - self.hide_summary = hide_summary; - } + // If a graph-level error was passed in, store it as first_error + // (only if no leaf error was already recorded — leaf errors take precedence + // since they occurred first chronologically) + if let Some(ref error_msg) = error + && shared.first_error.is_none() + { + shared.first_error = Some(error_msg.clone()); + } - fn handle_start( - &mut self, - execution_id: ExecutionId, - display: Option, - cache_status: CacheStatus, - ) { - // Update statistics immediately based on cache status - match &cache_status { - CacheStatus::Hit { .. 
} => { - self.stats.cache_hits += 1; - // Track cache hit executions for silent mode - if self.silent_if_cache_hit { - self.cache_hit_executions.insert(execution_id); - } - } - CacheStatus::Miss(_) => self.stats.cache_misses += 1, - CacheStatus::Disabled(_) => self.stats.cache_disabled += 1, + // Check if execution was aborted due to error (either graph-level or leaf-level). + // Clone the error message before using the writer to avoid borrow conflicts. + if let Some(error_msg) = shared.first_error.clone() { + // Print error abort banner + let _ = writeln!(shared.writer); + let _ = writeln!( + shared.writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + .style(Style::new().bright_black()) + ); + let _ = writeln!( + shared.writer, + "{} {}", + "Execution aborted due to error:".style(Style::new().red().bold()), + error_msg.style(Style::new().red()) + ); + let _ = writeln!( + shared.writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + .style(Style::new().bright_black()) + ); + + return Err(ExitStatus::FAILURE); } - // Handle None display case - direct synthetic execution (e.g., via plan_exec) - // Don't print cache status here - will be printed at finish for cache hits only - let Some(display) = display else { - self.executions.push(ExecutionInfo { - display: None, - cache_status, - exit_status: None, - error_message: None, - }); - return; - }; + // No errors — print summary. + // Special case: single execution without display info (e.g., synthetic via nested expansion) + // → skip summary since there's nothing meaningful to show. + let is_single_displayless = + shared.executions.len() == 1 && shared.executions[0].display.is_none(); + if !is_single_displayless { + // Destructure to get simultaneous mutable access to writer and immutable + // access to executions/stats, satisfying the borrow checker. + let SharedReporterState { ref mut writer, ref executions, ref stats, .. 
} = *shared; + print_summary(writer, executions, stats, &self.workspace_path); + } - // Compute cwd relative to workspace root - let cwd_relative = if let Ok(Some(rel)) = display.cwd.strip_prefix(&self.workspace_path) { - Str::from(rel.as_str()) - } else { - Str::default() - }; + // Determine exit code based on failed tasks: + // 1. All tasks succeed → return Ok(()) + // 2. Exactly one task failed → return Err with that task's exit code + // 3. More than one task failed → return Err(1) + // Note: None means success (cache hit or in-process) + let failed_exit_codes: Vec = shared + .executions + .iter() + .filter_map(|exec| exec.exit_status.as_ref()) + .filter(|status| !status.success()) + .map(|status| exit_status_to_code(*status)) + .collect(); - let cwd_str = if cwd_relative.is_empty() { - Str::default() - } else { - vite_str::format!("~/{cwd_relative}") - }; - let command_str = vite_str::format!("{cwd_str}$ {}", display.command); - - // Skip printing if silent_if_cache_hit is enabled and this is a cache hit - let should_print = - !self.silent_if_cache_hit || !matches!(cache_status, CacheStatus::Hit { .. }); - - if should_print { - // Print command with optional inline cache status - // Use display module for plain text, apply styling here - if let Some(inline_status) = format_cache_status_inline(&cache_status) { - // Apply styling based on cache status type - let styled_status = match &cache_status { - CacheStatus::Hit { .. 
} => inline_status.style(Style::new().green().dimmed()), - CacheStatus::Miss(_) => inline_status.style(CACHE_MISS_STYLE.dimmed()), - CacheStatus::Disabled(_) => inline_status.style(Style::new().bright_black()), - }; - let _ = - writeln!(self.writer, "{} {}", command_str.style(COMMAND_STYLE), styled_status); - } else { - let _ = writeln!(self.writer, "{}", command_str.style(COMMAND_STYLE)); + match failed_exit_codes.as_slice() { + [] => Ok(()), + [code] => { + // Return the single failed task's exit code (clamped to u8 range) + #[expect( + clippy::cast_sign_loss, + reason = "value is clamped to 1..=255, always positive" + )] + Err(ExitStatus((*code).clamp(1, 255) as u8)) } + _ => Err(ExitStatus::FAILURE), } + } +} + +/// Leaf-level reporter created by [`LabeledGraphReporter::new_leaf_execution`]. +/// +/// Writes output in real-time to the shared writer and updates shared stats/errors +/// via `Rc>`. +struct LabeledLeafReporter { + shared: Rc>>, + /// Display info for this execution, looked up from the graph via the path. + display: Option, + workspace_path: Arc, + /// Whether `start()` has been called. Used to determine if stats should be updated + /// in `finish()` and whether to push an `ExecutionInfo` entry. + started: bool, + /// Whether the current execution is a cache hit, set by `start()`. + is_cache_hit: bool, +} + +impl LeafExecutionReporter for LabeledLeafReporter { + fn start(&mut self, cache_status: CacheStatus) { + self.started = true; + self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. }); - // Store execution info for summary - self.executions.push(ExecutionInfo { - display: Some(display), + let mut shared = self.shared.borrow_mut(); + + // Update statistics based on cache status + match &cache_status { + CacheStatus::Hit { .. 
} => shared.stats.cache_hits += 1, + CacheStatus::Miss(_) => shared.stats.cache_misses += 1, + CacheStatus::Disabled(_) => shared.stats.cache_disabled += 1, + } + + // Print command line with cache status (if display info is available) + if let Some(ref display) = self.display { + write_command_with_cache_status( + &mut shared.writer, + display, + &self.workspace_path, + &cache_status, + ); + } + + // Store execution info for the summary + shared.executions.push(ExecutionInfo { + display: self.display.clone(), cache_status, exit_status: None, error_message: None, }); } - fn handle_error(&mut self, _execution_id: ExecutionId, message: Str) { - // Display error inline (in red, with error icon) - let _ = writeln!( - self.writer, - "{} {}", - "✗".style(Style::new().red().bold()), - message.style(Style::new().red()) - ); + fn output(&mut self, _kind: OutputKind, content: BString) { + let mut shared = self.shared.borrow_mut(); + let _ = shared.writer.write_all(&content); + let _ = shared.writer.flush(); + } - // Track first error - if self.first_error.is_none() { - self.first_error = Some(message.clone()); - } + fn finish( + self: Box, + status: Option, + _cache_update_status: CacheUpdateStatus, + error: Option, + ) { + let mut shared = self.shared.borrow_mut(); - // Track error for summary - if let Some(exec) = self.executions.last_mut() { - exec.error_message = Some(message); - } + // Handle errors + if let Some(ref message) = error { + write_error_message(&mut shared.writer, message); - self.stats.failed += 1; - } + // Track first error for the abort banner + if shared.first_error.is_none() { + shared.first_error = Some(message.clone()); + } + + // Update the execution info if start() was called (an entry was pushed). + // Without the `self.started` guard, `last_mut()` would return a + // *different* execution's entry, corrupting its error_message. 
+ if self.started + && let Some(exec) = shared.executions.last_mut() + { + exec.error_message = Some(message.clone()); + } + + shared.stats.failed += 1; + } - fn handle_finish(&mut self, execution_id: ExecutionId, status: Option) { - // Update failure statistics + // Update failure statistics for non-zero exit status (not an error, just a failed task) // None means success (cache hit or in-process), Some checks the actual exit status - if status.is_some_and(|s| !s.success()) { - self.stats.failed += 1; + if error.is_none() && status.is_some_and(|s| !s.success()) { + shared.stats.failed += 1; } - // Update execution info exit status - if let Some(exec) = self.executions.last_mut() { + // Update execution info with exit status (if start() was called and an entry exists) + if self.started + && let Some(exec) = shared.executions.last_mut() + { exec.exit_status = status; } - // For direct synthetic execution with cache hit, print message at the bottom - if let Some(exec) = self.executions.last() - && exec.display.is_none() - && matches!(exec.cache_status, CacheStatus::Hit { .. }) - { - let should_print = - !self.silent_if_cache_hit || !self.cache_hit_executions.contains(&execution_id); - if should_print { - let _ = writeln!( - self.writer, - "{}", - "✓ cache hit, logs replayed".style(Style::new().green().dimmed()) - ); - } + // For executions without display info (synthetics via nested expansion) that are + // cache hits, print the cache hit message + if self.started && self.display.is_none() && self.is_cache_hit { + write_cache_hit_message(&mut shared.writer); } - // Add a line break after each task's output for better readability - // Skip if silent_if_cache_hit is enabled and this execution is a cache hit - if !self.silent_if_cache_hit || !self.cache_hit_executions.contains(&execution_id) { - let _ = writeln!(self.writer); + // Add a trailing newline after each task's output for readability. + // Skip if start() was never called (e.g. 
cache lookup failure) — there's + // no task output to separate. + if self.started { + let _ = writeln!(shared.writer); } } +} - /// Print execution summary after all events - #[expect( - clippy::too_many_lines, - reason = "summary formatting is inherently verbose with many write calls" - )] - pub fn print_summary(&mut self) { - let total = self.executions.len(); - let cache_hits = self.stats.cache_hits; - let cache_misses = self.stats.cache_misses; - let cache_disabled = self.stats.cache_disabled; - let failed = self.stats.failed; - - // Print summary header with decorative line - // Note: handle_finish already adds a trailing newline after each task's output - // Add an extra blank line before the summary for visual separation - let _ = writeln!(self.writer); - let _ = writeln!( - self.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) - ); - let _ = writeln!( - self.writer, - "{}", - " Vite+ Task Runner • Execution Summary".style(Style::new().bold().bright_white()) - ); - let _ = writeln!( - self.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) - ); - let _ = writeln!(self.writer); +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Summary printing +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - // Print statistics - let cache_disabled_str = if cache_disabled > 0 { - Str::from( - vite_str::format!("• {cache_disabled} cache disabled") - .style(Style::new().bright_black()) - .to_string(), - ) - } else { - Str::default() - }; - - let failed_str = if failed > 0 { - Str::from(vite_str::format!("• {failed} failed").style(Style::new().red()).to_string()) +/// Print the full execution summary with statistics, performance, and per-task details. +/// +/// This is called by [`LabeledGraphReporter::finish`] when there are no errors +/// and the summary should be displayed. 
+#[expect( + clippy::too_many_lines, + reason = "summary formatting is inherently verbose with many write calls" +)] +fn print_summary( + writer: &mut impl Write, + executions: &[ExecutionInfo], + stats: &ExecutionStats, + workspace_path: &AbsolutePath, +) { + let total = executions.len(); + let cache_hits = stats.cache_hits; + let cache_misses = stats.cache_misses; + let cache_disabled = stats.cache_disabled; + let failed = stats.failed; + + // Print summary header with decorative line + // Note: leaf finish already adds a trailing newline after each task's output + // Add an extra blank line before the summary for visual separation + let _ = writeln!(writer); + let _ = writeln!( + writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) + ); + let _ = writeln!( + writer, + "{}", + " Vite+ Task Runner • Execution Summary".style(Style::new().bold().bright_white()) + ); + let _ = writeln!( + writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) + ); + let _ = writeln!(writer); + + // Print statistics + let cache_disabled_str = if cache_disabled > 0 { + Str::from( + vite_str::format!("• {cache_disabled} cache disabled") + .style(Style::new().bright_black()) + .to_string(), + ) + } else { + Str::default() + }; + + let failed_str = if failed > 0 { + Str::from(vite_str::format!("• {failed} failed").style(Style::new().red()).to_string()) + } else { + Str::default() + }; + + // Build statistics line, only including non-empty parts + // Note: trailing space after "cache misses" is intentional for consistent formatting + let _ = write!( + writer, + "{} {} {} {} ", + "Statistics:".style(Style::new().bold()), + vite_str::format!(" {total} tasks").style(Style::new().bright_white()), + vite_str::format!("• {cache_hits} cache hits").style(Style::new().green()), + vite_str::format!("• {cache_misses} cache misses").style(CACHE_MISS_STYLE), + ); + if !cache_disabled_str.is_empty() { + let 
_ = write!(writer, "{cache_disabled_str} "); + } + if !failed_str.is_empty() { + let _ = write!(writer, "{failed_str} "); + } + let _ = writeln!(writer); + + // Calculate cache hit rate + let cache_rate = if total > 0 { + #[expect( + clippy::cast_possible_truncation, + reason = "percentage is always 0..=100, fits in u32" + )] + #[expect(clippy::cast_sign_loss, reason = "percentage is always non-negative")] + #[expect( + clippy::cast_precision_loss, + reason = "acceptable precision loss for display percentage" + )] + { + (f64::from(cache_hits as u32) / total as f64 * 100.0) as u32 + } + } else { + 0 + }; + + // Calculate total time saved from cache hits + let total_saved: Duration = executions + .iter() + .filter_map(|exec| { + if let CacheStatus::Hit { replayed_duration } = &exec.cache_status { + Some(*replayed_duration) + } else { + None + } + }) + .sum(); + + let _ = write!( + writer, + "{} {} cache hit rate", + "Performance:".style(Style::new().bold()), + format_args!("{cache_rate}%").style(if cache_rate >= 75 { + Style::new().green().bold() + } else if cache_rate >= 50 { + CACHE_MISS_STYLE } else { - Str::default() - }; + Style::new().red() + }) + ); - // Build statistics line, only including non-empty parts - // Note: trailing space after "cache misses" is intentional for consistent formatting + if total_saved > Duration::ZERO { let _ = write!( - self.writer, - "{} {} {} {} ", - "Statistics:".style(Style::new().bold()), - vite_str::format!(" {total} tasks").style(Style::new().bright_white()), - vite_str::format!("• {cache_hits} cache hits").style(Style::new().green()), - vite_str::format!("• {cache_misses} cache misses").style(CACHE_MISS_STYLE), + writer, + ", {:.2?} saved in total", + total_saved.style(Style::new().green().bold()) ); - if !cache_disabled_str.is_empty() { - let _ = write!(self.writer, "{cache_disabled_str} "); - } - if !failed_str.is_empty() { - let _ = write!(self.writer, "{failed_str} "); - } - let _ = writeln!(self.writer); - - // Calculate 
cache hit rate - let cache_rate = if total > 0 { - #[expect( - clippy::cast_possible_truncation, - reason = "percentage is always 0..=100, fits in u32" - )] - #[expect(clippy::cast_sign_loss, reason = "percentage is always non-negative")] - #[expect( - clippy::cast_precision_loss, - reason = "acceptable precision loss for display percentage" - )] - { - (f64::from(cache_hits as u32) / total as f64 * 100.0) as u32 - } - } else { - 0 + } + let _ = writeln!(writer); + let _ = writeln!(writer); + + // Detailed task results + let _ = writeln!(writer, "{}", "Task Details:".style(Style::new().bold())); + let _ = writeln!( + writer, + "{}", + "────────────────────────────────────────────────".style(Style::new().bright_black()) + ); + + for (idx, exec) in executions.iter().enumerate() { + // Skip executions without display info (they have nothing to show in the summary) + let Some(ref display) = exec.display else { + continue; }; - // Calculate total time saved - let total_saved: Duration = self - .executions - .iter() - .filter_map(|exec| { - if let CacheStatus::Hit { replayed_duration } = &exec.cache_status { - Some(*replayed_duration) - } else { - None - } - }) - .sum(); + let task_display = &display.task_display; + // Task name and index let _ = write!( - self.writer, - "{} {} cache hit rate", - "Performance:".style(Style::new().bold()), - format_args!("{cache_rate}%").style(if cache_rate >= 75 { - Style::new().green().bold() - } else if cache_rate >= 50 { - CACHE_MISS_STYLE - } else { - Style::new().red() - }) - ); - - if total_saved > Duration::ZERO { - let _ = write!( - self.writer, - ", {:.2?} saved in total", - total_saved.style(Style::new().green().bold()) - ); - } - let _ = writeln!(self.writer); - let _ = writeln!(self.writer); - - // Detailed task results - let _ = writeln!(self.writer, "{}", "Task Details:".style(Style::new().bold())); - let _ = writeln!( - self.writer, - "{}", - 
"────────────────────────────────────────────────".style(Style::new().bright_black()) + writer, + " {} {}", + vite_str::format!("[{}]", idx + 1).style(Style::new().bright_black()), + task_display.to_string().style(Style::new().bright_white().bold()) ); - for (idx, exec) in self.executions.iter().enumerate() { - // Skip if no display info - let Some(ref display) = exec.display else { - continue; - }; - - let task_display = &display.task_display; - - // Task name and index - let _ = write!( - self.writer, - " {} {}", - vite_str::format!("[{}]", idx + 1).style(Style::new().bright_black()), - task_display.to_string().style(Style::new().bright_white().bold()) - ); + // Command with cwd prefix + let command_display = format_command_display(display, workspace_path); + let _ = write!(writer, ": {}", command_display.style(COMMAND_STYLE)); - // Command with cwd prefix - let cwd_relative = if let Ok(Some(rel)) = display.cwd.strip_prefix(&self.workspace_path) - { - Str::from(rel.as_str()) - } else { - Str::default() - }; - let cwd_str = if cwd_relative.is_empty() { - Str::default() - } else { - vite_str::format!("~/{cwd_relative}") - }; - let command_display = vite_str::format!("{cwd_str}$ {}", display.command); - let _ = write!(self.writer, ": {}", command_display.style(COMMAND_STYLE)); - - // Execution result icon - // None means success (cache hit or in-process), Some checks actual status - match &exec.exit_status { - None => { - let _ = write!(self.writer, " {}", "✓".style(Style::new().green().bold())); - } - Some(status) if status.success() => { - let _ = write!(self.writer, " {}", "✓".style(Style::new().green().bold())); - } - Some(status) => { - let code = exit_status_to_code(*status); - let _ = write!( - self.writer, - " {} {}", - "✗".style(Style::new().red().bold()), - vite_str::format!("(exit code: {code})").style(Style::new().red()) - ); - } + // Execution result icon + // None means success (cache hit or in-process), Some checks actual status + match 
&exec.exit_status { + None => { + let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); } - let _ = writeln!(self.writer); - - // Cache status details - use display module for plain text, apply styling here - let cache_summary = format_cache_status_summary(&exec.cache_status); - let styled_summary = match &exec.cache_status { - CacheStatus::Hit { .. } => cache_summary.style(Style::new().green()), - CacheStatus::Miss(_) => cache_summary.style(CACHE_MISS_STYLE), - CacheStatus::Disabled(_) => cache_summary.style(Style::new().bright_black()), - }; - let _ = writeln!(self.writer, " {styled_summary}"); - - // Error message if present - if let Some(ref error_msg) = exec.error_message { - let _ = writeln!( - self.writer, - " {} {}", - "✗ Error:".style(Style::new().red().bold()), - error_msg.style(Style::new().red()) - ); + Some(exit_status) if exit_status.success() => { + let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); } - - // Add spacing between tasks except for the last one - if idx < self.executions.len() - 1 { - let _ = writeln!( - self.writer, - " {}", - "·······················································" - .style(Style::new().bright_black()) + Some(exit_status) => { + let code = exit_status_to_code(*exit_status); + let _ = write!( + writer, + " {} {}", + "✗".style(Style::new().red().bold()), + vite_str::format!("(exit code: {code})").style(Style::new().red()) ); } } + let _ = writeln!(writer); + + // Cache status details — use display module for plain text, apply styling here + let cache_summary = format_cache_status_summary(&exec.cache_status); + let styled_summary = match &exec.cache_status { + CacheStatus::Hit { .. 
} => cache_summary.style(Style::new().green()), + CacheStatus::Miss(_) => cache_summary.style(CACHE_MISS_STYLE), + CacheStatus::Disabled(_) => cache_summary.style(Style::new().bright_black()), + }; + let _ = writeln!(writer, " {styled_summary}"); - let _ = writeln!( - self.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) - ); - } - - /// Print simplified cache status for single built-in commands - /// - /// Note: Inline cache status is now printed at Start event in `handle_start()`, - /// so this function is a no-op to avoid duplicate output. - #[expect( - clippy::unused_self, - reason = "method signature kept for API consistency with print_summary" - )] - const fn print_simple_cache_status(&self) { - // Inline cache status already printed at Start event - nothing to do here - } -} - -impl Reporter for LabeledReporter { - fn handle_event(&mut self, event: ExecutionEvent) { - match event.kind { - ExecutionEventKind::Start { display, cache_status } => { - self.handle_start(event.execution_id, display, cache_status); - } - ExecutionEventKind::Output { content, .. 
} => { - // Skip output if silent_if_cache_hit is enabled and this execution is a cache hit - if self.silent_if_cache_hit - && self.cache_hit_executions.contains(&event.execution_id) - { - return; - } - let _ = self.writer.write_all(&content); - let _ = self.writer.flush(); - } - ExecutionEventKind::Error { message } => { - self.handle_error(event.execution_id, message); - } - ExecutionEventKind::Finish { status, cache_update_status: _ } => { - self.handle_finish(event.execution_id, status); - } - } - } - - fn post_execution(mut self: Box) -> Result<(), ExitStatus> { - // Check if execution was aborted due to error - if let Some(error_msg) = &self.first_error { - // Print separator - let _ = writeln!(self.writer); - let _ = writeln!( - self.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - .style(Style::new().bright_black()) - ); - - // Print error abort message + // Error message if present + if let Some(ref error_msg) = exec.error_message { let _ = writeln!( - self.writer, - "{} {}", - "Execution aborted due to error:".style(Style::new().red().bold()), + writer, + " {} {}", + "✗ Error:".style(Style::new().red().bold()), error_msg.style(Style::new().red()) ); + } + // Add spacing between tasks except for the last one + if idx < executions.len() - 1 { let _ = writeln!( - self.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + writer, + " {}", + "·······················································" .style(Style::new().bright_black()) ); - - return Err(ExitStatus::FAILURE); - } - - // No errors - print summary if not hidden - if !self.hide_summary { - // Special case: single built-in command (no display info) - if self.executions.len() == 1 && self.executions[0].display.is_none() { - self.print_simple_cache_status(); - } else { - self.print_summary(); - } - } - - // Determine exit code based on failed tasks: - // 1. All tasks succeed → return Ok(()) - // 2. 
Exactly one task failed → return Err with that task's exit code - // 3. More than one task failed → return Err(1) - // Note: None means success (cache hit or in-process) - let failed_exit_codes: Vec = self - .executions - .iter() - .filter_map(|exec| exec.exit_status.as_ref()) - .filter(|status| !status.success()) - .map(|status| exit_status_to_code(*status)) - .collect(); - - match failed_exit_codes.as_slice() { - [] => Ok(()), - [code] => { - // Return the single failed task's exit code (clamped to u8 range) - #[expect( - clippy::cast_sign_loss, - reason = "value is clamped to 1..=255, always positive" - )] - Err(ExitStatus((*code).clamp(1, 255) as u8)) - } - _ => Err(ExitStatus::FAILURE), } } + + let _ = writeln!( + writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) + ); } diff --git a/crates/vite_task_bin/src/main.rs b/crates/vite_task_bin/src/main.rs index 4b9a5250..6c9d41e0 100644 --- a/crates/vite_task_bin/src/main.rs +++ b/crates/vite_task_bin/src/main.rs @@ -21,12 +21,7 @@ async fn run() -> anyhow::Result { let mut owned_callbacks = OwnedSessionCallbacks::default(); let session = Session::init(owned_callbacks.as_callbacks())?; match args { - Args::Task(command) => { - #[expect(clippy::large_futures, reason = "session.main produces a large future")] - { - session.main(command).await - } - } + Args::Task(command) => session.main(command).await, args => { // If env FOO is set, run `print-env FOO` via Session::exec before proceeding. // In vite-plus, Session::exec is used for auto-install. 
diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap index 5bd8efc1..5099610e 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap @@ -4,7 +4,6 @@ expression: e2e_outputs input_file: crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency --- [1]> vp run task-a # task-a -> task-b -> task-a cycle -✗ Cycle dependencies detected: Cycle(NodeIndex(ExecutionIx(1))) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Execution aborted due to error: Cycle dependencies detected: Cycle(NodeIndex(ExecutionIx(1))) diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exec-api/snapshots/exec caching.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exec-api/snapshots/exec caching.snap index c6464766..39e773bc 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exec-api/snapshots/exec caching.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exec-api/snapshots/exec caching.snap @@ -5,11 +5,9 @@ input_file: crates/vite_task_bin/tests/e2e_snapshots/fixtures/exec-api --- > FOO=bar vp lint # cache miss bar - Lint { args: [] } > FOO=bar vp lint # cache hit, silent Lint { args: [] } > FOO=baz vp lint # env changed, cache miss baz - Lint { args: [] } diff --git a/crates/vite_task_plan/src/lib.rs b/crates/vite_task_plan/src/lib.rs index 03fc7ea5..f8a6f016 100644 --- a/crates/vite_task_plan/src/lib.rs +++ b/crates/vite_task_plan/src/lib.rs @@ -277,7 +277,11 @@ impl ExecutionPlan { Ok(Self { root_node }) } - /// Plan a synthetic task execution. + /// Plan a synthetic task execution, returning the resolved [`SpawnExecution`] directly. 
+ /// + /// Unlike `plan_query` which returns a full execution graph, synthetic executions + /// are always a single spawned process. The caller can execute it directly using + /// `execute_spawn`. /// /// # Errors /// Returns an error if the program is not found or path fingerprinting fails. @@ -287,16 +291,17 @@ impl ExecutionPlan { cwd: &Arc, synthetic_plan_request: SyntheticPlanRequest, cache_key: Arc<[Str]>, - ) -> Result { + ) -> Result { let execution_cache_key = cache_metadata::ExecutionCacheKey::ExecAPI(cache_key); - let execution = plan_synthetic_request( + plan_synthetic_request( workspace_path, &BTreeMap::default(), synthetic_plan_request, Some(execution_cache_key), cwd, ParentCacheConfig::None, - )?; + ) + .with_empty_call_stack()?; Ok(Self { root_node: ExecutionItemKind::Leaf(LeafExecutionKind::Spawn(execution)) }) } } diff --git a/crates/vite_task_plan/tests/plan_snapshots/main.rs b/crates/vite_task_plan/tests/plan_snapshots/main.rs index f47588e6..78641920 100644 --- a/crates/vite_task_plan/tests/plan_snapshots/main.rs +++ b/crates/vite_task_plan/tests/plan_snapshots/main.rs @@ -180,8 +180,9 @@ fn run_case_inner( panic!("only `run` commands supported in plan tests") }; - let plan_result = - session.plan_from_cli(workspace_root.path.join(plan.cwd).into(), run_command).await; + let plan_result = session + .plan_from_cli_run(workspace_root.path.join(plan.cwd).into(), run_command) + .await; let plan = match plan_result { Ok(graph) => ExecutionPlan::from_execution_graph(graph), From ae24b5240528a44cb1a8cbc483da09e5dbdd83b0 Mon Sep 17 00:00:00 2001 From: branchseer Date: Sat, 14 Feb 2026 11:08:55 +0800 Subject: [PATCH 03/11] feat: add stdin suggestion to reporter for spawned tasks Add StdinSuggestion enum to LeafExecutionReporter trait so reporters can suggest whether child processes should inherit stdin or use /dev/null. PlainReporter always suggests inherited stdin (single synthetic execution). 
LabeledReporter suggests inherited only when the graph has exactly one spawn leaf (computed via count_spawn_leaves at build time). The actual stdin is inherited only when BOTH conditions hold: - Reporter suggests Inherited - Task has no cache metadata (caching disabled) This prevents non-deterministic user input from corrupting cached output while allowing interactive stdin for single uncached tasks. Includes 15 unit tests for stdin suggestion logic and 3 E2E snapshot tests verifying stdin inheritance/null behavior end-to-end. --- crates/vite_task/src/session/execute/mod.rs | 17 +- crates/vite_task/src/session/execute/spawn.rs | 4 +- crates/vite_task/src/session/reporter.rs | 331 +++++++++++++++++- .../fixtures/stdin-inheritance/package.json | 4 + .../packages/other/package.json | 6 + .../stdin-inheritance/pnpm-workspace.yaml | 2 + .../fixtures/stdin-inheritance/snapshots.toml | 31 ++ .../multiple tasks get null stdin.snap | 25 ++ .../single task no cache inherits stdin.snap | 21 ++ ...ingle task with cache gets null stdin.snap | 20 ++ .../fixtures/stdin-inheritance/vite-task.json | 13 + crates/vite_task_plan/src/in_process.rs | 1 + crates/vite_task_plan/src/lib.rs | 2 +- 13 files changed, 472 insertions(+), 5 deletions(-) create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/package.json create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/pnpm-workspace.yaml create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots.toml create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap create mode 100644 
crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json diff --git a/crates/vite_task/src/session/execute/mod.rs b/crates/vite_task/src/session/execute/mod.rs index 1505d336..84d3eeeb 100644 --- a/crates/vite_task/src/session/execute/mod.rs +++ b/crates/vite_task/src/session/execute/mod.rs @@ -1,7 +1,7 @@ pub mod fingerprint; pub mod spawn; -use std::sync::Arc; +use std::{process::Stdio, sync::Arc}; use futures_util::FutureExt; use petgraph::{algo::toposort, stable_graph::StableGraph}; @@ -23,7 +23,7 @@ use super::{ }, reporter::{ ExitStatus, GraphExecutionReporter, GraphExecutionReporterBuilder, LeafExecutionPath, - LeafExecutionReporter, + LeafExecutionReporter, StdinSuggestion, }, }; use crate::{Session, session::execute::spawn::SpawnTrackResult}; @@ -252,6 +252,18 @@ pub async fn execute_spawn( let mut track_result_with_cache_metadata = cache_metadata.map(|cache_metadata| (SpawnTrackResult::default(), cache_metadata)); + // Determine the child process's stdin mode based on: + // - The reporter's suggestion (inherited only when appropriate, e.g., single task) + // - Whether caching is disabled (inherited stdin would make output non-deterministic, + // breaking cache semantics) + let stdin = if leaf_reporter.stdin_suggestion() == StdinSuggestion::Inherited + && cache_metadata.is_none() + { + Stdio::inherit() + } else { + Stdio::null() + }; + // Execute command with tracking, streaming output in real-time via the reporter #[expect( clippy::large_futures, @@ -260,6 +272,7 @@ pub async fn execute_spawn( let result = match spawn_with_tracking( &spawn_execution.spawn_command, cache_base_path, + stdin, |kind, content| { leaf_reporter.output( match kind { diff --git a/crates/vite_task/src/session/execute/spawn.rs b/crates/vite_task/src/session/execute/spawn.rs index 836de6db..68cd6558 100644 --- 
a/crates/vite_task/src/session/execute/spawn.rs +++ b/crates/vite_task/src/session/execute/spawn.rs @@ -62,6 +62,7 @@ pub struct SpawnTrackResult { /// Returns the execution result including captured outputs, exit status, /// and tracked file accesses. /// +/// - `stdin` controls the child process's stdin (typically `Stdio::null()` or `Stdio::inherit()`). /// - `on_output` is called in real-time as stdout/stderr data arrives. /// - `track_result` if provided, will be populated with captured outputs and path accesses for caching. If `None`, tracking is disabled. #[expect( @@ -71,6 +72,7 @@ pub struct SpawnTrackResult { pub async fn spawn_with_tracking( spawn_command: &SpawnCommand, workspace_root: &AbsolutePath, + stdin: Stdio, mut on_output: F, track_result: Option<&mut SpawnTrackResult>, ) -> anyhow::Result @@ -90,7 +92,7 @@ where cmd.args(spawn_command.args.iter().map(vite_str::Str::as_str)); cmd.envs(spawn_command.all_envs.iter()); cmd.current_dir(&*spawn_command.cwd); - cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + cmd.stdin(stdin).stdout(Stdio::piped()).stderr(Stdio::piped()); let mut tracking_state = if let Some(track_result) = track_result { // track_result is Some. 
Spawn with tracking enabled diff --git a/crates/vite_task/src/session/reporter.rs b/crates/vite_task/src/session/reporter.rs index 495d5177..6de3f23e 100644 --- a/crates/vite_task/src/session/reporter.rs +++ b/crates/vite_task/src/session/reporter.rs @@ -36,7 +36,7 @@ use smallvec::SmallVec; use vite_path::AbsolutePath; use vite_str::Str; use vite_task_plan::{ - ExecutionGraph, ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, + ExecutionGraph, ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, LeafExecutionKind, execution_graph::ExecutionNodeIndex, }; @@ -60,6 +60,27 @@ impl ExitStatus { pub const SUCCESS: Self = Self(0); } +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Stdin suggestion +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Suggestion from the reporter about what stdin mode to use for a spawned process. +/// +/// The actual stdin mode is determined by [`execute_spawn`](super::execute::execute_spawn) +/// based on this suggestion AND whether caching is enabled for the task: +/// - `Inherited` is only used when the suggestion is `Inherited` AND caching is disabled. +/// This prevents non-deterministic user input from corrupting cached output. +/// - `Null` is always respected as-is. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum StdinSuggestion { + /// Suggest connecting the child process's stdin to /dev/null (or NUL on Windows). + /// Used when multiple tasks run in sequence and stdin should not be shared. + Null, + /// Suggest inheriting stdin from the parent process, allowing interactive input. + /// Only effective when caching is disabled for the task. 
+ Inherited, +} + // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ // Leaf execution path — identifies a leaf within a (potentially nested) execution graph // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ @@ -192,6 +213,15 @@ pub trait GraphExecutionReporter { /// `start()` may not be called before `finish()` if an error occurs before the cache /// status is determined (e.g., cache lookup failure). pub trait LeafExecutionReporter { + /// Suggest which stdin mode to use for the spawned process. + /// + /// Called by [`execute_spawn`](super::execute::execute_spawn) before spawning to + /// determine the child process's stdin configuration. The final decision also + /// depends on whether caching is enabled — inherited stdin is only used when + /// the suggestion is [`StdinSuggestion::Inherited`] AND the task has no cache + /// metadata (caching disabled). + fn stdin_suggestion(&self) -> StdinSuggestion; + /// Report that execution is starting with the given cache status. /// /// Called after the cache lookup completes, before any output is produced. @@ -333,6 +363,12 @@ impl PlainReporter { } impl LeafExecutionReporter for PlainReporter { + fn stdin_suggestion(&self) -> StdinSuggestion { + // PlainReporter is used for single-leaf synthetic executions (e.g., auto-install). + // Always suggest inherited stdin so the spawned process can be interactive. + StdinSuggestion::Inherited + } + fn start(&mut self, cache_status: CacheStatus) { self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. }); // PlainReporter has no display info (synthetic executions), @@ -404,6 +440,27 @@ struct SharedReporterState { stats: ExecutionStats, /// The first error encountered during execution. Used to print the abort banner. first_error: Option, + /// Total number of spawned leaf executions in the graph (including nested `Expanded` + /// subgraphs). 
Computed once at build time and used to determine the stdin suggestion: + /// inherited stdin is only suggested when there is exactly one spawn leaf. + spawn_leaf_count: usize, +} + +/// Count the total number of spawned leaf executions in an execution graph, +/// recursing into nested `Expanded` subgraphs. +/// +/// In-process executions are not counted because they don't spawn child processes +/// and thus don't need stdin. +fn count_spawn_leaves(graph: &ExecutionGraph) -> usize { + graph + .node_weights() + .flat_map(|task| task.items.iter()) + .map(|item| match &item.kind { + ExecutionItemKind::Leaf(LeafExecutionKind::Spawn(_)) => 1, + ExecutionItemKind::Leaf(LeafExecutionKind::InProcess(_)) => 0, + ExecutionItemKind::Expanded(nested_graph) => count_spawn_leaves(nested_graph), + }) + .sum() } /// Builder for the labeled graph reporter. @@ -439,12 +496,14 @@ impl LabeledReporterBuilder { impl GraphExecutionReporterBuilder for LabeledReporterBuilder { fn build(self: Box, graph: &Arc) -> Box { + let spawn_leaf_count = count_spawn_leaves(graph); Box::new(LabeledGraphReporter { shared: Rc::new(RefCell::new(SharedReporterState { writer: self.writer, executions: Vec::new(), stats: ExecutionStats::default(), first_error: None, + spawn_leaf_count, })), graph: Arc::clone(graph), workspace_path: self.workspace_path, @@ -571,6 +630,18 @@ struct LabeledLeafReporter { } impl LeafExecutionReporter for LabeledLeafReporter { + fn stdin_suggestion(&self) -> StdinSuggestion { + // Only suggest inherited stdin when the graph has exactly one spawned leaf + // execution. With multiple spawned tasks, stdin should not be shared — each + // task gets /dev/null to avoid contention. + let shared = self.shared.borrow(); + if shared.spawn_leaf_count == 1 { + StdinSuggestion::Inherited + } else { + StdinSuggestion::Null + } + } + fn start(&mut self, cache_status: CacheStatus) { self.started = true; self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. 
}); @@ -884,3 +955,261 @@ fn print_summary( "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) ); } + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Tests +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use vite_task_graph::display::TaskDisplay; + use vite_task_plan::{ + ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, InProcessExecution, + LeafExecutionKind, SpawnCommand, SpawnExecution, TaskExecution, + }; + + use super::*; + + /// Create a dummy `AbsolutePath` for test fixtures. + fn test_path() -> Arc { + #[cfg(unix)] + { + Arc::from(AbsolutePath::new("/test").unwrap()) + } + #[cfg(windows)] + { + Arc::from(AbsolutePath::new("C:\\test").unwrap()) + } + } + + /// Create a dummy `TaskDisplay` for test fixtures. + fn test_task_display(name: &str) -> TaskDisplay { + TaskDisplay { + package_name: "pkg".into(), + task_name: name.into(), + package_path: test_path(), + } + } + + /// Create a dummy `ExecutionItemDisplay` for test fixtures. + fn test_display(name: &str) -> ExecutionItemDisplay { + ExecutionItemDisplay { + task_display: test_task_display(name), + command: name.into(), + and_item_index: None, + cwd: test_path(), + } + } + + /// Create a `TaskExecution` with a single spawn leaf. + fn spawn_task(name: &str) -> TaskExecution { + TaskExecution { + task_display: test_task_display(name), + items: vec![ExecutionItem { + execution_item_display: test_display(name), + kind: ExecutionItemKind::Leaf(LeafExecutionKind::Spawn(SpawnExecution { + cache_metadata: None, + spawn_command: SpawnCommand { + program_path: test_path(), + args: Arc::from([]), + all_envs: Arc::new(BTreeMap::new()), + cwd: test_path(), + }, + })), + }], + } + } + + /// Create a `TaskExecution` with a single in-process leaf (echo). 
+ fn in_process_task(name: &str) -> TaskExecution { + TaskExecution { + task_display: test_task_display(name), + items: vec![ExecutionItem { + execution_item_display: test_display(name), + kind: ExecutionItemKind::Leaf(LeafExecutionKind::InProcess( + InProcessExecution::get_builtin_execution( + "echo", + ["hello"].iter(), + &test_path(), + ) + .unwrap(), + )), + }], + } + } + + /// Create a `TaskExecution` with an expanded (nested) subgraph as its item. + fn expanded_task(name: &str, nested_graph: ExecutionGraph) -> TaskExecution { + TaskExecution { + task_display: test_task_display(name), + items: vec![ExecutionItem { + execution_item_display: test_display(name), + kind: ExecutionItemKind::Expanded(nested_graph), + }], + } + } + + // ──────────────────────────────────────────────────────────── + // count_spawn_leaves tests + // ──────────────────────────────────────────────────────────── + + #[test] + fn count_spawn_leaves_empty_graph() { + let graph = ExecutionGraph::default(); + assert_eq!(count_spawn_leaves(&graph), 0); + } + + #[test] + fn count_spawn_leaves_single_spawn() { + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + assert_eq!(count_spawn_leaves(&graph), 1); + } + + #[test] + fn count_spawn_leaves_multiple_spawns() { + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + graph.add_node(spawn_task("test")); + graph.add_node(spawn_task("lint")); + assert_eq!(count_spawn_leaves(&graph), 3); + } + + #[test] + fn count_spawn_leaves_in_process_not_counted() { + let mut graph = ExecutionGraph::default(); + graph.add_node(in_process_task("echo")); + assert_eq!(count_spawn_leaves(&graph), 0); + } + + #[test] + fn count_spawn_leaves_mixed_spawn_and_in_process() { + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + graph.add_node(in_process_task("echo")); + assert_eq!(count_spawn_leaves(&graph), 1); + } + + #[test] + fn count_spawn_leaves_nested_expanded() { + // 
Build a nested graph containing two spawns + let mut nested = ExecutionGraph::default(); + nested.add_node(spawn_task("nested-build")); + nested.add_node(spawn_task("nested-test")); + + // Outer graph has one expanded item pointing to the nested graph + let mut graph = ExecutionGraph::default(); + graph.add_node(expanded_task("expand", nested)); + assert_eq!(count_spawn_leaves(&graph), 2); + } + + #[test] + fn count_spawn_leaves_nested_with_top_level() { + // Nested graph with one spawn + let mut nested = ExecutionGraph::default(); + nested.add_node(spawn_task("nested-lint")); + + // Top-level graph has one spawn + one expanded + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + graph.add_node(expanded_task("expand", nested)); + assert_eq!(count_spawn_leaves(&graph), 2); + } + + // ──────────────────────────────────────────────────────────── + // PlainReporter stdin_suggestion tests + // ──────────────────────────────────────────────────────────── + + #[test] + fn plain_reporter_always_suggests_inherited() { + let reporter = PlainReporter::new(Vec::::new(), false); + assert_eq!(reporter.stdin_suggestion(), StdinSuggestion::Inherited); + } + + #[test] + fn plain_reporter_suggests_inherited_even_when_silent() { + let reporter = PlainReporter::new(Vec::::new(), true); + assert_eq!(reporter.stdin_suggestion(), StdinSuggestion::Inherited); + } + + // ──────────────────────────────────────────────────────────── + // LabeledLeafReporter stdin_suggestion tests + // ──────────────────────────────────────────────────────────── + + /// Build a `LabeledGraphReporter` for the given graph and return a leaf reporter + /// for the first node's first item. 
+ fn build_labeled_leaf(graph: ExecutionGraph) -> Box { + let graph_arc = Arc::new(graph); + let builder = Box::new(LabeledReporterBuilder::new(Vec::::new(), test_path())); + let mut reporter = builder.build(&graph_arc); + + // Create a leaf reporter for the first node + let path = LeafExecutionPath::default(); + reporter.new_leaf_execution(&path) + } + + #[test] + fn labeled_reporter_single_spawn_suggests_inherited() { + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + let leaf = build_labeled_leaf(graph); + assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Inherited); + } + + #[test] + fn labeled_reporter_multiple_spawns_suggests_null() { + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + graph.add_node(spawn_task("test")); + let leaf = build_labeled_leaf(graph); + assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Null); + } + + #[test] + fn labeled_reporter_single_in_process_suggests_null() { + // Zero spawn leaves → spawn_leaf_count == 0, so not == 1 → Null + // This is correct: in-process executions don't spawn child processes, + // so stdin suggestion doesn't apply to them. 
+ let mut graph = ExecutionGraph::default(); + graph.add_node(in_process_task("echo")); + let leaf = build_labeled_leaf(graph); + assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Null); + } + + #[test] + fn labeled_reporter_one_spawn_one_in_process_suggests_inherited() { + // One spawn leaf + one in-process → spawn_leaf_count == 1 → Inherited + let mut graph = ExecutionGraph::default(); + graph.add_node(spawn_task("build")); + graph.add_node(in_process_task("echo")); + let leaf = build_labeled_leaf(graph); + assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Inherited); + } + + #[test] + fn labeled_reporter_nested_single_spawn_suggests_inherited() { + // Nested graph with exactly one spawn + let mut nested = ExecutionGraph::default(); + nested.add_node(spawn_task("nested-build")); + + let mut graph = ExecutionGraph::default(); + graph.add_node(expanded_task("expand", nested)); + let leaf = build_labeled_leaf(graph); + assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Inherited); + } + + #[test] + fn labeled_reporter_nested_multiple_spawns_suggests_null() { + // Nested graph with two spawns + let mut nested = ExecutionGraph::default(); + nested.add_node(spawn_task("nested-a")); + nested.add_node(spawn_task("nested-b")); + + let mut graph = ExecutionGraph::default(); + graph.add_node(expanded_task("expand", nested)); + let leaf = build_labeled_leaf(graph); + assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Null); + } +} diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/package.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/package.json new file mode 100644 index 00000000..9d985bfb --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/package.json @@ -0,0 +1,4 @@ +{ + "name": "stdin-inheritance-test", + "private": true +} diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json 
b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json new file mode 100644 index 00000000..d25708eb --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json @@ -0,0 +1,6 @@ +{ + "name": "other", + "scripts": { + "read-stdin": "cat" + } +} diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/pnpm-workspace.yaml b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/pnpm-workspace.yaml new file mode 100644 index 00000000..924b55f4 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/pnpm-workspace.yaml @@ -0,0 +1,2 @@ +packages: + - packages/* diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots.toml b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots.toml new file mode 100644 index 00000000..1d25fc3b --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots.toml @@ -0,0 +1,31 @@ +# Tests stdin inheritance behavior for spawned tasks. +# +# Stdin is inherited from the parent process only when BOTH conditions are met: +# 1. The reporter suggests inherited stdin (single spawn leaf in the graph, or synthetic execution) +# 2. Caching is disabled for the task (cache_metadata is None) +# +# Otherwise, stdin is set to /dev/null. 
+ +[[e2e]] +name = "single task no cache inherits stdin" +# Single spawn leaf + cache disabled → stdin is inherited +# The piped "from-stdin" should appear in the task's output +steps = [ + "echo from-stdin | vp run read-stdin", +] + +[[e2e]] +name = "single task with cache gets null stdin" +# Single spawn leaf + cache enabled → stdin is null (protects cache determinism) +# The piped "from-stdin" should NOT appear in the task's output +steps = [ + "echo from-stdin | vp run read-stdin-cached", +] + +[[e2e]] +name = "multiple tasks get null stdin" +# Multiple spawn leaves → stdin is null regardless of cache setting +# The piped "from-stdin" should NOT appear in any task's output +steps = [ + "echo from-stdin | vp run -r read-stdin", +] diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap new file mode 100644 index 00000000..2a5fa83d --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap @@ -0,0 +1,25 @@ +--- +source: crates/vite_task_bin/tests/e2e_snapshots/main.rs +expression: e2e_outputs +--- +> echo from-stdin | vp run -r read-stdin +$ cat ⊘ cache disabled: no cache config + +~/packages/other$ cat + + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Vite+ Task Runner • Execution Summary +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Statistics: 2 tasks • 0 cache hits • 1 cache misses • 1 cache disabled +Performance: 0% cache hit rate + +Task Details: +──────────────────────────────────────────────── + [1] stdin-inheritance-test#read-stdin: $ cat ✓ + → Cache disabled in task configuration + ······················································· + [2] other#read-stdin: ~/packages/other$ cat ✓ + → Cache miss: no previous cache entry found +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git 
a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap new file mode 100644 index 00000000..34738fec --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap @@ -0,0 +1,21 @@ +--- +source: crates/vite_task_bin/tests/e2e_snapshots/main.rs +expression: e2e_outputs +--- +> echo from-stdin | vp run read-stdin +$ cat ⊘ cache disabled: no cache config +from-stdin + + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Vite+ Task Runner • Execution Summary +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Statistics: 1 tasks • 0 cache hits • 0 cache misses • 1 cache disabled +Performance: 0% cache hit rate + +Task Details: +──────────────────────────────────────────────── + [1] stdin-inheritance-test#read-stdin: $ cat ✓ + → Cache disabled in task configuration +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap new file mode 100644 index 00000000..6761095a --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap @@ -0,0 +1,20 @@ +--- +source: crates/vite_task_bin/tests/e2e_snapshots/main.rs +expression: e2e_outputs +--- +> echo from-stdin | vp run read-stdin-cached +$ cat + + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Vite+ Task Runner • Execution Summary +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Statistics: 1 tasks • 0 cache hits • 1 cache misses +Performance: 0% cache hit rate + +Task Details: +──────────────────────────────────────────────── + [1] 
stdin-inheritance-test#read-stdin-cached: $ cat ✓ + → Cache miss: no previous cache entry found +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json new file mode 100644 index 00000000..50566bc6 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json @@ -0,0 +1,13 @@ +{ + "cacheScripts": true, + "tasks": { + "read-stdin": { + "command": "cat", + "cache": false + }, + "read-stdin-cached": { + "command": "cat", + "cache": true + } + } +} diff --git a/crates/vite_task_plan/src/in_process.rs b/crates/vite_task_plan/src/in_process.rs index bea525eb..d5ba3314 100644 --- a/crates/vite_task_plan/src/in_process.rs +++ b/crates/vite_task_plan/src/in_process.rs @@ -20,6 +20,7 @@ pub struct InProcessExecution { impl InProcessExecution { /// Execute the in-process execution and return the output. 
+ #[must_use] pub fn execute(&self) -> InProcessExecutionOutput { match &self.kind { InProcessExecutionKind::Echo { strings, trailing_newline } => { diff --git a/crates/vite_task_plan/src/lib.rs b/crates/vite_task_plan/src/lib.rs index f8a6f016..8de4b5b7 100644 --- a/crates/vite_task_plan/src/lib.rs +++ b/crates/vite_task_plan/src/lib.rs @@ -14,7 +14,7 @@ use context::PlanContext; use error::TaskPlanErrorKindResultExt; pub use error::{Error, TaskPlanErrorKind}; pub use execution_graph::ExecutionGraph; -use in_process::InProcessExecution; +pub use in_process::InProcessExecution; pub use path_env::{get_path_env, prepend_path_env}; use plan::{ParentCacheConfig, plan_query_request, plan_synthetic_request}; use plan_request::{PlanRequest, QueryPlanRequest, SyntheticPlanRequest}; From 2c717317e778f9993663da1ad03b164dcff996aa Mon Sep 17 00:00:00 2001 From: branchseer Date: Sat, 14 Feb 2026 14:14:26 +0800 Subject: [PATCH 04/11] fix: replace MSYS cat with cross-platform read-stdin tool in stdin tests MSYS cat from Git for Windows crashes with MapViewOfFileEx errors when stdin is set to Stdio::null(), causing CI failures on Windows. Replace with a Node.js read-stdin tool that handles null stdin gracefully. 
--- .../stdin-inheritance/packages/other/package.json | 2 +- .../snapshots/multiple tasks get null stdin.snap | 8 ++++---- .../snapshots/single task no cache inherits stdin.snap | 4 ++-- .../snapshots/single task with cache gets null stdin.snap | 4 ++-- .../fixtures/stdin-inheritance/vite-task.json | 4 ++-- packages/tools/package.json | 1 + packages/tools/src/read-stdin.js | 6 ++++++ 7 files changed, 18 insertions(+), 11 deletions(-) create mode 100755 packages/tools/src/read-stdin.js diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json index d25708eb..68b67a9e 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/packages/other/package.json @@ -1,6 +1,6 @@ { "name": "other", "scripts": { - "read-stdin": "cat" + "read-stdin": "read-stdin" } } diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap index 2a5fa83d..cd8fb472 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap @@ -3,9 +3,9 @@ source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs --- > echo from-stdin | vp run -r read-stdin -$ cat ⊘ cache disabled: no cache config +$ read-stdin ⊘ cache disabled: no cache config -~/packages/other$ cat +~/packages/other$ read-stdin ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ @@ -17,9 +17,9 @@ Performance: 0% cache hit rate Task Details: ──────────────────────────────────────────────── - [1] 
stdin-inheritance-test#read-stdin: $ cat ✓ + [1] stdin-inheritance-test#read-stdin: $ read-stdin ✓ → Cache disabled in task configuration ······················································· - [2] other#read-stdin: ~/packages/other$ cat ✓ + [2] other#read-stdin: ~/packages/other$ read-stdin ✓ → Cache miss: no previous cache entry found ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap index 34738fec..b77a51fd 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task no cache inherits stdin.snap @@ -3,7 +3,7 @@ source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs --- > echo from-stdin | vp run read-stdin -$ cat ⊘ cache disabled: no cache config +$ read-stdin ⊘ cache disabled: no cache config from-stdin @@ -16,6 +16,6 @@ Performance: 0% cache hit rate Task Details: ──────────────────────────────────────────────── - [1] stdin-inheritance-test#read-stdin: $ cat ✓ + [1] stdin-inheritance-test#read-stdin: $ read-stdin ✓ → Cache disabled in task configuration ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap index 6761095a..cb690886 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets null stdin.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/single task with cache gets 
null stdin.snap @@ -3,7 +3,7 @@ source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs --- > echo from-stdin | vp run read-stdin-cached -$ cat +$ read-stdin ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ @@ -15,6 +15,6 @@ Performance: 0% cache hit rate Task Details: ──────────────────────────────────────────────── - [1] stdin-inheritance-test#read-stdin-cached: $ cat ✓ + [1] stdin-inheritance-test#read-stdin-cached: $ read-stdin ✓ → Cache miss: no previous cache entry found ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json index 50566bc6..3e5addfb 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/vite-task.json @@ -2,11 +2,11 @@ "cacheScripts": true, "tasks": { "read-stdin": { - "command": "cat", + "command": "read-stdin", "cache": false }, "read-stdin-cached": { - "command": "cat", + "command": "read-stdin", "cache": true } } diff --git a/packages/tools/package.json b/packages/tools/package.json index e770356f..a242446e 100644 --- a/packages/tools/package.json +++ b/packages/tools/package.json @@ -7,6 +7,7 @@ "print-file": "./src/print-file.ts", "print-env": "./src/print-env.js", "json-edit": "./src/json-edit.ts", + "read-stdin": "./src/read-stdin.js", "replace-file-content": "./src/replace-file-content.ts" }, "dependencies": { diff --git a/packages/tools/src/read-stdin.js b/packages/tools/src/read-stdin.js new file mode 100755 index 00000000..4db26f57 --- /dev/null +++ b/packages/tools/src/read-stdin.js @@ -0,0 +1,6 @@ +#!/usr/bin/env node + +// Cross-platform cat replacement: reads all of stdin and writes it to stdout. +// Unlike MSYS cat on Windows, this handles Stdio::null() gracefully by +// receiving an immediate EOF rather than crashing. 
+process.stdin.pipe(process.stdout); From a0aae59c5d8a0956792c2bfffc447c3b6052c369 Mon Sep 17 00:00:00 2001 From: branchseer Date: Sun, 15 Feb 2026 00:07:42 +0800 Subject: [PATCH 05/11] refactor: remove ExecutionAborted, replace with SpawnOutcome enum Executions no longer abort the entire graph on infrastructure errors (cache lookup, spawn, or cache update failures). All tasks run to completion; failures are tracked by the reporter and reflected in the final exit code. - Replace ExecutionAborted with SpawnOutcome { CacheHit, Spawned, Failed } - Make execute_leaf infallible (returns ()) - execute_expanded_graph returns Option for cycle errors only - Remove first_error abort banner from labeled reporter - Update exit code logic to account for infrastructure errors - Update cycle dependency snapshot for new error format --- crates/vite_task/src/session/execute/mod.rs | 101 +++++++++--------- crates/vite_task/src/session/mod.rs | 20 ++-- crates/vite_task/src/session/reporter.rs | 73 ++++--------- .../snapshots/cycle dependency error.snap | 6 +- crates/vite_task_plan/src/lib.rs | 21 ++-- 5 files changed, 98 insertions(+), 123 deletions(-) diff --git a/crates/vite_task/src/session/execute/mod.rs b/crates/vite_task/src/session/execute/mod.rs index 84d3eeeb..9bab8a04 100644 --- a/crates/vite_task/src/session/execute/mod.rs +++ b/crates/vite_task/src/session/execute/mod.rs @@ -28,15 +28,21 @@ use super::{ }; use crate::{Session, session::execute::spawn::SpawnTrackResult}; -/// Internal error type used to abort execution when errors occur. +/// Outcome of a spawned execution. 
/// -/// Contains an optional graph-level error message: -/// - `None`: A leaf-level error occurred and was already reported through -/// `LeafExecutionReporter::finish()` -/// - `Some(message)`: A graph-level error occurred (e.g., cycle detection) -/// that needs to be passed to `GraphExecutionReporter::finish()` -#[derive(Debug)] -pub struct ExecutionAborted(Option); +/// Returned by [`execute_spawn`] to communicate what happened. Errors are +/// already reported through `LeafExecutionReporter::finish()` before this +/// value is returned — the caller does not need to handle error display. +pub enum SpawnOutcome { + /// Cache hit — no process was spawned. Cached outputs were replayed. + CacheHit, + /// Process was spawned and exited with this status. + Spawned(std::process::ExitStatus), + /// An infrastructure error prevented the process from running + /// (cache lookup failure or spawn failure). + /// Already reported through the leaf reporter. + Failed, +} /// Holds mutable references needed during graph execution. /// @@ -62,12 +68,15 @@ impl ExecutionContext<'_> { /// The `path_prefix` tracks our position within nested execution graphs. For the /// root call this is an empty path; for nested `Expanded` items it carries the /// path so far. + /// Returns `Some(error_message)` if the graph contains a cycle (graph-level error), + /// or `None` on success. Leaf-level errors are reported through the reporter + /// and do not abort the graph. #[expect(clippy::future_not_send, reason = "uses !Send types internally")] async fn execute_expanded_graph( &mut self, graph: &ExecutionGraph, path_prefix: &LeafExecutionPath, - ) -> Result<(), ExecutionAborted> { + ) -> Option { // Use StableGraph to preserve node indices during removal. // We need stable indices because the LeafExecutionPath references nodes // by their original index in the graph. @@ -87,9 +96,7 @@ impl ExecutionContext<'_> { Err(cycle) => { // Cycle detected — return a graph-level error. 
// This will be passed to `GraphExecutionReporter::finish(Some(msg))`. - return Err(ExecutionAborted(Some(vite_str::format!( - "Cycle dependencies detected: {cycle:?}" - )))); + return Some(vite_str::format!("Cycle dependencies detected: {cycle:?}")); } }; @@ -110,16 +117,23 @@ impl ExecutionContext<'_> { match &item.kind { ExecutionItemKind::Leaf(leaf_kind) => { - self.execute_leaf(&item_path, leaf_kind).boxed_local().await?; + self.execute_leaf(&item_path, leaf_kind).boxed_local().await; } ExecutionItemKind::Expanded(nested_graph) => { - // Recurse into the nested graph, carrying the path prefix forward - self.execute_expanded_graph(nested_graph, &item_path).boxed_local().await?; + // Recurse into the nested graph, carrying the path prefix forward. + // Cycle errors in nested graphs propagate up immediately. + if let Some(err) = self + .execute_expanded_graph(nested_graph, &item_path) + .boxed_local() + .await + { + return Some(err); + } } } } } - Ok(()) + None } /// Execute a single leaf item (in-process command or spawned process). @@ -131,7 +145,7 @@ impl ExecutionContext<'_> { &mut self, path: &LeafExecutionPath, leaf_execution_kind: &LeafExecutionKind, - ) -> Result<(), ExecutionAborted> { + ) { let mut leaf_reporter = self.reporter.new_leaf_execution(path); match leaf_execution_kind { @@ -147,17 +161,15 @@ impl ExecutionContext<'_> { CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), None, ); - Ok(()) } - LeafExecutionKind::Spawn(spawn_execution) => - { + LeafExecutionKind::Spawn(spawn_execution) => { #[expect( clippy::large_futures, reason = "spawn execution with cache management creates large futures" )] - execute_spawn(leaf_reporter, spawn_execution, self.cache, self.cache_base_path) - .await - .map(|_status| ()) + let _ = + execute_spawn(leaf_reporter, spawn_execution, self.cache, self.cache_base_path) + .await; } } } @@ -174,12 +186,8 @@ impl ExecutionContext<'_> { /// 3. If cache hit: replay cached outputs → finish /// 4. 
If cache miss/disabled: spawn process → stream output → update cache → finish /// -/// # Returns -/// -/// - `Ok(None)` — cache hit, no process was spawned -/// - `Ok(Some(exit_status))` — process ran, here's its exit status -/// - `Err(ExecutionAborted(None))` — an error occurred, already reported through -/// `leaf_reporter.finish(..., Some(error_message))` +/// Errors (cache lookup failure, spawn failure, cache update failure) are reported +/// through `leaf_reporter.finish()` and do not abort the caller. #[expect(clippy::future_not_send, reason = "uses !Send types internally")] #[expect( clippy::too_many_lines, @@ -190,7 +198,7 @@ pub async fn execute_spawn( spawn_execution: &SpawnExecution, cache: &ExecutionCache, cache_base_path: &Arc, -) -> Result, ExecutionAborted> { +) -> SpawnOutcome { let cache_metadata = spawn_execution.cache_metadata.as_ref(); // 1. Determine cache status FIRST by trying cache hit. @@ -209,14 +217,14 @@ pub async fn execute_spawn( None, ), Err(err) => { - // Cache lookup error — report through finish and abort. + // Cache lookup error — report through finish. // Note: start() is NOT called because we don't have a valid cache status. leaf_reporter.finish( None, CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), Some(vite_str::format!("Cache lookup failed: {err}")), ); - return Err(ExecutionAborted(None)); + return SpawnOutcome::Failed; } } } else { @@ -244,7 +252,7 @@ pub async fn execute_spawn( CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheHit), None, ); - return Ok(None); + return SpawnOutcome::CacheHit; } // 4. Execute spawn (cache miss or disabled). 
@@ -293,7 +301,7 @@ pub async fn execute_spawn( CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), Some(vite_str::format!("Failed to spawn process: {err}")), ); - return Err(ExecutionAborted(None)); + return SpawnOutcome::Failed; } }; @@ -339,11 +347,12 @@ pub async fn execute_spawn( (CacheUpdateStatus::NotUpdated(CacheNotUpdatedReason::CacheDisabled), None) }; - // 6. Finish the leaf execution with the result and optional error - let has_error = cache_error.is_some(); + // 6. Finish the leaf execution with the result and optional cache error. + // Cache update/fingerprint failures are reported but do not affect the outcome — + // the process ran, so we return its actual exit status. leaf_reporter.finish(Some(result.exit_status), cache_update_status, cache_error); - if has_error { Err(ExecutionAborted(None)) } else { Ok(Some(result.exit_status)) } + SpawnOutcome::Spawned(result.exit_status) } impl Session<'_> { @@ -382,18 +391,14 @@ impl Session<'_> { cache_base_path: &self.workspace_path, }; - // Execute the graph. On abort, extract the optional graph-level error message. - let graph_error = match execution_context - .execute_expanded_graph(&graph, &LeafExecutionPath::default()) - .await - { - Ok(()) => None, - Err(ExecutionAborted(error)) => error, - }; + // Execute the graph. Individual leaf errors are reported through the reporter + // and do not abort the graph. Only graph-level errors (cycle detection) are + // returned here. + let graph_error = + execution_context.execute_expanded_graph(&graph, &LeafExecutionPath::default()).await; - // Always call finish, whether execution succeeded or was aborted. - // graph_error is None for leaf-level errors (already handled by leaf reporter) - // and Some(msg) for graph-level errors (cycle detection). + // Leaf-level errors and non-zero exit statuses are tracked internally + // by the reporter. graph_error is Some only for cycle detection. 
reporter.finish(graph_error) } } diff --git a/crates/vite_task/src/session/mod.rs b/crates/vite_task/src/session/mod.rs index 61c9bc9a..b6a7fd9d 100644 --- a/crates/vite_task/src/session/mod.rs +++ b/crates/vite_task/src/session/mod.rs @@ -20,7 +20,7 @@ use vite_task_graph::{ loader::UserConfigLoader, }; use vite_task_plan::{ - ExecutionGraph, ExecutionPlan, TaskGraphLoader, TaskPlanErrorKind, + ExecutionGraph, ExecutionPlan, TaskGraphLoader, plan_request::{PlanRequest, ScriptCommand, SyntheticPlanRequest}, prepend_path_env, }; @@ -457,12 +457,18 @@ impl<'a> Session<'a> { ) -> anyhow::Result { // Plan the synthetic execution — returns a SpawnExecution directly // (synthetic plans are always a single spawned process) - let spawn_execution = ExecutionPlan::plan_synthetic( + let execution_plan = ExecutionPlan::plan_synthetic( &self.workspace_path, &self.cwd, synthetic_plan_request, cache_key, )?; + let vite_task_plan::ExecutionItemKind::Leaf(vite_task_plan::LeafExecutionKind::Spawn( + spawn_execution, + )) = execution_plan.into_root_node() + else { + unreachable!("plan_synthetic always produces a Leaf(Spawn(..)) node") + }; // Initialize cache (needed for cache-aware execution) let cache = self.cache()?; @@ -480,11 +486,11 @@ impl<'a> Session<'a> { .await { // Cache hit — no process was spawned, success - Ok(None) => Ok(ExitStatus::SUCCESS), + execute::SpawnOutcome::CacheHit => Ok(ExitStatus::SUCCESS), // Process ran successfully - Ok(Some(status)) if status.success() => Ok(ExitStatus::SUCCESS), + execute::SpawnOutcome::Spawned(status) if status.success() => Ok(ExitStatus::SUCCESS), // Process ran but exited with non-zero status - Ok(Some(status)) => { + execute::SpawnOutcome::Spawned(status) => { let code = event::exit_status_to_code(status); #[expect( clippy::cast_sign_loss, @@ -492,8 +498,8 @@ impl<'a> Session<'a> { )] Ok(ExitStatus(code.clamp(1, 255) as u8)) } - // Error — already reported through the reporter's finish() - Err(_) => Ok(ExitStatus::FAILURE), + // 
Infrastructure error — already reported through the reporter's finish() + execute::SpawnOutcome::Failed => Ok(ExitStatus::FAILURE), } } diff --git a/crates/vite_task/src/session/reporter.rs b/crates/vite_task/src/session/reporter.rs index 6de3f23e..d1a19feb 100644 --- a/crates/vite_task/src/session/reporter.rs +++ b/crates/vite_task/src/session/reporter.rs @@ -438,8 +438,6 @@ struct SharedReporterState { writer: W, executions: Vec, stats: ExecutionStats, - /// The first error encountered during execution. Used to print the abort banner. - first_error: Option, /// Total number of spawned leaf executions in the graph (including nested `Expanded` /// subgraphs). Computed once at build time and used to determine the stdin suggestion: /// inherited stdin is only suggested when there is exactly one spawn leaf. @@ -502,7 +500,6 @@ impl GraphExecutionReporterBuilder for LabeledReporterBuilde writer: self.writer, executions: Vec::new(), stats: ExecutionStats::default(), - first_error: None, spawn_leaf_count, })), graph: Arc::clone(graph), @@ -537,43 +534,14 @@ impl GraphExecutionReporter for LabeledGraphReporter { fn finish(self: Box, error: Option) -> Result<(), ExitStatus> { let mut shared = self.shared.borrow_mut(); - // If a graph-level error was passed in, store it as first_error - // (only if no leaf error was already recorded — leaf errors take precedence - // since they occurred first chronologically) - if let Some(ref error_msg) = error - && shared.first_error.is_none() - { - shared.first_error = Some(error_msg.clone()); - } - - // Check if execution was aborted due to error (either graph-level or leaf-level). - // Clone the error message before using the writer to avoid borrow conflicts. 
- if let Some(error_msg) = shared.first_error.clone() { - // Print error abort banner - let _ = writeln!(shared.writer); - let _ = writeln!( - shared.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - .style(Style::new().bright_black()) - ); - let _ = writeln!( - shared.writer, - "{} {}", - "Execution aborted due to error:".style(Style::new().red().bold()), - error_msg.style(Style::new().red()) - ); - let _ = writeln!( - shared.writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - .style(Style::new().bright_black()) - ); - + // Handle graph-level errors (e.g., cycle detection, cache init failure). + // These are distinct from leaf-level errors which are tracked per-execution. + if let Some(error_msg) = error { + write_error_message(&mut shared.writer, &error_msg); return Err(ExitStatus::FAILURE); } - // No errors — print summary. + // Print summary. // Special case: single execution without display info (e.g., synthetic via nested expansion) // → skip summary since there's nothing meaningful to show. let is_single_displayless = @@ -585,11 +553,17 @@ impl GraphExecutionReporter for LabeledGraphReporter { print_summary(writer, executions, stats, &self.workspace_path); } - // Determine exit code based on failed tasks: - // 1. All tasks succeed → return Ok(()) - // 2. Exactly one task failed → return Err with that task's exit code - // 3. More than one task failed → return Err(1) - // Note: None means success (cache hit or in-process) + // Determine exit code based on failed tasks and infrastructure errors: + // - Infrastructure errors (cache lookup, spawn failure) have error_message set + // but no meaningful exit_status. + // - Process failures have a non-zero exit_status. + // + // Rules: + // 1. No failures at all → Ok(()) + // 2. Exactly one process failure, no infra errors → use that task's exit code + // 3. 
Any infra errors, or multiple failures → Err(1) + let has_infra_errors = shared.executions.iter().any(|exec| exec.error_message.is_some()); + let failed_exit_codes: Vec = shared .executions .iter() @@ -598,9 +572,9 @@ impl GraphExecutionReporter for LabeledGraphReporter { .map(|status| exit_status_to_code(*status)) .collect(); - match failed_exit_codes.as_slice() { - [] => Ok(()), - [code] => { + match (has_infra_errors, failed_exit_codes.as_slice()) { + (false, []) => Ok(()), + (false, [code]) => { // Return the single failed task's exit code (clamped to u8 range) #[expect( clippy::cast_sign_loss, @@ -692,11 +666,6 @@ impl LeafExecutionReporter for LabeledLeafReporter { if let Some(ref message) = error { write_error_message(&mut shared.writer, message); - // Track first error for the abort banner - if shared.first_error.is_none() { - shared.first_error = Some(message.clone()); - } - // Update the execution info if start() was called (an entry was pushed). // Without the `self.started` guard, `last_mut()` would return a // *different* execution's entry, corrupting its error_message. @@ -743,8 +712,8 @@ impl LeafExecutionReporter for LabeledLeafReporter { /// Print the full execution summary with statistics, performance, and per-task details. /// -/// This is called by [`LabeledGraphReporter::finish`] when there are no errors -/// and the summary should be displayed. +/// Called by [`LabeledGraphReporter::finish`] after all tasks have executed. +/// Infrastructure errors and task failures are included in the summary. 
#[expect( clippy::too_many_lines, reason = "summary formatting is inherently verbose with many write calls" diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap index 5099610e..e0b66675 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap @@ -1,10 +1,6 @@ --- source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs -input_file: crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency --- [1]> vp run task-a # task-a -> task-b -> task-a cycle - -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Execution aborted due to error: Cycle dependencies detected: Cycle(NodeIndex(ExecutionIx(1))) -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +✗ Cycle dependencies detected: Cycle(NodeIndex(ExecutionIx(1))) diff --git a/crates/vite_task_plan/src/lib.rs b/crates/vite_task_plan/src/lib.rs index 8de4b5b7..c7567d04 100644 --- a/crates/vite_task_plan/src/lib.rs +++ b/crates/vite_task_plan/src/lib.rs @@ -11,8 +11,7 @@ pub mod plan_request; use std::{collections::BTreeMap, ffi::OsStr, fmt::Debug, sync::Arc}; use context::PlanContext; -use error::TaskPlanErrorKindResultExt; -pub use error::{Error, TaskPlanErrorKind}; +pub use error::Error; pub use execution_graph::ExecutionGraph; pub use in_process::InProcessExecution; pub use path_env::{get_path_env, prepend_path_env}; @@ -192,6 +191,11 @@ impl ExecutionPlan { &self.root_node } + #[must_use] + pub fn into_root_node(self) -> ExecutionItemKind { + self.root_node + } + /// Returns `true` if the plan contains no tasks to execute. 
#[must_use] pub fn is_empty(&self) -> bool { @@ -220,11 +224,7 @@ impl ExecutionPlan { plan_request_parser: &mut (dyn PlanRequestParser + '_), task_graph_loader: &mut (dyn TaskGraphLoader + '_), ) -> Result { - let indexed_task_graph = task_graph_loader - .load_task_graph() - .await - .map_err(TaskPlanErrorKind::TaskGraphLoadError) - .with_empty_call_stack()?; + let indexed_task_graph = task_graph_loader.load_task_graph().await?; let context = PlanContext::new( workspace_path, @@ -291,17 +291,16 @@ impl ExecutionPlan { cwd: &Arc, synthetic_plan_request: SyntheticPlanRequest, cache_key: Arc<[Str]>, - ) -> Result { + ) -> Result { let execution_cache_key = cache_metadata::ExecutionCacheKey::ExecAPI(cache_key); - plan_synthetic_request( + let execution = plan_synthetic_request( workspace_path, &BTreeMap::default(), synthetic_plan_request, Some(execution_cache_key), cwd, ParentCacheConfig::None, - ) - .with_empty_call_stack()?; + )?; Ok(Self { root_node: ExecutionItemKind::Leaf(LeafExecutionKind::Spawn(execution)) }) } } From f8f70c5b8b8c8889371ffc34a104574dfa11cc07 Mon Sep 17 00:00:00 2001 From: branchseer Date: Sat, 14 Feb 2026 23:13:12 +0800 Subject: [PATCH 06/11] feat: move cycle detection from execution to plan time Use petgraph::acyclic::Acyclic for the ExecutionGraph type alias, providing type-level acyclicity guarantees. Cycles are now detected when building the execution plan rather than during graph execution. 
- Add CycleDependencyDetected variant to TaskPlanErrorKind - Replace StableGraph + toposort with Acyclic::nodes_iter() for execution order - Simplify ExecutionAborted to a unit struct (no error message field) - Remove error parameter from GraphExecutionReporter::finish() - Move cache initialization before reporter construction in execute_graph - Add plan snapshot test for cycle dependency errors - Update E2E snapshots for new error format and execution order --- crates/vite_task/src/session/execute/mod.rs | 119 ++++++++---------- crates/vite_task/src/session/reporter.rs | 28 +++-- .../snapshots/cycle dependency error.snap | 5 +- ...ple task failures returns exit code 1.snap | 9 +- .../multiple tasks get null stdin.snap | 12 +- crates/vite_task_plan/src/error.rs | 8 ++ crates/vite_task_plan/src/execution_graph.rs | 15 ++- crates/vite_task_plan/src/lib.rs | 20 +-- crates/vite_task_plan/src/plan.rs | 44 ++++++- .../fixtures/cycle-dependency/package.json | 3 + .../fixtures/cycle-dependency/snapshots.toml | 5 + .../query - cycle dependency error.snap | 6 + .../snapshots/task graph.snap | 79 ++++++++++++ .../fixtures/cycle-dependency/vite-task.json | 12 ++ .../tests/plan_snapshots/main.rs | 15 ++- 15 files changed, 272 insertions(+), 108 deletions(-) create mode 100644 crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/package.json create mode 100644 crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots.toml create mode 100644 crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap create mode 100644 crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/task graph.snap create mode 100644 crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/vite-task.json diff --git a/crates/vite_task/src/session/execute/mod.rs b/crates/vite_task/src/session/execute/mod.rs index 9bab8a04..f2de47c8 100644 --- 
a/crates/vite_task/src/session/execute/mod.rs +++ b/crates/vite_task/src/session/execute/mod.rs @@ -4,12 +4,10 @@ pub mod spawn; use std::{process::Stdio, sync::Arc}; use futures_util::FutureExt; -use petgraph::{algo::toposort, stable_graph::StableGraph}; use vite_path::AbsolutePath; -use vite_str::Str; use vite_task_plan::{ - ExecutionGraph, ExecutionItemKind, LeafExecutionKind, SpawnExecution, TaskExecution, - execution_graph::ExecutionIx, + ExecutionGraph, ExecutionItemKind, LeafExecutionKind, SpawnExecution, + execution_graph::ExecutionNodeIndex, }; use self::{ @@ -59,56 +57,43 @@ struct ExecutionContext<'a> { } impl ExecutionContext<'_> { - /// Execute all tasks in an execution graph in topological order. + /// Execute all tasks in an execution graph in dependency order. /// - /// This is the main entry point for graph traversal. It topologically sorts the graph - /// (reversing edges so dependencies execute first), then executes each task's items - /// sequentially. + /// Since `ExecutionGraph` is `Acyclic>`, the graph is guaranteed to have + /// no cycles at the type level. We use `nodes_iter()` which returns nodes in topological + /// order (for every edge A→B, A comes before B). + /// + /// In our graph, edges go from dependents to dependencies (A→B means "A depends on B"), + /// so `nodes_iter()` gives dependents before dependencies. We reverse this to execute + /// dependencies first. /// /// The `path_prefix` tracks our position within nested execution graphs. For the /// root call this is an empty path; for nested `Expanded` items it carries the /// path so far. - /// Returns `Some(error_message)` if the graph contains a cycle (graph-level error), - /// or `None` on success. Leaf-level errors are reported through the reporter - /// and do not abort the graph. + /// Leaf-level errors are reported through the reporter and do not abort the graph. + /// Cycle detection is handled at plan time, so this function cannot encounter cycles. 
#[expect(clippy::future_not_send, reason = "uses !Send types internally")] async fn execute_expanded_graph( &mut self, graph: &ExecutionGraph, path_prefix: &LeafExecutionPath, - ) -> Option { - // Use StableGraph to preserve node indices during removal. - // We need stable indices because the LeafExecutionPath references nodes - // by their original index in the graph. - let mut stable_graph: StableGraph<&TaskExecution, (), _, ExecutionIx> = - graph.map(|_, task_execution| task_execution, |_, ()| ()).into(); - - // The graph is constructed with edges from dependents to dependencies - // (e.g., A → B means "A depends on B"). For execution we need the reverse: - // dependencies should execute first. Reversing edges before topological sort - // achieves this. - stable_graph.reverse(); - - // Topological sort ensures tasks execute in dependency order. - // A cycle means the dependency graph is invalid. - let node_indices = match toposort(&stable_graph, None) { - Ok(ok) => ok, - Err(cycle) => { - // Cycle detected — return a graph-level error. - // This will be passed to `GraphExecutionReporter::finish(Some(msg))`. - return Some(vite_str::format!("Cycle dependencies detected: {cycle:?}")); - } - }; + ) { + // `nodes_iter()` returns nodes in topological order: for every edge A→B, + // A appears before B. Since our edges mean "A depends on B", dependencies + // (B) appear after their dependents (A). Reversing gives us execution order + // where dependencies run first. + // + // Collect into a Vec first since the `nodes_iter()` return type + // (`impl Iterator`) does not implement `DoubleEndedIterator`. + let mut node_indices: Vec = graph.nodes_iter().collect(); + node_indices.reverse(); - // Execute tasks in topological order. Each task may have multiple items + // Execute tasks in dependency-first order. Each task may have multiple items // (from `&&`-split commands), which are executed sequentially. 
for node_ix in node_indices { - // `remove_node` on a StableGraph preserves other node indices. - // The original node index (`node_ix`) is still valid for path construction - // because it corresponds to the same node in the original (non-stable) graph. - let task_execution = stable_graph - .remove_node(node_ix) - .expect("node was returned by toposort so it must exist"); + // Access the inner DiGraph directly for node indexing, since the `Acyclic` + // wrapper's only `Index` impl is our custom `Index`. + let task_execution = &graph.inner()[node_ix]; for (item_idx, item) in task_execution.items.iter().enumerate() { // Build the path for this item by appending to the prefix @@ -121,19 +106,11 @@ impl ExecutionContext<'_> { } ExecutionItemKind::Expanded(nested_graph) => { // Recurse into the nested graph, carrying the path prefix forward. - // Cycle errors in nested graphs propagate up immediately. - if let Some(err) = self - .execute_expanded_graph(nested_graph, &item_path) - .boxed_local() - .await - { - return Some(err); - } + self.execute_expanded_graph(nested_graph, &item_path).boxed_local().await; } } } } - None } /// Execute a single leaf item (in-process command or spawned process). @@ -358,9 +335,9 @@ pub async fn execute_spawn( impl Session<'_> { /// Execute an execution graph, reporting events through the provided reporter builder. /// - /// The builder is first transitioned to a `GraphExecutionReporter` by providing the graph. - /// Then each task in the graph is executed in topological order, with leaf executions - /// reported through individual `LeafExecutionReporter` instances. + /// Cache is initialized only if any leaf execution needs it. The reporter is built + /// after cache initialization, so cache errors are reported directly to stderr + /// without involving the reporter at all. /// /// Returns `Err(ExitStatus)` to indicate the caller should exit with the given status code. /// Returns `Ok(())` when all tasks succeeded. 
@@ -370,35 +347,39 @@ impl Session<'_> { execution_graph: ExecutionGraph, builder: Box, ) -> Result<(), ExitStatus> { - // Wrap the graph in Arc so both the reporter and execution can reference it. - // The reporter clones the Arc internally for display lookups. - let graph = Arc::new(execution_graph); - let mut reporter = builder.build(&graph); - - // Lazily initialize the cache on first execution + // Initialize cache before building the reporter. Cache errors are reported + // directly to stderr and cause an early exit, keeping the reporter flow clean + // (the reporter's `finish()` no longer accepts graph-level error messages). let cache = match self.cache() { Ok(cache) => cache, + #[expect(clippy::print_stderr, reason = "cache init errors bypass the reporter")] Err(err) => { - // Cache initialization failure is a graph-level error — pass to finish() - return reporter - .finish(Some(vite_str::format!("Failed to initialize cache: {err}"))); + eprintln!("Failed to initialize cache: {err}"); + return Err(ExitStatus::FAILURE); } }; + // Wrap the graph in Arc so both the reporter and execution can reference it. + // The reporter clones the Arc internally for display lookups. + // `Acyclic` uses `RefCell` internally for topological-order caching, making it + // `!Sync`. We still need `Arc` here because the reporter holds a clone for display + // lookups. The graph is only ever accessed from a single thread in practice. + #[expect(clippy::arc_with_non_send_sync, reason = "Acyclic but single-threaded use")] + let graph = Arc::new(execution_graph); + let mut reporter = builder.build(&graph); + let mut execution_context = ExecutionContext { reporter: &mut *reporter, cache, cache_base_path: &self.workspace_path, }; - // Execute the graph. Individual leaf errors are reported through the reporter - // and do not abort the graph. Only graph-level errors (cycle detection) are - // returned here. 
- let graph_error = - execution_context.execute_expanded_graph(&graph, &LeafExecutionPath::default()).await; + // Execute the graph. Leaf-level errors are reported through the reporter + // and do not abort the graph. Cycle detection is handled at plan time. + execution_context.execute_expanded_graph(&graph, &LeafExecutionPath::default()).await; // Leaf-level errors and non-zero exit statuses are tracked internally - // by the reporter. graph_error is Some only for cycle detection. - reporter.finish(graph_error) + // by the reporter. + reporter.finish() } } diff --git a/crates/vite_task/src/session/reporter.rs b/crates/vite_task/src/session/reporter.rs index d1a19feb..db49e262 100644 --- a/crates/vite_task/src/session/reporter.rs +++ b/crates/vite_task/src/session/reporter.rs @@ -97,11 +97,15 @@ struct ExecutionPathItem { /// Indexing a graph by a single `ExecutionPathItem` yields the [`ExecutionItem`] /// at that position. +/// +/// Uses `.inner()` to access the underlying `DiGraph`'s `Index` impl, +/// since `Acyclic` delegates indexing through `Deref` and the compiler might +/// otherwise recurse into this very impl. impl Index for ExecutionGraph { type Output = ExecutionItem; fn index(&self, index: ExecutionPathItem) -> &Self::Output { - &self[index.graph_node_ix].items[index.task_execution_item_index] + &self.inner()[index.graph_node_ix].items[index.task_execution_item_index] } } @@ -197,13 +201,13 @@ pub trait GraphExecutionReporter { /// Finalize the graph execution session. /// - /// If `error` is `Some`, a graph-level error occurred (e.g., cycle detection, cache - /// initialization failure). Leaf-level errors are already tracked internally by the - /// reporter via the leaf reporter's `finish()` method. + /// Leaf-level errors are already tracked internally by the reporter via the + /// leaf reporter's `finish()` method. Graph-level errors (cycle detection) are + /// now caught at plan time and never reach the reporter. 
/// /// Returns `Ok(())` if all tasks succeeded, or `Err(ExitStatus)` to indicate the /// process should exit with the given status code. - fn finish(self: Box, error: Option) -> Result<(), ExitStatus>; + fn finish(self: Box) -> Result<(), ExitStatus>; } /// Reporter for a single leaf execution (one spawned process or in-process command). @@ -531,16 +535,9 @@ impl GraphExecutionReporter for LabeledGraphReporter { }) } - fn finish(self: Box, error: Option) -> Result<(), ExitStatus> { + fn finish(self: Box) -> Result<(), ExitStatus> { let mut shared = self.shared.borrow_mut(); - // Handle graph-level errors (e.g., cycle detection, cache init failure). - // These are distinct from leaf-level errors which are tracked per-execution. - if let Some(error_msg) = error { - write_error_message(&mut shared.writer, &error_msg); - return Err(ExitStatus::FAILURE); - } - // Print summary. // Special case: single execution without display info (e.g., synthetic via nested expansion) // → skip summary since there's nothing meaningful to show. @@ -933,6 +930,7 @@ fn print_summary( mod tests { use std::collections::BTreeMap; + use petgraph::data::Build; use vite_task_graph::display::TaskDisplay; use vite_task_plan::{ ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, InProcessExecution, @@ -1110,6 +1108,10 @@ mod tests { /// Build a `LabeledGraphReporter` for the given graph and return a leaf reporter /// for the first node's first item. 
fn build_labeled_leaf(graph: ExecutionGraph) -> Box { + #[expect( + clippy::arc_with_non_send_sync, + reason = "Acyclic but single-threaded test" + )] let graph_arc = Arc::new(graph); let builder = Box::new(LabeledReporterBuilder::new(Vec::::new(), test_path())); let mut reporter = builder.build(&graph_arc); diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap index e0b66675..ef0765b6 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap @@ -3,4 +3,7 @@ source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs --- [1]> vp run task-a # task-a -> task-b -> task-a cycle -✗ Cycle dependencies detected: Cycle(NodeIndex(ExecutionIx(1))) +Error: Failed to plan execution + +Caused by: + Cycle dependency detected involving task error-cycle-dependency-test#task-b diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exit-codes/snapshots/multiple task failures returns exit code 1.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exit-codes/snapshots/multiple task failures returns exit code 1.snap index 648f2ceb..17a21b96 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exit-codes/snapshots/multiple task failures returns exit code 1.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/exit-codes/snapshots/multiple task failures returns exit code 1.snap @@ -1,13 +1,12 @@ --- source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs -input_file: crates/vite_task_bin/tests/e2e_snapshots/fixtures/exit-codes --- [1]> vp run -r fail # multiple failures -> exit code 1 -~/packages/pkg-b$ node -e "process.exit(7)" - ~/packages/pkg-a$ node 
-e "process.exit(42)" +~/packages/pkg-b$ node -e "process.exit(7)" + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Vite+ Task Runner • Execution Summary @@ -18,9 +17,9 @@ Performance: 0% cache hit rate Task Details: ──────────────────────────────────────────────── - [1] pkg-b#fail: ~/packages/pkg-b$ node -e "process.exit(7)" ✗ (exit code: 7) + [1] pkg-a#fail: ~/packages/pkg-a$ node -e "process.exit(42)" ✗ (exit code: 42) → Cache miss: no previous cache entry found ······················································· - [2] pkg-a#fail: ~/packages/pkg-a$ node -e "process.exit(42)" ✗ (exit code: 42) + [2] pkg-b#fail: ~/packages/pkg-b$ node -e "process.exit(7)" ✗ (exit code: 7) → Cache miss: no previous cache entry found ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap index cd8fb472..a3ee77a0 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/stdin-inheritance/snapshots/multiple tasks get null stdin.snap @@ -3,10 +3,10 @@ source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs --- > echo from-stdin | vp run -r read-stdin -$ read-stdin ⊘ cache disabled: no cache config - ~/packages/other$ read-stdin +$ read-stdin ⊘ cache disabled: no cache config + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Vite+ Task Runner • Execution Summary @@ -17,9 +17,9 @@ Performance: 0% cache hit rate Task Details: ──────────────────────────────────────────────── - [1] stdin-inheritance-test#read-stdin: $ read-stdin ✓ - → Cache disabled in task configuration - ······················································· - [2] other#read-stdin: ~/packages/other$ read-stdin ✓ + [1] other#read-stdin: 
~/packages/other$ read-stdin ✓ → Cache miss: no previous cache entry found + ······················································· + [2] stdin-inheritance-test#read-stdin: $ read-stdin ✓ + → Cache disabled in task configuration ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_plan/src/error.rs b/crates/vite_task_plan/src/error.rs index 6e94937d..a685cc7a 100644 --- a/crates/vite_task_plan/src/error.rs +++ b/crates/vite_task_plan/src/error.rs @@ -143,6 +143,14 @@ pub enum Error { #[error("No task specifier provided for 'run' command")] MissingTaskSpecifier, + + /// A cycle was detected in the task dependency graph during planning. + /// + /// This is caught when wrapping the built `DiGraph` in `petgraph::acyclic::Acyclic`, + /// which validates that the graph is a DAG. The contained `TaskDisplay` identifies + /// one of the tasks involved in the cycle. + #[error("Cycle dependency detected involving task {0}")] + CycleDependencyDetected(TaskDisplay), } impl Error { diff --git a/crates/vite_task_plan/src/execution_graph.rs b/crates/vite_task_plan/src/execution_graph.rs index 87c4a385..3ddc69f9 100644 --- a/crates/vite_task_plan/src/execution_graph.rs +++ b/crates/vite_task_plan/src/execution_graph.rs @@ -23,4 +23,17 @@ unsafe impl IndexType for ExecutionIx { pub type ExecutionNodeIndex = NodeIndex; pub type ExecutionEdgeIndex = EdgeIndex; -pub type ExecutionGraph = petgraph::graph::DiGraph; +/// The inner directed graph type before acyclicity wrapping. +/// Used during graph construction in `plan_query_request` before validation. +type InnerExecutionGraph = petgraph::graph::DiGraph; + +/// A directed acyclic execution graph. +/// +/// Wraps `petgraph::graph::DiGraph` in `petgraph::acyclic::Acyclic` to enforce at the +/// type level that the graph has no cycles. This guarantee is established at plan time +/// when the graph is constructed in `plan_query_request`, eliminating the need for +/// runtime cycle detection during execution. 
+/// +/// `Acyclic` implements `Deref>`, so all read operations on the +/// inner graph (indexing, iteration, node/edge counts) work transparently. +pub type ExecutionGraph = petgraph::acyclic::Acyclic; diff --git a/crates/vite_task_plan/src/lib.rs b/crates/vite_task_plan/src/lib.rs index c7567d04..e24304d1 100644 --- a/crates/vite_task_plan/src/lib.rs +++ b/crates/vite_task_plan/src/lib.rs @@ -18,8 +18,7 @@ pub use path_env::{get_path_env, prepend_path_env}; use plan::{ParentCacheConfig, plan_query_request, plan_synthetic_request}; use plan_request::{PlanRequest, QueryPlanRequest, SyntheticPlanRequest}; use rustc_hash::FxHashMap; -use serde::{Serialize, ser::SerializeMap as _}; -use vite_graph_ser::serialize_by_key; +use serde::{Serialize, Serializer, ser::SerializeMap as _}; use vite_path::AbsolutePath; use vite_str::Str; use vite_task_graph::{TaskGraphLoadError, display::TaskDisplay}; @@ -139,15 +138,22 @@ pub enum LeafExecutionKind { InProcess(InProcessExecution), } +/// Serialize an `ExecutionGraph` (which is `Acyclic>`) using `serialize_by_key`. +/// +/// `vite_graph_ser::serialize_by_key` expects `&DiGraph`, so we call `.inner()` +/// on the `Acyclic` wrapper to get the underlying `DiGraph` reference. +fn serialize_execution_graph_by_key( + graph: &ExecutionGraph, + serializer: S, +) -> Result { + vite_graph_ser::serialize_by_key(graph.inner(), serializer) +} + /// An execution item, from a split subcommand in a task's command (`item1 && item2 && ...`). #[derive(Debug, Serialize)] -#[expect( - clippy::large_enum_variant, - reason = "variants are used in-place, boxing would add indirection" -)] pub enum ExecutionItemKind { /// Expanded from a known vp subcommand, like `vp run ...` or a synthesized task. - Expanded(#[serde(serialize_with = "serialize_by_key")] ExecutionGraph), + Expanded(#[serde(serialize_with = "serialize_execution_graph_by_key")] ExecutionGraph), /// A normal execution that spawns a child process, like `tsc --noEmit`. 
Leaf(LeafExecutionKind), } diff --git a/crates/vite_task_plan/src/plan.rs b/crates/vite_task_plan/src/plan.rs index e7d63dc2..b8683d23 100644 --- a/crates/vite_task_plan/src/plan.rs +++ b/crates/vite_task_plan/src/plan.rs @@ -36,6 +36,10 @@ use crate::{ plan_request::{PlanRequest, QueryPlanRequest, ScriptCommand, SyntheticPlanRequest}, }; +/// Type alias for the inner graph used during construction, before acyclicity validation. +type InnerExecutionGraph = + petgraph::graph::DiGraph; + /// Locate the executable path for a given program name in the provided envs and cwd. fn which( program: &Arc, @@ -499,6 +503,9 @@ fn plan_spawn_execution( } /// Expand the parsed task request (like `run -r build`/`lint`) into an execution graph. +/// +/// Builds a `DiGraph` of task executions, then validates it is acyclic by wrapping +/// in `petgraph::acyclic::Acyclic`. Returns `CycleDependencyDetected` if a cycle is found. #[expect(clippy::future_not_send, reason = "PlanContext contains !Send dyn PlanRequestParser")] pub async fn plan_query_request( query_plan_request: QueryPlanRequest, @@ -517,7 +524,8 @@ pub async fn plan_query_request( rustc_hash::FxBuildHasher, ); - let mut execution_graph = ExecutionGraph::with_capacity( + // Build the inner DiGraph first, then validate acyclicity at the end. 
+ let mut inner_graph = InnerExecutionGraph::with_capacity( task_node_index_graph.node_count(), task_node_index_graph.edge_count(), ); @@ -527,17 +535,45 @@ pub async fn plan_query_request( let task_execution = plan_task_as_execution_node(task_index, context.duplicate()).boxed_local().await?; execution_node_indices_by_task_index - .insert(task_index, execution_graph.add_node(task_execution)); + .insert(task_index, inner_graph.add_node(task_execution)); } // Add edges between execution nodes according to task dependencies for (from_task_index, to_task_index, ()) in task_node_index_graph.all_edges() { - execution_graph.add_edge( + inner_graph.add_edge( execution_node_indices_by_task_index[&from_task_index], execution_node_indices_by_task_index[&to_task_index], (), ); } - Ok(execution_graph) + // Validate the graph is acyclic by wrapping in `Acyclic`. + // `Acyclic::try_from` performs a topological sort; if a cycle is found, + // it returns `Cycle(node_id)` identifying one node in the cycle. + ExecutionGraph::try_from(inner_graph).map_err(|cycle| { + // Look up the human-readable task display for the node involved in the cycle. + // The cycle's node_id is still valid against the original inner_graph (which + // was moved into try_from but returned back on error — however, `Cycle` only + // contains the node index, not the graph). We stored the display info in our + // local map, so we can look it up by reversing the index mapping. + // + // Since we no longer have access to the inner_graph after the failed try_from, + // we find the TaskDisplay by searching our index map for the node index. + let task_display = + execution_node_indices_by_task_index.iter().find_map(|(task_idx, &exec_idx)| { + if exec_idx == cycle.node_id() { + Some(context.indexed_task_graph().display_task(*task_idx)) + } else { + None + } + }); + // If for some reason the node wasn't found in our map (shouldn't happen), + // fall back to a placeholder display. 
+ let display = task_display.unwrap_or_else(|| vite_task_graph::display::TaskDisplay { + package_name: "unknown".into(), + task_name: "unknown".into(), + package_path: Arc::clone(context.workspace_path()), + }); + Error::CycleDependencyDetected(display) + }) } diff --git a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/package.json b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/package.json new file mode 100644 index 00000000..042bf614 --- /dev/null +++ b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/package.json @@ -0,0 +1,3 @@ +{ + "name": "cycle-dependency-test" +} diff --git a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots.toml b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots.toml new file mode 100644 index 00000000..de37aa4e --- /dev/null +++ b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots.toml @@ -0,0 +1,5 @@ +# Tests that cycle dependencies are detected at plan time + +[[plan]] +name = "cycle dependency error" +args = ["run", "task-a"] diff --git a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap new file mode 100644 index 00000000..b1309d5e --- /dev/null +++ b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap @@ -0,0 +1,6 @@ +--- +source: crates/vite_task_plan/tests/plan_snapshots/main.rs +expression: err_str.as_ref() +input_file: crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency +--- +Failed to plan execution: Cycle dependency detected involving task cycle-dependency-test#task-b diff --git a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/task graph.snap 
b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/task graph.snap new file mode 100644 index 00000000..13301c92 --- /dev/null +++ b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/task graph.snap @@ -0,0 +1,79 @@ +--- +source: crates/vite_task_plan/tests/plan_snapshots/main.rs +expression: task_graph_json +input_file: crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency +--- +[ + { + "key": [ + "/", + "task-a" + ], + "node": { + "task_display": { + "package_name": "cycle-dependency-test", + "task_name": "task-a", + "package_path": "/" + }, + "resolved_config": { + "command": "echo a", + "resolved_options": { + "cwd": "/", + "cache_config": { + "env_config": { + "fingerprinted_envs": [], + "pass_through_envs": [ + "" + ] + } + } + } + } + }, + "neighbors": [ + [ + [ + "/", + "task-b" + ], + "Explicit" + ] + ] + }, + { + "key": [ + "/", + "task-b" + ], + "node": { + "task_display": { + "package_name": "cycle-dependency-test", + "task_name": "task-b", + "package_path": "/" + }, + "resolved_config": { + "command": "echo b", + "resolved_options": { + "cwd": "/", + "cache_config": { + "env_config": { + "fingerprinted_envs": [], + "pass_through_envs": [ + "" + ] + } + } + } + } + }, + "neighbors": [ + [ + [ + "/", + "task-a" + ], + "Explicit" + ] + ] + } +] diff --git a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/vite-task.json b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/vite-task.json new file mode 100644 index 00000000..82baa830 --- /dev/null +++ b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/vite-task.json @@ -0,0 +1,12 @@ +{ + "tasks": { + "task-a": { + "command": "echo a", + "dependsOn": ["task-b"] + }, + "task-b": { + "command": "echo b", + "dependsOn": ["task-a"] + } + } +} diff --git a/crates/vite_task_plan/tests/plan_snapshots/main.rs b/crates/vite_task_plan/tests/plan_snapshots/main.rs index 78641920..11b7bab1 
100644 --- a/crates/vite_task_plan/tests/plan_snapshots/main.rs +++ b/crates/vite_task_plan/tests/plan_snapshots/main.rs @@ -187,12 +187,23 @@ fn run_case_inner( let plan = match plan_result { Ok(graph) => ExecutionPlan::from_execution_graph(graph), Err(err) => { + // Format the full error chain using anyhow's `{:#}` formatter + // and redact workspace paths for snapshot stability. + let anyhow_err: anyhow::Error = err.into(); + let err_formatted = vite_str::format!("{anyhow_err:#}"); + let err_str = + err_formatted.as_str().cow_replace(workspace_root_str, ""); + let err_str = if cfg!(windows) { + err_str.as_ref().cow_replace('\\', "/") + } else { + err_str + }; #[expect( clippy::disallowed_macros, - reason = "insta::assert_debug_snapshot! internally uses std::format!" + reason = "insta::assert_snapshot! internally uses std::format!" )] { - insta::assert_debug_snapshot!(snapshot_name.as_str(), err); + insta::assert_snapshot!(snapshot_name.as_str(), err_str.as_ref()); } continue; } From 28de769ef0590adc069fac76d624473d1a0dce4a Mon Sep 17 00:00:00 2001 From: branchseer Date: Sat, 14 Feb 2026 23:48:33 +0800 Subject: [PATCH 07/11] refactor: replace petgraph Acyclic with custom Sync ExecutionGraph struct Replace the petgraph::acyclic::Acyclic type alias with a custom ExecutionGraph struct that owns a DiGraph and a pre-computed toposort. This eliminates the RefCell-based !Sync issue that required arc_with_non_send_sync suppressions. 
- Add try_from_graph() for validated construction with CycleError - Add inner() for access to the underlying DiGraph - Add toposort() returning cached &[ExecutionNodeIndex] - Add from_node_list() for building edge-free test graphs - Implement Deref for transparent read access - Remove petgraph::data::Build import and arc_with_non_send_sync expects - Replace Index impl with ExecutionPathItem::resolve() --- crates/vite_task/src/session/execute/mod.rs | 36 ++---- crates/vite_task/src/session/reporter.rs | 87 +++++-------- .../snapshots/cycle dependency error.snap | 5 +- crates/vite_task_plan/src/error.rs | 6 +- crates/vite_task_plan/src/execution_graph.rs | 118 ++++++++++++++++-- crates/vite_task_plan/src/lib.rs | 4 +- crates/vite_task_plan/src/plan.rs | 25 ++-- .../query - cycle dependency error.snap | 2 +- 8 files changed, 160 insertions(+), 123 deletions(-) diff --git a/crates/vite_task/src/session/execute/mod.rs b/crates/vite_task/src/session/execute/mod.rs index f2de47c8..73a37186 100644 --- a/crates/vite_task/src/session/execute/mod.rs +++ b/crates/vite_task/src/session/execute/mod.rs @@ -5,10 +5,7 @@ use std::{process::Stdio, sync::Arc}; use futures_util::FutureExt; use vite_path::AbsolutePath; -use vite_task_plan::{ - ExecutionGraph, ExecutionItemKind, LeafExecutionKind, SpawnExecution, - execution_graph::ExecutionNodeIndex, -}; +use vite_task_plan::{ExecutionGraph, ExecutionItemKind, LeafExecutionKind, SpawnExecution}; use self::{ fingerprint::PostRunFingerprint, @@ -59,13 +56,9 @@ struct ExecutionContext<'a> { impl ExecutionContext<'_> { /// Execute all tasks in an execution graph in dependency order. /// - /// Since `ExecutionGraph` is `Acyclic>`, the graph is guaranteed to have - /// no cycles at the type level. We use `nodes_iter()` which returns nodes in topological - /// order (for every edge A→B, A comes before B). 
- /// - /// In our graph, edges go from dependents to dependencies (A→B means "A depends on B"), - /// so `nodes_iter()` gives dependents before dependencies. We reverse this to execute - /// dependencies first. + /// `ExecutionGraph` guarantees acyclicity at construction time and caches a + /// topological order. We iterate `toposort()` in reverse to get execution order + /// (dependencies before dependents). /// /// The `path_prefix` tracks our position within nested execution graphs. For the /// root call this is an empty path; for nested `Expanded` items it carries the @@ -78,22 +71,15 @@ impl ExecutionContext<'_> { graph: &ExecutionGraph, path_prefix: &LeafExecutionPath, ) { - // `nodes_iter()` returns nodes in topological order: for every edge A→B, + // `toposort()` returns nodes in topological order: for every edge A→B, // A appears before B. Since our edges mean "A depends on B", dependencies - // (B) appear after their dependents (A). Reversing gives us execution order - // where dependencies run first. - // - // Collect into a Vec first since the `nodes_iter()` return type - // (`impl Iterator`) does not implement `DoubleEndedIterator`. - let mut node_indices: Vec = graph.nodes_iter().collect(); - node_indices.reverse(); + // (B) appear after their dependents (A). We iterate in reverse to get + // execution order where dependencies run first. // Execute tasks in dependency-first order. Each task may have multiple items // (from `&&`-split commands), which are executed sequentially. - for node_ix in node_indices { - // Access the inner DiGraph directly for node indexing, since the `Acyclic` - // wrapper's only `Index` impl is our custom `Index`. 
- let task_execution = &graph.inner()[node_ix]; + for &node_ix in graph.toposort().iter().rev() { + let task_execution = &graph[node_ix]; for (item_idx, item) in task_execution.items.iter().enumerate() { // Build the path for this item by appending to the prefix @@ -361,10 +347,6 @@ impl Session<'_> { // Wrap the graph in Arc so both the reporter and execution can reference it. // The reporter clones the Arc internally for display lookups. - // `Acyclic` uses `RefCell` internally for topological-order caching, making it - // `!Sync`. We still need `Arc` here because the reporter holds a clone for display - // lookups. The graph is only ever accessed from a single thread in practice. - #[expect(clippy::arc_with_non_send_sync, reason = "Acyclic but single-threaded use")] let graph = Arc::new(execution_graph); let mut reporter = builder.build(&graph); diff --git a/crates/vite_task/src/session/reporter.rs b/crates/vite_task/src/session/reporter.rs index db49e262..6b301774 100644 --- a/crates/vite_task/src/session/reporter.rs +++ b/crates/vite_task/src/session/reporter.rs @@ -23,7 +23,6 @@ use std::{ cell::RefCell, io::Write, - ops::Index, process::ExitStatus as StdExitStatus, rc::Rc, sync::{Arc, LazyLock}, @@ -95,17 +94,11 @@ struct ExecutionPathItem { task_execution_item_index: usize, } -/// Indexing a graph by a single `ExecutionPathItem` yields the [`ExecutionItem`] -/// at that position. -/// -/// Uses `.inner()` to access the underlying `DiGraph`'s `Index` impl, -/// since `Acyclic` delegates indexing through `Deref` and the compiler might -/// otherwise recurse into this very impl. -impl Index for ExecutionGraph { - type Output = ExecutionItem; - - fn index(&self, index: ExecutionPathItem) -> &Self::Output { - &self.inner()[index.graph_node_ix].items[index.task_execution_item_index] +impl ExecutionPathItem { + /// Resolve this path item against a graph, returning the [`ExecutionItem`] + /// at the identified position. 
+ fn resolve(self, graph: &ExecutionGraph) -> &ExecutionItem { + &graph[self.graph_node_ix].items[self.task_execution_item_index] } } @@ -145,7 +138,7 @@ impl LeafExecutionPath { ) -> Option<&'a ExecutionItemDisplay> { let mut current_graph = root_graph; for (depth, path_item) in self.0.iter().enumerate() { - let item = ¤t_graph[*path_item]; + let item = path_item.resolve(current_graph); let is_last = depth == self.0.len() - 1; if is_last { // Last element — return the display info regardless of Leaf/Expanded @@ -930,7 +923,6 @@ fn print_summary( mod tests { use std::collections::BTreeMap; - use petgraph::data::Build; use vite_task_graph::display::TaskDisplay; use vite_task_plan::{ ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, InProcessExecution, @@ -1030,58 +1022,51 @@ mod tests { #[test] fn count_spawn_leaves_single_spawn() { - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); + let graph = ExecutionGraph::from_node_list([spawn_task("build")]); assert_eq!(count_spawn_leaves(&graph), 1); } #[test] fn count_spawn_leaves_multiple_spawns() { - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); - graph.add_node(spawn_task("test")); - graph.add_node(spawn_task("lint")); + let graph = ExecutionGraph::from_node_list([ + spawn_task("build"), + spawn_task("test"), + spawn_task("lint"), + ]); assert_eq!(count_spawn_leaves(&graph), 3); } #[test] fn count_spawn_leaves_in_process_not_counted() { - let mut graph = ExecutionGraph::default(); - graph.add_node(in_process_task("echo")); + let graph = ExecutionGraph::from_node_list([in_process_task("echo")]); assert_eq!(count_spawn_leaves(&graph), 0); } #[test] fn count_spawn_leaves_mixed_spawn_and_in_process() { - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); - graph.add_node(in_process_task("echo")); + let graph = ExecutionGraph::from_node_list([spawn_task("build"), in_process_task("echo")]); 
assert_eq!(count_spawn_leaves(&graph), 1); } #[test] fn count_spawn_leaves_nested_expanded() { // Build a nested graph containing two spawns - let mut nested = ExecutionGraph::default(); - nested.add_node(spawn_task("nested-build")); - nested.add_node(spawn_task("nested-test")); + let nested = + ExecutionGraph::from_node_list([spawn_task("nested-build"), spawn_task("nested-test")]); // Outer graph has one expanded item pointing to the nested graph - let mut graph = ExecutionGraph::default(); - graph.add_node(expanded_task("expand", nested)); + let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); assert_eq!(count_spawn_leaves(&graph), 2); } #[test] fn count_spawn_leaves_nested_with_top_level() { // Nested graph with one spawn - let mut nested = ExecutionGraph::default(); - nested.add_node(spawn_task("nested-lint")); + let nested = ExecutionGraph::from_node_list([spawn_task("nested-lint")]); // Top-level graph has one spawn + one expanded - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); - graph.add_node(expanded_task("expand", nested)); + let graph = + ExecutionGraph::from_node_list([spawn_task("build"), expanded_task("expand", nested)]); assert_eq!(count_spawn_leaves(&graph), 2); } @@ -1108,10 +1093,6 @@ mod tests { /// Build a `LabeledGraphReporter` for the given graph and return a leaf reporter /// for the first node's first item. 
fn build_labeled_leaf(graph: ExecutionGraph) -> Box { - #[expect( - clippy::arc_with_non_send_sync, - reason = "Acyclic but single-threaded test" - )] let graph_arc = Arc::new(graph); let builder = Box::new(LabeledReporterBuilder::new(Vec::::new(), test_path())); let mut reporter = builder.build(&graph_arc); @@ -1123,17 +1104,14 @@ mod tests { #[test] fn labeled_reporter_single_spawn_suggests_inherited() { - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); + let graph = ExecutionGraph::from_node_list([spawn_task("build")]); let leaf = build_labeled_leaf(graph); assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Inherited); } #[test] fn labeled_reporter_multiple_spawns_suggests_null() { - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); - graph.add_node(spawn_task("test")); + let graph = ExecutionGraph::from_node_list([spawn_task("build"), spawn_task("test")]); let leaf = build_labeled_leaf(graph); assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Null); } @@ -1143,8 +1121,7 @@ mod tests { // Zero spawn leaves → spawn_leaf_count == 0, so not == 1 → Null // This is correct: in-process executions don't spawn child processes, // so stdin suggestion doesn't apply to them. 
- let mut graph = ExecutionGraph::default(); - graph.add_node(in_process_task("echo")); + let graph = ExecutionGraph::from_node_list([in_process_task("echo")]); let leaf = build_labeled_leaf(graph); assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Null); } @@ -1152,9 +1129,7 @@ mod tests { #[test] fn labeled_reporter_one_spawn_one_in_process_suggests_inherited() { // One spawn leaf + one in-process → spawn_leaf_count == 1 → Inherited - let mut graph = ExecutionGraph::default(); - graph.add_node(spawn_task("build")); - graph.add_node(in_process_task("echo")); + let graph = ExecutionGraph::from_node_list([spawn_task("build"), in_process_task("echo")]); let leaf = build_labeled_leaf(graph); assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Inherited); } @@ -1162,11 +1137,9 @@ mod tests { #[test] fn labeled_reporter_nested_single_spawn_suggests_inherited() { // Nested graph with exactly one spawn - let mut nested = ExecutionGraph::default(); - nested.add_node(spawn_task("nested-build")); + let nested = ExecutionGraph::from_node_list([spawn_task("nested-build")]); - let mut graph = ExecutionGraph::default(); - graph.add_node(expanded_task("expand", nested)); + let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); let leaf = build_labeled_leaf(graph); assert_eq!(leaf.stdin_suggestion(), StdinSuggestion::Inherited); } @@ -1174,12 +1147,10 @@ mod tests { #[test] fn labeled_reporter_nested_multiple_spawns_suggests_null() { // Nested graph with two spawns - let mut nested = ExecutionGraph::default(); - nested.add_node(spawn_task("nested-a")); - nested.add_node(spawn_task("nested-b")); + let nested = + ExecutionGraph::from_node_list([spawn_task("nested-a"), spawn_task("nested-b")]); - let mut graph = ExecutionGraph::default(); - graph.add_node(expanded_task("expand", nested)); + let graph = ExecutionGraph::from_node_list([expanded_task("expand", nested)]); let leaf = build_labeled_leaf(graph); assert_eq!(leaf.stdin_suggestion(), 
StdinSuggestion::Null); } diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap index ef0765b6..6dc0a699 100644 --- a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/error_cycle_dependency/snapshots/cycle dependency error.snap @@ -3,7 +3,4 @@ source: crates/vite_task_bin/tests/e2e_snapshots/main.rs expression: e2e_outputs --- [1]> vp run task-a # task-a -> task-b -> task-a cycle -Error: Failed to plan execution - -Caused by: - Cycle dependency detected involving task error-cycle-dependency-test#task-b +Error: Cycle dependency detected involving task error-cycle-dependency-test#task-b diff --git a/crates/vite_task_plan/src/error.rs b/crates/vite_task_plan/src/error.rs index a685cc7a..51c07c2f 100644 --- a/crates/vite_task_plan/src/error.rs +++ b/crates/vite_task_plan/src/error.rs @@ -146,9 +146,9 @@ pub enum Error { /// A cycle was detected in the task dependency graph during planning. /// - /// This is caught when wrapping the built `DiGraph` in `petgraph::acyclic::Acyclic`, - /// which validates that the graph is a DAG. The contained `TaskDisplay` identifies - /// one of the tasks involved in the cycle. + /// This is caught by `ExecutionGraph::try_from_graph`, which validates that the + /// graph is a DAG. The contained `TaskDisplay` identifies one of the tasks + /// involved in the cycle. 
 #[error("Cycle dependency detected involving task {0}")]
     CycleDependencyDetected(TaskDisplay),
 }
diff --git a/crates/vite_task_plan/src/execution_graph.rs b/crates/vite_task_plan/src/execution_graph.rs
index 3ddc69f9..214b0e46 100644
--- a/crates/vite_task_plan/src/execution_graph.rs
+++ b/crates/vite_task_plan/src/execution_graph.rs
@@ -1,4 +1,6 @@
-use petgraph::graph::{DefaultIx, EdgeIndex, IndexType, NodeIndex};
+use std::ops::Deref;
+
+use petgraph::graph::{DefaultIx, DiGraph, EdgeIndex, IndexType, NodeIndex};
 
 use crate::TaskExecution;
 
@@ -23,17 +25,113 @@ unsafe impl IndexType for ExecutionIx {
 pub type ExecutionNodeIndex = NodeIndex;
 pub type ExecutionEdgeIndex = EdgeIndex;
 
-/// The inner directed graph type before acyclicity wrapping.
-/// Used during graph construction in `plan_query_request` before validation.
-type InnerExecutionGraph = petgraph::graph::DiGraph;
+/// The inner directed graph type used for construction and storage.
+pub(crate) type InnerExecutionGraph = DiGraph;
+
+/// Error returned by [`ExecutionGraph::try_from_graph`] when a cycle is detected.
+///
+/// Contains the [`ExecutionNodeIndex`] of one node that participates in the cycle,
+/// allowing the caller to look up task details for error reporting.
+#[derive(Debug)]
+pub struct CycleError {
+    node_id: ExecutionNodeIndex,
+}
+
+impl CycleError {
+    /// Return a node index that participates in the detected cycle.
+    #[must_use]
+    pub const fn node_id(&self) -> ExecutionNodeIndex {
+        self.node_id
+    }
+}
 
 /// A directed acyclic execution graph.
 ///
-/// Wraps `petgraph::graph::DiGraph` in `petgraph::acyclic::Acyclic` to enforce at the
-/// type level that the graph has no cycles. This guarantee is established at plan time
-/// when the graph is constructed in `plan_query_request`, eliminating the need for
-/// runtime cycle detection during execution.
+/// Wraps a `petgraph::graph::DiGraph` with a type-level guarantee that it contains
+/// no cycles. 
The acyclicity invariant is validated at construction time by +/// [`try_from_graph`](Self::try_from_graph), which performs a topological sort and +/// caches the resulting order for later use. /// -/// `Acyclic` implements `Deref>`, so all read operations on the +/// Unlike `petgraph::acyclic::Acyclic`, this type is `Sync` (no internal `RefCell`), +/// so it can be safely shared via `Arc` across threads. +/// +/// The type implements `Deref>`, so all read operations on the /// inner graph (indexing, iteration, node/edge counts) work transparently. -pub type ExecutionGraph = petgraph::acyclic::Acyclic; +#[derive(Debug)] +pub struct ExecutionGraph { + /// The underlying directed graph. + graph: InnerExecutionGraph, + /// Pre-computed topological order of node indices. + /// For every edge A→B in the graph, A appears before B in this vector. + toposort: Vec, +} + +impl ExecutionGraph { + /// Validate that `graph` is acyclic and wrap it in an `ExecutionGraph`. + /// + /// Performs a topological sort using `petgraph::algo::toposort`. If the graph + /// contains a cycle, returns a [`CycleError`] identifying one node in the cycle. + /// On success, the topological order is cached so that subsequent calls to + /// [`toposort`](Self::toposort) are free. + /// + /// # Errors + /// + /// Returns [`CycleError`] if the graph contains a cycle. + pub fn try_from_graph(graph: InnerExecutionGraph) -> Result { + let toposort = petgraph::algo::toposort(&graph, None) + .map_err(|cycle| CycleError { node_id: cycle.node_id() })?; + Ok(Self { graph, toposort }) + } + + /// Return a reference to the underlying `DiGraph`. + /// + /// Useful when an API requires `&DiGraph` explicitly (e.g. `vite_graph_ser::serialize_by_key`). + #[must_use] + pub const fn inner(&self) -> &InnerExecutionGraph { + &self.graph + } + + /// Build an `ExecutionGraph` from an iterator of task executions with no edges. + /// + /// Each task execution becomes an independent node in the graph. 
Since there are + /// no edges, the graph is trivially acyclic. + pub fn from_node_list(nodes: impl IntoIterator) -> Self { + let mut graph = InnerExecutionGraph::default(); + let mut toposort = Vec::new(); + for node in nodes { + toposort.push(graph.add_node(node)); + } + Self { graph, toposort } + } + + /// Return the pre-computed topological order of node indices. + /// + /// For every edge A→B in the graph, A appears before B in the returned slice. + /// Since our edges mean "A depends on B", dependents come before their + /// dependencies. Callers that need execution order (dependencies first) should + /// iterate in reverse. + #[must_use] + pub fn toposort(&self) -> &[ExecutionNodeIndex] { + &self.toposort + } +} + +impl Default for ExecutionGraph { + /// Create an empty `ExecutionGraph` (no nodes, no edges). + /// + /// An empty graph is trivially acyclic. + fn default() -> Self { + Self { graph: InnerExecutionGraph::default(), toposort: Vec::new() } + } +} + +/// Deref to the inner `DiGraph` so that read-only graph operations +/// (`node_count()`, `node_weights()`, `node_indices()`, indexing by `NodeIndex`, etc.) +/// work transparently on `ExecutionGraph`. +impl Deref for ExecutionGraph { + type Target = InnerExecutionGraph; + + fn deref(&self) -> &Self::Target { + &self.graph + } +} diff --git a/crates/vite_task_plan/src/lib.rs b/crates/vite_task_plan/src/lib.rs index e24304d1..c4f1cd25 100644 --- a/crates/vite_task_plan/src/lib.rs +++ b/crates/vite_task_plan/src/lib.rs @@ -138,10 +138,10 @@ pub enum LeafExecutionKind { InProcess(InProcessExecution), } -/// Serialize an `ExecutionGraph` (which is `Acyclic>`) using `serialize_by_key`. +/// Serialize an `ExecutionGraph` using `serialize_by_key`. /// /// `vite_graph_ser::serialize_by_key` expects `&DiGraph`, so we call `.inner()` -/// on the `Acyclic` wrapper to get the underlying `DiGraph` reference. +/// to get the underlying `DiGraph` reference. 
fn serialize_execution_graph_by_key( graph: &ExecutionGraph, serializer: S, diff --git a/crates/vite_task_plan/src/plan.rs b/crates/vite_task_plan/src/plan.rs index b8683d23..9aa6fa79 100644 --- a/crates/vite_task_plan/src/plan.rs +++ b/crates/vite_task_plan/src/plan.rs @@ -30,16 +30,12 @@ use crate::{ cache_metadata::{CacheMetadata, ExecutionCacheKey, ProgramFingerprint, SpawnFingerprint}, envs::EnvFingerprints, error::{CdCommandError, Error, PathFingerprintError, PathFingerprintErrorKind, PathType}, - execution_graph::{ExecutionGraph, ExecutionNodeIndex}, + execution_graph::{ExecutionGraph, ExecutionNodeIndex, InnerExecutionGraph}, in_process::InProcessExecution, path_env::get_path_env, plan_request::{PlanRequest, QueryPlanRequest, ScriptCommand, SyntheticPlanRequest}, }; -/// Type alias for the inner graph used during construction, before acyclicity validation. -type InnerExecutionGraph = - petgraph::graph::DiGraph; - /// Locate the executable path for a given program name in the provided envs and cwd. fn which( program: &Arc, @@ -504,8 +500,8 @@ fn plan_spawn_execution( /// Expand the parsed task request (like `run -r build`/`lint`) into an execution graph. /// -/// Builds a `DiGraph` of task executions, then validates it is acyclic by wrapping -/// in `petgraph::acyclic::Acyclic`. Returns `CycleDependencyDetected` if a cycle is found. +/// Builds a `DiGraph` of task executions, then validates it is acyclic via +/// `ExecutionGraph::try_from_graph`. Returns `CycleDependencyDetected` if a cycle is found. #[expect(clippy::future_not_send, reason = "PlanContext contains !Send dyn PlanRequestParser")] pub async fn plan_query_request( query_plan_request: QueryPlanRequest, @@ -547,18 +543,11 @@ pub async fn plan_query_request( ); } - // Validate the graph is acyclic by wrapping in `Acyclic`. - // `Acyclic::try_from` performs a topological sort; if a cycle is found, - // it returns `Cycle(node_id)` identifying one node in the cycle. 
- ExecutionGraph::try_from(inner_graph).map_err(|cycle| { + // Validate the graph is acyclic. + // `try_from_graph` performs a topological sort; if a cycle is found, + // it returns `CycleError` identifying one node in the cycle. + ExecutionGraph::try_from_graph(inner_graph).map_err(|cycle| { // Look up the human-readable task display for the node involved in the cycle. - // The cycle's node_id is still valid against the original inner_graph (which - // was moved into try_from but returned back on error — however, `Cycle` only - // contains the node index, not the graph). We stored the display info in our - // local map, so we can look it up by reversing the index mapping. - // - // Since we no longer have access to the inner_graph after the failed try_from, - // we find the TaskDisplay by searching our index map for the node index. let task_display = execution_node_indices_by_task_index.iter().find_map(|(task_idx, &exec_idx)| { if exec_idx == cycle.node_id() { diff --git a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap index b1309d5e..7d0db585 100644 --- a/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap +++ b/crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency/snapshots/query - cycle dependency error.snap @@ -3,4 +3,4 @@ source: crates/vite_task_plan/tests/plan_snapshots/main.rs expression: err_str.as_ref() input_file: crates/vite_task_plan/tests/plan_snapshots/fixtures/cycle-dependency --- -Failed to plan execution: Cycle dependency detected involving task cycle-dependency-test#task-b +Cycle dependency detected involving task cycle-dependency-test#task-b From 60f32ae8460555f607a85764a412451001b17e22 Mon Sep 17 00:00:00 2001 From: branchseer Date: Sat, 14 Feb 2026 17:05:47 +0800 Subject: [PATCH 
08/11] refactor: split reporter into plain and labeled child modules Move PlainReporter and LabeledReporter family from reporter.rs into reporter/plain.rs and reporter/labeled.rs. Shared types, traits, and display helpers remain in reporter/mod.rs with pub use re-exports. --- .../vite_task/src/session/reporter/labeled.rs | 541 +++++++++++++++ .../session/{reporter.rs => reporter/mod.rs} | 627 +----------------- .../vite_task/src/session/reporter/plain.rs | 87 +++ 3 files changed, 646 insertions(+), 609 deletions(-) create mode 100644 crates/vite_task/src/session/reporter/labeled.rs rename crates/vite_task/src/session/{reporter.rs => reporter/mod.rs} (50%) create mode 100644 crates/vite_task/src/session/reporter/plain.rs diff --git a/crates/vite_task/src/session/reporter/labeled.rs b/crates/vite_task/src/session/reporter/labeled.rs new file mode 100644 index 00000000..c6d72ea2 --- /dev/null +++ b/crates/vite_task/src/session/reporter/labeled.rs @@ -0,0 +1,541 @@ +//! Labeled reporter family — graph-aware reporter with aggregation and summary. +//! +//! Provides the full reporter lifecycle: +//! - [`LabeledReporterBuilder`] → [`LabeledGraphReporter`] → [`LabeledLeafReporter`] +//! +//! Tracks statistics across multiple leaf executions, prints command lines with cache +//! status indicators, and renders a summary with per-task details at the end. 
+ +use std::{ + cell::RefCell, io::Write, process::ExitStatus as StdExitStatus, rc::Rc, sync::Arc, + time::Duration, +}; + +use bstr::BString; +use owo_colors::Style; +use vite_path::AbsolutePath; +use vite_str::Str; +use vite_task_plan::{ExecutionGraph, ExecutionItemDisplay, ExecutionItemKind, LeafExecutionKind}; + +use super::{ + CACHE_MISS_STYLE, COMMAND_STYLE, ColorizeExt, ExitStatus, GraphExecutionReporter, + GraphExecutionReporterBuilder, LeafExecutionPath, LeafExecutionReporter, StdinSuggestion, + format_command_display, write_cache_hit_message, write_command_with_cache_status, + write_error_message, +}; +use crate::session::{ + cache::format_cache_status_summary, + event::{CacheStatus, CacheUpdateStatus, OutputKind, exit_status_to_code}, +}; + +/// Information tracked for each leaf execution, used in the final summary. +#[derive(Debug)] +struct ExecutionInfo { + /// Display info for this execution. `None` for displayless executions + /// (e.g., synthetics reached via nested expansion). + display: Option, + /// Cache status, determined at `start()`. + cache_status: CacheStatus, + /// Exit status from the process. `None` means no process was spawned (cache hit or in-process). + exit_status: Option, + /// Error message, set on error. + error_message: Option, +} + +/// Running statistics updated as leaf executions complete. +#[derive(Default)] +struct ExecutionStats { + cache_hits: usize, + cache_misses: usize, + cache_disabled: usize, + failed: usize, +} + +/// Mutable state shared between [`LabeledGraphReporter`] and its [`LabeledLeafReporter`] instances +/// via `Rc>`. +/// +/// This is safe because execution is single-threaded and sequential — only one leaf +/// reporter is active at a time. +struct SharedReporterState { + writer: W, + executions: Vec, + stats: ExecutionStats, + /// Total number of spawned leaf executions in the graph (including nested `Expanded` + /// subgraphs). 
Computed once at build time and used to determine the stdin suggestion: + /// inherited stdin is only suggested when there is exactly one spawn leaf. + spawn_leaf_count: usize, +} + +/// Count the total number of spawned leaf executions in an execution graph, +/// recursing into nested `Expanded` subgraphs. +/// +/// In-process executions are not counted because they don't spawn child processes +/// and thus don't need stdin. +pub(super) fn count_spawn_leaves(graph: &ExecutionGraph) -> usize { + graph + .node_weights() + .flat_map(|task| task.items.iter()) + .map(|item| match &item.kind { + ExecutionItemKind::Leaf(LeafExecutionKind::Spawn(_)) => 1, + ExecutionItemKind::Leaf(LeafExecutionKind::InProcess(_)) => 0, + ExecutionItemKind::Expanded(nested_graph) => count_spawn_leaves(nested_graph), + }) + .sum() +} + +/// Builder for the labeled graph reporter. +/// +/// Created by the caller before execution, then transitioned to [`LabeledGraphReporter`] +/// by calling `build()` with the execution graph. +/// +/// # Output Modes +/// +/// ## Normal Mode (default) +/// - Prints command lines with cache status indicators during execution +/// - Shows full summary with Statistics and Task Details at the end +/// +/// ## Simplified Summary for Single Tasks +/// - When a single task with display info is executed: +/// - Skips full summary (no Statistics/Task Details sections) +/// - Shows only cache status inline +/// - Results in clean output showing just the command's stdout/stderr +pub struct LabeledReporterBuilder { + writer: W, + workspace_path: Arc, +} + +impl LabeledReporterBuilder { + /// Create a new labeled reporter builder. + /// + /// - `writer`: The output stream (typically `std::io::stdout()`). + /// - `workspace_path`: The workspace root, used to compute relative cwds in display. 
+ pub const fn new(writer: W, workspace_path: Arc) -> Self { + Self { writer, workspace_path } + } +} + +impl GraphExecutionReporterBuilder for LabeledReporterBuilder { + fn build(self: Box, graph: &Arc) -> Box { + let spawn_leaf_count = count_spawn_leaves(graph); + Box::new(LabeledGraphReporter { + shared: Rc::new(RefCell::new(SharedReporterState { + writer: self.writer, + executions: Vec::new(), + stats: ExecutionStats::default(), + spawn_leaf_count, + })), + graph: Arc::clone(graph), + workspace_path: self.workspace_path, + }) + } +} + +/// Graph-level reporter that tracks multiple leaf executions and prints a summary. +/// +/// Creates [`LabeledLeafReporter`] instances for each leaf execution. The leaf reporters +/// share mutable state with this reporter via `Rc>`. +pub struct LabeledGraphReporter { + shared: Rc>>, + graph: Arc, + workspace_path: Arc, +} + +impl GraphExecutionReporter for LabeledGraphReporter { + fn new_leaf_execution(&mut self, path: &LeafExecutionPath) -> Box { + // Look up display info from the graph using the path + let display = path.resolve_display(&self.graph).cloned(); + Box::new(LabeledLeafReporter { + shared: Rc::clone(&self.shared), + display, + workspace_path: Arc::clone(&self.workspace_path), + started: false, + is_cache_hit: false, + }) + } + + fn finish(self: Box) -> Result<(), ExitStatus> { + let mut shared = self.shared.borrow_mut(); + + // Print summary. + // Special case: single execution without display info (e.g., synthetic via nested expansion) + // → skip summary since there's nothing meaningful to show. + let is_single_displayless = + shared.executions.len() == 1 && shared.executions[0].display.is_none(); + if !is_single_displayless { + // Destructure to get simultaneous mutable access to writer and immutable + // access to executions/stats, satisfying the borrow checker. + let SharedReporterState { ref mut writer, ref executions, ref stats, .. 
} = *shared; + print_summary(writer, executions, stats, &self.workspace_path); + } + + // Determine exit code based on failed tasks and infrastructure errors: + // - Infrastructure errors (cache lookup, spawn failure) have error_message set + // but no meaningful exit_status. + // - Process failures have a non-zero exit_status. + // + // Rules: + // 1. No failures at all → Ok(()) + // 2. Exactly one process failure, no infra errors → use that task's exit code + // 3. Any infra errors, or multiple failures → Err(1) + let has_infra_errors = shared.executions.iter().any(|exec| exec.error_message.is_some()); + + let failed_exit_codes: Vec = shared + .executions + .iter() + .filter_map(|exec| exec.exit_status.as_ref()) + .filter(|status| !status.success()) + .map(|status| exit_status_to_code(*status)) + .collect(); + + match (has_infra_errors, failed_exit_codes.as_slice()) { + (false, []) => Ok(()), + (false, [code]) => { + // Return the single failed task's exit code (clamped to u8 range) + #[expect( + clippy::cast_sign_loss, + reason = "value is clamped to 1..=255, always positive" + )] + Err(ExitStatus((*code).clamp(1, 255) as u8)) + } + _ => Err(ExitStatus::FAILURE), + } + } +} + +/// Leaf-level reporter created by [`LabeledGraphReporter::new_leaf_execution`]. +/// +/// Writes output in real-time to the shared writer and updates shared stats/errors +/// via `Rc>`. +struct LabeledLeafReporter { + shared: Rc>>, + /// Display info for this execution, looked up from the graph via the path. + display: Option, + workspace_path: Arc, + /// Whether `start()` has been called. Used to determine if stats should be updated + /// in `finish()` and whether to push an `ExecutionInfo` entry. + started: bool, + /// Whether the current execution is a cache hit, set by `start()`. 
+ is_cache_hit: bool, +} + +impl LeafExecutionReporter for LabeledLeafReporter { + fn stdin_suggestion(&self) -> StdinSuggestion { + // Only suggest inherited stdin when the graph has exactly one spawned leaf + // execution. With multiple spawned tasks, stdin should not be shared — each + // task gets /dev/null to avoid contention. + let shared = self.shared.borrow(); + if shared.spawn_leaf_count == 1 { + StdinSuggestion::Inherited + } else { + StdinSuggestion::Null + } + } + + fn start(&mut self, cache_status: CacheStatus) { + self.started = true; + self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. }); + + let mut shared = self.shared.borrow_mut(); + + // Update statistics based on cache status + match &cache_status { + CacheStatus::Hit { .. } => shared.stats.cache_hits += 1, + CacheStatus::Miss(_) => shared.stats.cache_misses += 1, + CacheStatus::Disabled(_) => shared.stats.cache_disabled += 1, + } + + // Print command line with cache status (if display info is available) + if let Some(ref display) = self.display { + write_command_with_cache_status( + &mut shared.writer, + display, + &self.workspace_path, + &cache_status, + ); + } + + // Store execution info for the summary + shared.executions.push(ExecutionInfo { + display: self.display.clone(), + cache_status, + exit_status: None, + error_message: None, + }); + } + + fn output(&mut self, _kind: OutputKind, content: BString) { + let mut shared = self.shared.borrow_mut(); + let _ = shared.writer.write_all(&content); + let _ = shared.writer.flush(); + } + + fn finish( + self: Box, + status: Option, + _cache_update_status: CacheUpdateStatus, + error: Option, + ) { + let mut shared = self.shared.borrow_mut(); + + // Handle errors + if let Some(ref message) = error { + write_error_message(&mut shared.writer, message); + + // Update the execution info if start() was called (an entry was pushed). 
+ // Without the `self.started` guard, `last_mut()` would return a + // *different* execution's entry, corrupting its error_message. + if self.started + && let Some(exec) = shared.executions.last_mut() + { + exec.error_message = Some(message.clone()); + } + + shared.stats.failed += 1; + } + + // Update failure statistics for non-zero exit status (not an error, just a failed task) + // None means success (cache hit or in-process), Some checks the actual exit status + if error.is_none() && status.is_some_and(|s| !s.success()) { + shared.stats.failed += 1; + } + + // Update execution info with exit status (if start() was called and an entry exists) + if self.started + && let Some(exec) = shared.executions.last_mut() + { + exec.exit_status = status; + } + + // For executions without display info (synthetics via nested expansion) that are + // cache hits, print the cache hit message + if self.started && self.display.is_none() && self.is_cache_hit { + write_cache_hit_message(&mut shared.writer); + } + + // Add a trailing newline after each task's output for readability. + // Skip if start() was never called (e.g. cache lookup failure) — there's + // no task output to separate. + if self.started { + let _ = writeln!(shared.writer); + } + } +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Summary printing +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Print the full execution summary with statistics, performance, and per-task details. +/// +/// Called by [`LabeledGraphReporter::finish`] after all tasks have executed. +/// Infrastructure errors and task failures are included in the summary. 
+#[expect( + clippy::too_many_lines, + reason = "summary formatting is inherently verbose with many write calls" +)] +fn print_summary( + writer: &mut impl Write, + executions: &[ExecutionInfo], + stats: &ExecutionStats, + workspace_path: &AbsolutePath, +) { + let total = executions.len(); + let cache_hits = stats.cache_hits; + let cache_misses = stats.cache_misses; + let cache_disabled = stats.cache_disabled; + let failed = stats.failed; + + // Print summary header with decorative line + // Note: leaf finish already adds a trailing newline after each task's output + // Add an extra blank line before the summary for visual separation + let _ = writeln!(writer); + let _ = writeln!( + writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) + ); + let _ = writeln!( + writer, + "{}", + " Vite+ Task Runner • Execution Summary".style(Style::new().bold().bright_white()) + ); + let _ = writeln!( + writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) + ); + let _ = writeln!(writer); + + // Print statistics + let cache_disabled_str = if cache_disabled > 0 { + Str::from( + vite_str::format!("• {cache_disabled} cache disabled") + .style(Style::new().bright_black()) + .to_string(), + ) + } else { + Str::default() + }; + + let failed_str = if failed > 0 { + Str::from(vite_str::format!("• {failed} failed").style(Style::new().red()).to_string()) + } else { + Str::default() + }; + + // Build statistics line, only including non-empty parts + // Note: trailing space after "cache misses" is intentional for consistent formatting + let _ = write!( + writer, + "{} {} {} {} ", + "Statistics:".style(Style::new().bold()), + vite_str::format!(" {total} tasks").style(Style::new().bright_white()), + vite_str::format!("• {cache_hits} cache hits").style(Style::new().green()), + vite_str::format!("• {cache_misses} cache misses").style(CACHE_MISS_STYLE), + ); + if !cache_disabled_str.is_empty() { + let 
_ = write!(writer, "{cache_disabled_str} "); + } + if !failed_str.is_empty() { + let _ = write!(writer, "{failed_str} "); + } + let _ = writeln!(writer); + + // Calculate cache hit rate + let cache_rate = if total > 0 { + #[expect( + clippy::cast_possible_truncation, + reason = "percentage is always 0..=100, fits in u32" + )] + #[expect(clippy::cast_sign_loss, reason = "percentage is always non-negative")] + #[expect( + clippy::cast_precision_loss, + reason = "acceptable precision loss for display percentage" + )] + { + (f64::from(cache_hits as u32) / total as f64 * 100.0) as u32 + } + } else { + 0 + }; + + // Calculate total time saved from cache hits + let total_saved: Duration = executions + .iter() + .filter_map(|exec| { + if let CacheStatus::Hit { replayed_duration } = &exec.cache_status { + Some(*replayed_duration) + } else { + None + } + }) + .sum(); + + let _ = write!( + writer, + "{} {} cache hit rate", + "Performance:".style(Style::new().bold()), + format_args!("{cache_rate}%").style(if cache_rate >= 75 { + Style::new().green().bold() + } else if cache_rate >= 50 { + CACHE_MISS_STYLE + } else { + Style::new().red() + }) + ); + + if total_saved > Duration::ZERO { + let _ = write!( + writer, + ", {:.2?} saved in total", + total_saved.style(Style::new().green().bold()) + ); + } + let _ = writeln!(writer); + let _ = writeln!(writer); + + // Detailed task results + let _ = writeln!(writer, "{}", "Task Details:".style(Style::new().bold())); + let _ = writeln!( + writer, + "{}", + "────────────────────────────────────────────────".style(Style::new().bright_black()) + ); + + for (idx, exec) in executions.iter().enumerate() { + // Skip executions without display info (they have nothing to show in the summary) + let Some(ref display) = exec.display else { + continue; + }; + + let task_display = &display.task_display; + + // Task name and index + let _ = write!( + writer, + " {} {}", + vite_str::format!("[{}]", idx + 1).style(Style::new().bright_black()), + 
task_display.to_string().style(Style::new().bright_white().bold()) + ); + + // Command with cwd prefix + let command_display = format_command_display(display, workspace_path); + let _ = write!(writer, ": {}", command_display.style(COMMAND_STYLE)); + + // Execution result icon + // None means success (cache hit or in-process), Some checks actual status + match &exec.exit_status { + None => { + let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); + } + Some(exit_status) if exit_status.success() => { + let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); + } + Some(exit_status) => { + let code = exit_status_to_code(*exit_status); + let _ = write!( + writer, + " {} {}", + "✗".style(Style::new().red().bold()), + vite_str::format!("(exit code: {code})").style(Style::new().red()) + ); + } + } + let _ = writeln!(writer); + + // Cache status details — use display module for plain text, apply styling here + let cache_summary = format_cache_status_summary(&exec.cache_status); + let styled_summary = match &exec.cache_status { + CacheStatus::Hit { .. 
} => cache_summary.style(Style::new().green()), + CacheStatus::Miss(_) => cache_summary.style(CACHE_MISS_STYLE), + CacheStatus::Disabled(_) => cache_summary.style(Style::new().bright_black()), + }; + let _ = writeln!(writer, " {styled_summary}"); + + // Error message if present + if let Some(ref error_msg) = exec.error_message { + let _ = writeln!( + writer, + " {} {}", + "✗ Error:".style(Style::new().red().bold()), + error_msg.style(Style::new().red()) + ); + } + + // Add spacing between tasks except for the last one + if idx < executions.len() - 1 { + let _ = writeln!( + writer, + " {}", + "·······················································" + .style(Style::new().bright_black()) + ); + } + } + + let _ = writeln!( + writer, + "{}", + "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) + ); +} diff --git a/crates/vite_task/src/session/reporter.rs b/crates/vite_task/src/session/reporter/mod.rs similarity index 50% rename from crates/vite_task/src/session/reporter.rs rename to crates/vite_task/src/session/reporter/mod.rs index 6b301774..84aaa70e 100644 --- a/crates/vite_task/src/session/reporter.rs +++ b/crates/vite_task/src/session/reporter/mod.rs @@ -11,37 +11,38 @@ //! 3. [`LeafExecutionReporter`] — handles events for a single leaf execution (output streaming, //! cache status, errors). Finalized with `finish()`. //! -//! Two concrete implementations are provided: +//! Two concrete implementations are provided in child modules: //! -//! - [`PlainReporter`] — a standalone [`LeafExecutionReporter`] for single-leaf execution +//! - [`plain::PlainReporter`] — a standalone [`LeafExecutionReporter`] for single-leaf execution //! (e.g., `execute_synthetic`). Self-contained, no shared state, no summary. //! -//! - [`LabeledReporterBuilder`] / [`LabeledGraphReporter`] / [`LabeledLeafReporter`] — a full -//! graph-aware reporter family. Tracks stats across multiple leaf executions, prints command -//! 
lines with cache status, and renders a summary at the end. +//! - [`labeled::LabeledReporterBuilder`] / [`labeled::LabeledGraphReporter`] / +//! `LabeledLeafReporter` — a full graph-aware reporter family. Tracks stats across multiple +//! leaf executions, prints command lines with cache status, and renders a summary at the end. +mod labeled; +mod plain; + +// Re-export the concrete implementations so callers can use `reporter::PlainReporter` +// and `reporter::LabeledReporterBuilder` without navigating into child modules. use std::{ - cell::RefCell, io::Write, process::ExitStatus as StdExitStatus, - rc::Rc, sync::{Arc, LazyLock}, - time::Duration, }; use bstr::BString; +pub use labeled::LabeledReporterBuilder; use owo_colors::{Style, Styled}; +pub use plain::PlainReporter; use smallvec::SmallVec; use vite_path::AbsolutePath; use vite_str::Str; -use vite_task_plan::{ - ExecutionGraph, ExecutionItem, ExecutionItemDisplay, ExecutionItemKind, LeafExecutionKind, - execution_graph::ExecutionNodeIndex, -}; +use vite_task_plan::{ExecutionGraph, ExecutionItem, ExecutionItemDisplay, ExecutionItemKind}; use super::{ - cache::{format_cache_status_inline, format_cache_status_summary}, - event::{CacheStatus, CacheUpdateStatus, OutputKind, exit_status_to_code}, + cache::format_cache_status_inline, + event::{CacheStatus, CacheUpdateStatus, OutputKind}, }; // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ @@ -94,6 +95,8 @@ struct ExecutionPathItem { task_execution_item_index: usize, } +use vite_task_plan::execution_graph::ExecutionNodeIndex; + impl ExecutionPathItem { /// Resolve this path item against a graph, returning the [`ExecutionItem`] /// at the identified position. 
@@ -320,601 +323,6 @@ fn write_cache_hit_message(writer: &mut impl Write) { writeln!(writer, "{}", "✓ cache hit, logs replayed".style(Style::new().green().dimmed())); } -// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -// PlainReporter — standalone LeafExecutionReporter for single-leaf execution -// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -/// A self-contained [`LeafExecutionReporter`] for single-leaf executions -/// (e.g., `execute_synthetic`). -/// -/// This reporter: -/// - Owns its writer directly (no `Rc` shared state) -/// - Has no display info (synthetic executions have no task display) -/// - Does not track stats or print summaries -/// - Supports `silent_if_cache_hit` to suppress output for cached executions -/// -/// The exit status is determined by the caller from the `execute_spawn` return value, -/// not from the reporter. -pub struct PlainReporter { - writer: W, - /// When true, suppresses all output (command line, process output, cache hit message) - /// for executions that are cache hits. - silent_if_cache_hit: bool, - /// Whether the current execution is a cache hit, set by `start()`. - is_cache_hit: bool, -} - -impl PlainReporter { - /// Create a new plain reporter. - /// - /// - `writer`: The output stream (typically `std::io::stdout()`). - /// - `silent_if_cache_hit`: If true, suppress all output when the execution is a cache hit. - pub const fn new(writer: W, silent_if_cache_hit: bool) -> Self { - Self { writer, silent_if_cache_hit, is_cache_hit: false } - } - - /// Returns true if output should be suppressed for this execution. - const fn is_silent(&self) -> bool { - self.silent_if_cache_hit && self.is_cache_hit - } -} - -impl LeafExecutionReporter for PlainReporter { - fn stdin_suggestion(&self) -> StdinSuggestion { - // PlainReporter is used for single-leaf synthetic executions (e.g., auto-install). 
- // Always suggest inherited stdin so the spawned process can be interactive. - StdinSuggestion::Inherited - } - - fn start(&mut self, cache_status: CacheStatus) { - self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. }); - // PlainReporter has no display info (synthetic executions), - // so there's no command line to print at start. - } - - fn output(&mut self, _kind: OutputKind, content: BString) { - if self.is_silent() { - return; - } - let _ = self.writer.write_all(&content); - let _ = self.writer.flush(); - } - - fn finish( - mut self: Box, - _status: Option, - _cache_update_status: CacheUpdateStatus, - error: Option, - ) { - // Handle errors — print inline error message - if let Some(ref message) = error { - write_error_message(&mut self.writer, message); - return; - } - - // For cache hits, print the "cache hit" message (unless silent) - if self.is_cache_hit && !self.is_silent() { - write_cache_hit_message(&mut self.writer); - } - } -} - -// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -// LabeledReporter family — graph-aware reporter with aggregation and summary -// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -/// Information tracked for each leaf execution, used in the final summary. -#[derive(Debug)] -struct ExecutionInfo { - /// Display info for this execution. `None` for displayless executions - /// (e.g., synthetics reached via nested expansion). - display: Option, - /// Cache status, determined at `start()`. - cache_status: CacheStatus, - /// Exit status from the process. `None` means no process was spawned (cache hit or in-process). - exit_status: Option, - /// Error message, set on error. - error_message: Option, -} - -/// Running statistics updated as leaf executions complete. 
-#[derive(Default)] -struct ExecutionStats { - cache_hits: usize, - cache_misses: usize, - cache_disabled: usize, - failed: usize, -} - -/// Mutable state shared between [`LabeledGraphReporter`] and its [`LabeledLeafReporter`] instances -/// via `Rc>`. -/// -/// This is safe because execution is single-threaded and sequential — only one leaf -/// reporter is active at a time. -struct SharedReporterState { - writer: W, - executions: Vec, - stats: ExecutionStats, - /// Total number of spawned leaf executions in the graph (including nested `Expanded` - /// subgraphs). Computed once at build time and used to determine the stdin suggestion: - /// inherited stdin is only suggested when there is exactly one spawn leaf. - spawn_leaf_count: usize, -} - -/// Count the total number of spawned leaf executions in an execution graph, -/// recursing into nested `Expanded` subgraphs. -/// -/// In-process executions are not counted because they don't spawn child processes -/// and thus don't need stdin. -fn count_spawn_leaves(graph: &ExecutionGraph) -> usize { - graph - .node_weights() - .flat_map(|task| task.items.iter()) - .map(|item| match &item.kind { - ExecutionItemKind::Leaf(LeafExecutionKind::Spawn(_)) => 1, - ExecutionItemKind::Leaf(LeafExecutionKind::InProcess(_)) => 0, - ExecutionItemKind::Expanded(nested_graph) => count_spawn_leaves(nested_graph), - }) - .sum() -} - -/// Builder for the labeled graph reporter. -/// -/// Created by the caller before execution, then transitioned to [`LabeledGraphReporter`] -/// by calling `build()` with the execution graph. 
-/// -/// # Output Modes -/// -/// ## Normal Mode (default) -/// - Prints command lines with cache status indicators during execution -/// - Shows full summary with Statistics and Task Details at the end -/// -/// ## Simplified Summary for Single Tasks -/// - When a single task with display info is executed: -/// - Skips full summary (no Statistics/Task Details sections) -/// - Shows only cache status inline -/// - Results in clean output showing just the command's stdout/stderr -pub struct LabeledReporterBuilder { - writer: W, - workspace_path: Arc, -} - -impl LabeledReporterBuilder { - /// Create a new labeled reporter builder. - /// - /// - `writer`: The output stream (typically `std::io::stdout()`). - /// - `workspace_path`: The workspace root, used to compute relative cwds in display. - pub const fn new(writer: W, workspace_path: Arc) -> Self { - Self { writer, workspace_path } - } -} - -impl GraphExecutionReporterBuilder for LabeledReporterBuilder { - fn build(self: Box, graph: &Arc) -> Box { - let spawn_leaf_count = count_spawn_leaves(graph); - Box::new(LabeledGraphReporter { - shared: Rc::new(RefCell::new(SharedReporterState { - writer: self.writer, - executions: Vec::new(), - stats: ExecutionStats::default(), - spawn_leaf_count, - })), - graph: Arc::clone(graph), - workspace_path: self.workspace_path, - }) - } -} - -/// Graph-level reporter that tracks multiple leaf executions and prints a summary. -/// -/// Creates [`LabeledLeafReporter`] instances for each leaf execution. The leaf reporters -/// share mutable state with this reporter via `Rc>`. 
-pub struct LabeledGraphReporter { - shared: Rc>>, - graph: Arc, - workspace_path: Arc, -} - -impl GraphExecutionReporter for LabeledGraphReporter { - fn new_leaf_execution(&mut self, path: &LeafExecutionPath) -> Box { - // Look up display info from the graph using the path - let display = path.resolve_display(&self.graph).cloned(); - Box::new(LabeledLeafReporter { - shared: Rc::clone(&self.shared), - display, - workspace_path: Arc::clone(&self.workspace_path), - started: false, - is_cache_hit: false, - }) - } - - fn finish(self: Box) -> Result<(), ExitStatus> { - let mut shared = self.shared.borrow_mut(); - - // Print summary. - // Special case: single execution without display info (e.g., synthetic via nested expansion) - // → skip summary since there's nothing meaningful to show. - let is_single_displayless = - shared.executions.len() == 1 && shared.executions[0].display.is_none(); - if !is_single_displayless { - // Destructure to get simultaneous mutable access to writer and immutable - // access to executions/stats, satisfying the borrow checker. - let SharedReporterState { ref mut writer, ref executions, ref stats, .. } = *shared; - print_summary(writer, executions, stats, &self.workspace_path); - } - - // Determine exit code based on failed tasks and infrastructure errors: - // - Infrastructure errors (cache lookup, spawn failure) have error_message set - // but no meaningful exit_status. - // - Process failures have a non-zero exit_status. - // - // Rules: - // 1. No failures at all → Ok(()) - // 2. Exactly one process failure, no infra errors → use that task's exit code - // 3. 
Any infra errors, or multiple failures → Err(1) - let has_infra_errors = shared.executions.iter().any(|exec| exec.error_message.is_some()); - - let failed_exit_codes: Vec = shared - .executions - .iter() - .filter_map(|exec| exec.exit_status.as_ref()) - .filter(|status| !status.success()) - .map(|status| exit_status_to_code(*status)) - .collect(); - - match (has_infra_errors, failed_exit_codes.as_slice()) { - (false, []) => Ok(()), - (false, [code]) => { - // Return the single failed task's exit code (clamped to u8 range) - #[expect( - clippy::cast_sign_loss, - reason = "value is clamped to 1..=255, always positive" - )] - Err(ExitStatus((*code).clamp(1, 255) as u8)) - } - _ => Err(ExitStatus::FAILURE), - } - } -} - -/// Leaf-level reporter created by [`LabeledGraphReporter::new_leaf_execution`]. -/// -/// Writes output in real-time to the shared writer and updates shared stats/errors -/// via `Rc>`. -struct LabeledLeafReporter { - shared: Rc>>, - /// Display info for this execution, looked up from the graph via the path. - display: Option, - workspace_path: Arc, - /// Whether `start()` has been called. Used to determine if stats should be updated - /// in `finish()` and whether to push an `ExecutionInfo` entry. - started: bool, - /// Whether the current execution is a cache hit, set by `start()`. - is_cache_hit: bool, -} - -impl LeafExecutionReporter for LabeledLeafReporter { - fn stdin_suggestion(&self) -> StdinSuggestion { - // Only suggest inherited stdin when the graph has exactly one spawned leaf - // execution. With multiple spawned tasks, stdin should not be shared — each - // task gets /dev/null to avoid contention. - let shared = self.shared.borrow(); - if shared.spawn_leaf_count == 1 { - StdinSuggestion::Inherited - } else { - StdinSuggestion::Null - } - } - - fn start(&mut self, cache_status: CacheStatus) { - self.started = true; - self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. 
}); - - let mut shared = self.shared.borrow_mut(); - - // Update statistics based on cache status - match &cache_status { - CacheStatus::Hit { .. } => shared.stats.cache_hits += 1, - CacheStatus::Miss(_) => shared.stats.cache_misses += 1, - CacheStatus::Disabled(_) => shared.stats.cache_disabled += 1, - } - - // Print command line with cache status (if display info is available) - if let Some(ref display) = self.display { - write_command_with_cache_status( - &mut shared.writer, - display, - &self.workspace_path, - &cache_status, - ); - } - - // Store execution info for the summary - shared.executions.push(ExecutionInfo { - display: self.display.clone(), - cache_status, - exit_status: None, - error_message: None, - }); - } - - fn output(&mut self, _kind: OutputKind, content: BString) { - let mut shared = self.shared.borrow_mut(); - let _ = shared.writer.write_all(&content); - let _ = shared.writer.flush(); - } - - fn finish( - self: Box, - status: Option, - _cache_update_status: CacheUpdateStatus, - error: Option, - ) { - let mut shared = self.shared.borrow_mut(); - - // Handle errors - if let Some(ref message) = error { - write_error_message(&mut shared.writer, message); - - // Update the execution info if start() was called (an entry was pushed). - // Without the `self.started` guard, `last_mut()` would return a - // *different* execution's entry, corrupting its error_message. 
- if self.started - && let Some(exec) = shared.executions.last_mut() - { - exec.error_message = Some(message.clone()); - } - - shared.stats.failed += 1; - } - - // Update failure statistics for non-zero exit status (not an error, just a failed task) - // None means success (cache hit or in-process), Some checks the actual exit status - if error.is_none() && status.is_some_and(|s| !s.success()) { - shared.stats.failed += 1; - } - - // Update execution info with exit status (if start() was called and an entry exists) - if self.started - && let Some(exec) = shared.executions.last_mut() - { - exec.exit_status = status; - } - - // For executions without display info (synthetics via nested expansion) that are - // cache hits, print the cache hit message - if self.started && self.display.is_none() && self.is_cache_hit { - write_cache_hit_message(&mut shared.writer); - } - - // Add a trailing newline after each task's output for readability. - // Skip if start() was never called (e.g. cache lookup failure) — there's - // no task output to separate. - if self.started { - let _ = writeln!(shared.writer); - } - } -} - -// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -// Summary printing -// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - -/// Print the full execution summary with statistics, performance, and per-task details. -/// -/// Called by [`LabeledGraphReporter::finish`] after all tasks have executed. -/// Infrastructure errors and task failures are included in the summary. 
-#[expect( - clippy::too_many_lines, - reason = "summary formatting is inherently verbose with many write calls" -)] -fn print_summary( - writer: &mut impl Write, - executions: &[ExecutionInfo], - stats: &ExecutionStats, - workspace_path: &AbsolutePath, -) { - let total = executions.len(); - let cache_hits = stats.cache_hits; - let cache_misses = stats.cache_misses; - let cache_disabled = stats.cache_disabled; - let failed = stats.failed; - - // Print summary header with decorative line - // Note: leaf finish already adds a trailing newline after each task's output - // Add an extra blank line before the summary for visual separation - let _ = writeln!(writer); - let _ = writeln!( - writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) - ); - let _ = writeln!( - writer, - "{}", - " Vite+ Task Runner • Execution Summary".style(Style::new().bold().bright_white()) - ); - let _ = writeln!( - writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) - ); - let _ = writeln!(writer); - - // Print statistics - let cache_disabled_str = if cache_disabled > 0 { - Str::from( - vite_str::format!("• {cache_disabled} cache disabled") - .style(Style::new().bright_black()) - .to_string(), - ) - } else { - Str::default() - }; - - let failed_str = if failed > 0 { - Str::from(vite_str::format!("• {failed} failed").style(Style::new().red()).to_string()) - } else { - Str::default() - }; - - // Build statistics line, only including non-empty parts - // Note: trailing space after "cache misses" is intentional for consistent formatting - let _ = write!( - writer, - "{} {} {} {} ", - "Statistics:".style(Style::new().bold()), - vite_str::format!(" {total} tasks").style(Style::new().bright_white()), - vite_str::format!("• {cache_hits} cache hits").style(Style::new().green()), - vite_str::format!("• {cache_misses} cache misses").style(CACHE_MISS_STYLE), - ); - if !cache_disabled_str.is_empty() { - let 
_ = write!(writer, "{cache_disabled_str} "); - } - if !failed_str.is_empty() { - let _ = write!(writer, "{failed_str} "); - } - let _ = writeln!(writer); - - // Calculate cache hit rate - let cache_rate = if total > 0 { - #[expect( - clippy::cast_possible_truncation, - reason = "percentage is always 0..=100, fits in u32" - )] - #[expect(clippy::cast_sign_loss, reason = "percentage is always non-negative")] - #[expect( - clippy::cast_precision_loss, - reason = "acceptable precision loss for display percentage" - )] - { - (f64::from(cache_hits as u32) / total as f64 * 100.0) as u32 - } - } else { - 0 - }; - - // Calculate total time saved from cache hits - let total_saved: Duration = executions - .iter() - .filter_map(|exec| { - if let CacheStatus::Hit { replayed_duration } = &exec.cache_status { - Some(*replayed_duration) - } else { - None - } - }) - .sum(); - - let _ = write!( - writer, - "{} {} cache hit rate", - "Performance:".style(Style::new().bold()), - format_args!("{cache_rate}%").style(if cache_rate >= 75 { - Style::new().green().bold() - } else if cache_rate >= 50 { - CACHE_MISS_STYLE - } else { - Style::new().red() - }) - ); - - if total_saved > Duration::ZERO { - let _ = write!( - writer, - ", {:.2?} saved in total", - total_saved.style(Style::new().green().bold()) - ); - } - let _ = writeln!(writer); - let _ = writeln!(writer); - - // Detailed task results - let _ = writeln!(writer, "{}", "Task Details:".style(Style::new().bold())); - let _ = writeln!( - writer, - "{}", - "────────────────────────────────────────────────".style(Style::new().bright_black()) - ); - - for (idx, exec) in executions.iter().enumerate() { - // Skip executions without display info (they have nothing to show in the summary) - let Some(ref display) = exec.display else { - continue; - }; - - let task_display = &display.task_display; - - // Task name and index - let _ = write!( - writer, - " {} {}", - vite_str::format!("[{}]", idx + 1).style(Style::new().bright_black()), - 
task_display.to_string().style(Style::new().bright_white().bold()) - ); - - // Command with cwd prefix - let command_display = format_command_display(display, workspace_path); - let _ = write!(writer, ": {}", command_display.style(COMMAND_STYLE)); - - // Execution result icon - // None means success (cache hit or in-process), Some checks actual status - match &exec.exit_status { - None => { - let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); - } - Some(exit_status) if exit_status.success() => { - let _ = write!(writer, " {}", "✓".style(Style::new().green().bold())); - } - Some(exit_status) => { - let code = exit_status_to_code(*exit_status); - let _ = write!( - writer, - " {} {}", - "✗".style(Style::new().red().bold()), - vite_str::format!("(exit code: {code})").style(Style::new().red()) - ); - } - } - let _ = writeln!(writer); - - // Cache status details — use display module for plain text, apply styling here - let cache_summary = format_cache_status_summary(&exec.cache_status); - let styled_summary = match &exec.cache_status { - CacheStatus::Hit { .. 
} => cache_summary.style(Style::new().green()), - CacheStatus::Miss(_) => cache_summary.style(CACHE_MISS_STYLE), - CacheStatus::Disabled(_) => cache_summary.style(Style::new().bright_black()), - }; - let _ = writeln!(writer, " {styled_summary}"); - - // Error message if present - if let Some(ref error_msg) = exec.error_message { - let _ = writeln!( - writer, - " {} {}", - "✗ Error:".style(Style::new().red().bold()), - error_msg.style(Style::new().red()) - ); - } - - // Add spacing between tasks except for the last one - if idx < executions.len() - 1 { - let _ = writeln!( - writer, - " {}", - "·······················································" - .style(Style::new().bright_black()) - ); - } - } - - let _ = writeln!( - writer, - "{}", - "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━".style(Style::new().bright_black()) - ); -} - // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ // Tests // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ @@ -930,6 +338,7 @@ mod tests { }; use super::*; + use crate::session::reporter::labeled::count_spawn_leaves; /// Create a dummy `AbsolutePath` for test fixtures. fn test_path() -> Arc { diff --git a/crates/vite_task/src/session/reporter/plain.rs b/crates/vite_task/src/session/reporter/plain.rs new file mode 100644 index 00000000..88a81942 --- /dev/null +++ b/crates/vite_task/src/session/reporter/plain.rs @@ -0,0 +1,87 @@ +//! Plain reporter — a standalone [`LeafExecutionReporter`] for single-leaf execution. +//! +//! Used for synthetic executions (e.g., auto-install) where there is no execution graph +//! and no summary is needed. Owns its writer directly with no shared state. 
+ +use std::io::Write; + +use bstr::BString; +use vite_str::Str; + +use super::{LeafExecutionReporter, StdinSuggestion, write_cache_hit_message, write_error_message}; +use crate::session::event::{CacheStatus, CacheUpdateStatus, OutputKind}; + +/// A self-contained [`LeafExecutionReporter`] for single-leaf executions +/// (e.g., `execute_synthetic`). +/// +/// This reporter: +/// - Owns its writer directly (no `Rc` shared state) +/// - Has no display info (synthetic executions have no task display) +/// - Does not track stats or print summaries +/// - Supports `silent_if_cache_hit` to suppress output for cached executions +/// +/// The exit status is determined by the caller from the `execute_spawn` return value, +/// not from the reporter. +pub struct PlainReporter { + writer: W, + /// When true, suppresses all output (command line, process output, cache hit message) + /// for executions that are cache hits. + silent_if_cache_hit: bool, + /// Whether the current execution is a cache hit, set by `start()`. + is_cache_hit: bool, +} + +impl PlainReporter { + /// Create a new plain reporter. + /// + /// - `writer`: The output stream (typically `std::io::stdout()`). + /// - `silent_if_cache_hit`: If true, suppress all output when the execution is a cache hit. + pub const fn new(writer: W, silent_if_cache_hit: bool) -> Self { + Self { writer, silent_if_cache_hit, is_cache_hit: false } + } + + /// Returns true if output should be suppressed for this execution. + const fn is_silent(&self) -> bool { + self.silent_if_cache_hit && self.is_cache_hit + } +} + +impl LeafExecutionReporter for PlainReporter { + fn stdin_suggestion(&self) -> StdinSuggestion { + // PlainReporter is used for single-leaf synthetic executions (e.g., auto-install). + // Always suggest inherited stdin so the spawned process can be interactive. + StdinSuggestion::Inherited + } + + fn start(&mut self, cache_status: CacheStatus) { + self.is_cache_hit = matches!(cache_status, CacheStatus::Hit { .. 
}); + // PlainReporter has no display info (synthetic executions), + // so there's no command line to print at start. + } + + fn output(&mut self, _kind: OutputKind, content: BString) { + if self.is_silent() { + return; + } + let _ = self.writer.write_all(&content); + let _ = self.writer.flush(); + } + + fn finish( + mut self: Box, + _status: Option, + _cache_update_status: CacheUpdateStatus, + error: Option, + ) { + // Handle errors — print inline error message + if let Some(ref message) = error { + write_error_message(&mut self.writer, message); + return; + } + + // For cache hits, print the "cache hit" message (unless silent) + if self.is_cache_hit && !self.is_silent() { + write_cache_hit_message(&mut self.writer); + } + } +} From ccee3b76b179e417ba50f3024e440d4b6b71e88d Mon Sep 17 00:00:00 2001 From: branchseer Date: Sun, 15 Feb 2026 12:12:14 +0800 Subject: [PATCH 09/11] test: add e2e tests for topological execution order with -r and -t --- .../topological-execution-order/package.json | 4 +++ .../packages/app/package.json | 9 +++++ .../packages/core/package.json | 6 ++++ .../packages/lib/package.json | 9 +++++ .../pnpm-workspace.yaml | 2 ++ .../snapshots.toml | 22 +++++++++++++ ...d runs dependencies before dependents.snap | 33 +++++++++++++++++++ ... 
build from app runs all dependencies.snap | 33 +++++++++++++++++++ ...d from lib runs only its dependencies.snap | 27 +++++++++++++++ .../vite-task.json | 3 ++ 10 files changed, 148 insertions(+) create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/package.json create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/app/package.json create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/core/package.json create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/lib/package.json create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/pnpm-workspace.yaml create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots.toml create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/recursive build runs dependencies before dependents.snap create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from app runs all dependencies.snap create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from lib runs only its dependencies.snap create mode 100644 crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/vite-task.json diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/package.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/package.json new file mode 100644 index 00000000..7a6b40e1 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/package.json @@ -0,0 +1,4 @@ +{ + "name": "topological-execution-order-test", + "private": true +} diff --git 
a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/app/package.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/app/package.json new file mode 100644 index 00000000..22001dac --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/app/package.json @@ -0,0 +1,9 @@ +{ + "name": "@topo/app", + "dependencies": { + "@topo/lib": "workspace:*" + }, + "scripts": { + "build": "echo 'Building app'" + } +} diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/core/package.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/core/package.json new file mode 100644 index 00000000..1ae14806 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/core/package.json @@ -0,0 +1,6 @@ +{ + "name": "@topo/core", + "scripts": { + "build": "echo 'Building core'" + } +} diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/lib/package.json b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/lib/package.json new file mode 100644 index 00000000..181806cc --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/packages/lib/package.json @@ -0,0 +1,9 @@ +{ + "name": "@topo/lib", + "dependencies": { + "@topo/core": "workspace:*" + }, + "scripts": { + "build": "echo 'Building lib'" + } +} diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/pnpm-workspace.yaml b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/pnpm-workspace.yaml new file mode 100644 index 00000000..924b55f4 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/pnpm-workspace.yaml @@ -0,0 +1,2 @@ +packages: + - packages/* diff 
--git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots.toml b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots.toml new file mode 100644 index 00000000..fab682d8 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots.toml @@ -0,0 +1,22 @@ +# Tests that tasks execute in dependency (topological) order. +# Dependency chain: @topo/core <- @topo/lib <- @topo/app + +[[e2e]] +name = "recursive build runs dependencies before dependents" +steps = [ + "vp run -r build # core -> lib -> app", +] + +[[e2e]] +name = "transitive build from app runs all dependencies" +cwd = "packages/app" +steps = [ + "vp run -t build # core -> lib -> app", +] + +[[e2e]] +name = "transitive build from lib runs only its dependencies" +cwd = "packages/lib" +steps = [ + "vp run -t build # core -> lib", +] diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/recursive build runs dependencies before dependents.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/recursive build runs dependencies before dependents.snap new file mode 100644 index 00000000..45d3c58c --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/recursive build runs dependencies before dependents.snap @@ -0,0 +1,33 @@ +--- +source: crates/vite_task_bin/tests/e2e_snapshots/main.rs +expression: e2e_outputs +--- +> vp run -r build # core -> lib -> app +~/packages/core$ echo 'Building core' ⊘ cache disabled: built-in command +Building core + +~/packages/lib$ echo 'Building lib' ⊘ cache disabled: built-in command +Building lib + +~/packages/app$ echo 'Building app' ⊘ cache disabled: built-in command +Building app + + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Vite+ Task Runner • Execution Summary +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + 
+Statistics: 3 tasks • 0 cache hits • 0 cache misses • 3 cache disabled +Performance: 0% cache hit rate + +Task Details: +──────────────────────────────────────────────── + [1] @topo/core#build: ~/packages/core$ echo 'Building core' ✓ + → Cache disabled for built-in command + ······················································· + [2] @topo/lib#build: ~/packages/lib$ echo 'Building lib' ✓ + → Cache disabled for built-in command + ······················································· + [3] @topo/app#build: ~/packages/app$ echo 'Building app' ✓ + → Cache disabled for built-in command +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from app runs all dependencies.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from app runs all dependencies.snap new file mode 100644 index 00000000..b86b760b --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from app runs all dependencies.snap @@ -0,0 +1,33 @@ +--- +source: crates/vite_task_bin/tests/e2e_snapshots/main.rs +expression: e2e_outputs +--- +> vp run -t build # core -> lib -> app +~/packages/core$ echo 'Building core' ⊘ cache disabled: built-in command +Building core + +~/packages/lib$ echo 'Building lib' ⊘ cache disabled: built-in command +Building lib + +~/packages/app$ echo 'Building app' ⊘ cache disabled: built-in command +Building app + + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Vite+ Task Runner • Execution Summary +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Statistics: 3 tasks • 0 cache hits • 0 cache misses • 3 cache disabled +Performance: 0% cache hit rate + +Task Details: +──────────────────────────────────────────────── + [1] @topo/core#build: ~/packages/core$ echo 'Building core' ✓ + → Cache disabled for built-in command + 
······················································· + [2] @topo/lib#build: ~/packages/lib$ echo 'Building lib' ✓ + → Cache disabled for built-in command + ······················································· + [3] @topo/app#build: ~/packages/app$ echo 'Building app' ✓ + → Cache disabled for built-in command +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from lib runs only its dependencies.snap b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from lib runs only its dependencies.snap new file mode 100644 index 00000000..b352d2da --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/snapshots/transitive build from lib runs only its dependencies.snap @@ -0,0 +1,27 @@ +--- +source: crates/vite_task_bin/tests/e2e_snapshots/main.rs +expression: e2e_outputs +--- +> vp run -t build # core -> lib +~/packages/core$ echo 'Building core' ⊘ cache disabled: built-in command +Building core + +~/packages/lib$ echo 'Building lib' ⊘ cache disabled: built-in command +Building lib + + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Vite+ Task Runner • Execution Summary +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Statistics: 2 tasks • 0 cache hits • 0 cache misses • 2 cache disabled +Performance: 0% cache hit rate + +Task Details: +──────────────────────────────────────────────── + [1] @topo/core#build: ~/packages/core$ echo 'Building core' ✓ + → Cache disabled for built-in command + ······················································· + [2] @topo/lib#build: ~/packages/lib$ echo 'Building lib' ✓ + → Cache disabled for built-in command +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ diff --git a/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/vite-task.json 
b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/vite-task.json new file mode 100644 index 00000000..1d0fe9f2 --- /dev/null +++ b/crates/vite_task_bin/tests/e2e_snapshots/fixtures/topological-execution-order/vite-task.json @@ -0,0 +1,3 @@ +{ + "cacheScripts": true +} From 0392c28fefdaf84c085d99e70027f147c0a97654 Mon Sep 17 00:00:00 2001 From: branchseer Date: Sun, 15 Feb 2026 12:15:56 +0800 Subject: [PATCH 10/11] cargo shear --fix --- Cargo.lock | 1 - crates/vite_task/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5932fe33..63bdc15d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3877,7 +3877,6 @@ dependencies = [ "nix 0.30.1", "once_cell", "owo-colors", - "petgraph", "pty_terminal_test_client", "rayon", "rusqlite", diff --git a/crates/vite_task/Cargo.toml b/crates/vite_task/Cargo.toml index 131fcbb7..c1224d45 100644 --- a/crates/vite_task/Cargo.toml +++ b/crates/vite_task/Cargo.toml @@ -23,7 +23,6 @@ fspy = { workspace = true } futures-util = { workspace = true } once_cell = { workspace = true } owo-colors = { workspace = true } -petgraph = { workspace = true } pty_terminal_test_client = { workspace = true } rayon = { workspace = true } rusqlite = { workspace = true, features = ["bundled"] } From cd6aa790e036f5b0be6a7625b10ff6c26ec15e77 Mon Sep 17 00:00:00 2001 From: branchseer Date: Sun, 15 Feb 2026 12:53:26 +0800 Subject: [PATCH 11/11] fix: replace impossible fallbacks with expect/unreachable --- crates/vite_task/src/session/reporter/mod.rs | 13 ++++++------- crates/vite_task_plan/src/plan.rs | 18 ++++++++---------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/crates/vite_task/src/session/reporter/mod.rs b/crates/vite_task/src/session/reporter/mod.rs index 84aaa70e..f4cde301 100644 --- a/crates/vite_task/src/session/reporter/mod.rs +++ b/crates/vite_task/src/session/reporter/mod.rs @@ -153,13 +153,12 @@ impl LeafExecutionPath { current_graph = nested_graph; } 
ExecutionItemKind::Leaf(_) => { - // A Leaf in the middle of the path is a bug in path construction. - // This should never happen if the execution engine builds paths correctly. - debug_assert!( - false, - "LeafExecutionPath: intermediate element is a Leaf, expected Expanded" - ); - return None; + // A Leaf in the middle of the path means the execution engine + // pushed a Leaf node as an intermediate step, which is a bug — + // only Expanded items can contain nested graphs to descend into. + unreachable!( + "LeafExecutionPath: intermediate element at depth {depth} is a Leaf, expected Expanded" + ) } } } diff --git a/crates/vite_task_plan/src/plan.rs b/crates/vite_task_plan/src/plan.rs index 9aa6fa79..cb394079 100644 --- a/crates/vite_task_plan/src/plan.rs +++ b/crates/vite_task_plan/src/plan.rs @@ -548,21 +548,19 @@ pub async fn plan_query_request( // it returns `CycleError` identifying one node in the cycle. ExecutionGraph::try_from_graph(inner_graph).map_err(|cycle| { // Look up the human-readable task display for the node involved in the cycle. - let task_display = - execution_node_indices_by_task_index.iter().find_map(|(task_idx, &exec_idx)| { + // Every node in `inner_graph` was added via `inner_graph.add_node()` above, + // with a corresponding entry inserted into `execution_node_indices_by_task_index`. + // The cycle error's node_id comes from the same graph, so the lookup always succeeds. + let display = execution_node_indices_by_task_index + .iter() + .find_map(|(task_idx, &exec_idx)| { if exec_idx == cycle.node_id() { Some(context.indexed_task_graph().display_task(*task_idx)) } else { None } - }); - // If for some reason the node wasn't found in our map (shouldn't happen), - // fall back to a placeholder display. 
- let display = task_display.unwrap_or_else(|| vite_task_graph::display::TaskDisplay { - package_name: "unknown".into(), - task_name: "unknown".into(), - package_path: Arc::clone(context.workspace_path()), - }); + }) + .expect("cycle node must exist in execution_node_indices_by_task_index"); Error::CycleDependencyDetected(display) }) }