From 6165d94aee114d8e9bfd098f5f9f19d55a6715f5 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sat, 11 Apr 2026 09:47:48 -0300
Subject: [PATCH 01/32] Add project-scoped setup and document settings flow
---
docs/PRD.md | 32 +-
docs/SPEC.md | 54 +-
src-tauri/src/lib.rs | 634 +++++++++--
src/App.tsx | 1289 ++++++++++++++--------
src/components/AppRail.tsx | 24 +-
src/components/CliHealthCard.tsx | 44 +
src/components/MainWorkspace.tsx | 37 +-
src/components/PrdEmptyState.tsx | 71 ++
src/components/ProjectAiSettingsCard.tsx | 126 +++
src/components/ProjectDocumentsCard.tsx | 98 ++
src/components/SettingsView.tsx | 173 +--
src/components/SpecEmptyState.tsx | 52 +-
src/components/StatusPill.tsx | 1 +
src/lib/appShell.ts | 8 +
src/lib/projectConfig.ts | 114 ++
src/lib/runtime.ts | 68 +-
src/screens/ConfigurationScreen.tsx | 276 +++++
src/store/useProjectStore.ts | 62 +-
src/store/useSettingsStore.ts | 34 +-
src/types.ts | 23 +
20 files changed, 2476 insertions(+), 744 deletions(-)
create mode 100644 src/components/CliHealthCard.tsx
create mode 100644 src/components/PrdEmptyState.tsx
create mode 100644 src/components/ProjectAiSettingsCard.tsx
create mode 100644 src/components/ProjectDocumentsCard.tsx
create mode 100644 src/lib/projectConfig.ts
create mode 100644 src/screens/ConfigurationScreen.tsx
diff --git a/docs/PRD.md b/docs/PRD.md
index 995cecc..8b4747c 100644
--- a/docs/PRD.md
+++ b/docs/PRD.md
@@ -2,7 +2,7 @@
## 1. Product Overview
-**SpecForge** is a spec review workspace for desktop-first development. It helps a user load a PRD and a technical spec, inspect workspace files, review environment readiness, draft a missing spec from AI when needed, and step through an execution-style dashboard before handing work off to a real IDE or CLI workflow.
+**SpecForge** is a setup-first review workspace for desktop-first development. It helps a user choose a project folder, persist project-scoped AI/document settings in `.specforge/settings.json`, inspect CLI readiness, draft missing PRD/spec documents from AI when needed, review workspace files, and step through an execution-style dashboard before handing work off to a real IDE or CLI workflow.
Today the product focuses on **review, import, diff inspection, and approval UX**. The execution loop shown in the app is currently a **simulated agent run**, not a real Claude CLI or Codex CLI orchestration engine.
@@ -14,29 +14,28 @@ Today the product focuses on **review, import, diff inspection, and approval UX*
## 3. Current User Flow
-1. **Open the review workspace:** The app starts with bundled `docs/PRD.md` and `docs/SPEC.md`.
-2. **Load documents:** The user can use pane-header `Load PRD` and `Load Spec` actions to import Markdown or PDF through the desktop native file picker, or Markdown through the browser fallback picker.
-3. **Open a workspace folder:** The user can scan a folder into the workspace tree. Desktop scanning respects `.gitignore`, and browser folder import applies root and nested `.gitignore` rules.
-4. **Handle missing documents:** If the opened workspace does not contain a PRD, the left pane should show a dedicated PRD empty state until the user loads a file or switches into edit mode. If the workspace does not contain a spec, the right pane should show either a generation state or a blocked state that asks for a PRD first.
-5. **Review and adjust:** The user can edit either document directly, switch between preview and edit, approve the spec from the spec pane header, and then prepare the execution flow.
-6. **Approve and run:** Once the spec is approved, the user can launch the execution dashboard in stepped, milestone, or god mode.
-7. **Inspect the result:** The app streams simulated terminal output, shows approval gates, and renders a diff based on the current git state when available.
+1. **Open the setup screen:** The app starts on a configuration flow instead of dropping directly into review.
+2. **Choose the project folder:** The user picks a workspace folder. If `.specforge/settings.json` already exists, SpecForge loads it immediately.
+3. **Review CLI status:** The user sees Claude CLI, Codex CLI, and Git health plus optional machine-local override paths.
+4. **Configure AI defaults:** The user chooses the default model/reasoning profile and edits the saved PRD/spec prompt templates for this project.
+5. **Configure document locations:** The user sets the PRD path, spec path, and optional supporting document paths relative to the selected workspace, then saves the setup to create or update `.specforge/settings.json`.
+6. **Review and adjust:** The review workspace loads the configured PRD/spec files when they exist. Missing files surface dedicated empty states instead of fallback bundled docs.
+7. **Approve and run:** Once the spec is approved, the user can launch the execution dashboard in stepped, milestone, or god mode.
+8. **Inspect the result:** The app streams simulated terminal output, shows approval gates, and renders a diff based on the current git state when available.
## 4. Functional Requirements
### 4.1. Document Ingestion
* **Desktop native picker:** Must support `.md` and `.pdf` imports for PRD and spec documents.
-* **Browser file import:** Must support Markdown only and explain that PDF parsing requires the desktop runtime.
* **Pane-local controls:** The PRD and spec panes must own their own load actions instead of relying on a separate sidebar ingestion panel.
-* **Workspace auto-detection:** When a workspace is opened, the app should try to load:
- * `PRD.md`, then `PRD.pdf`
- * `spec.md`, then `spec.pdf`
-* **Missing document reset:** Opening a workspace must clear stale PRD/spec content from the previous workspace when those files are not found in the new one.
-* **PRD empty state:** When the active PRD content is empty in preview mode, the PRD pane must show a dedicated empty state with the same preview/load/edit controls as the normal document header.
-* **Empty spec generation:** When the active spec content is empty and a PRD is available, the spec pane must show a textbox and generate button that use the current PRD plus the user's note to draft a markdown spec through the selected desktop AI CLI.
+* **Project configuration file:** Saving setup must create or update `.specforge/settings.json` inside the selected workspace.
+* **Configured document paths:** The review panes should use the PRD/spec paths stored in `.specforge/settings.json`, not bundled defaults.
+* **Missing document reset:** Loading a project must clear stale PRD/spec content when the configured files do not exist yet.
+* **PRD empty state:** When the active PRD content is empty in preview mode, the PRD pane must show a dedicated empty state with a textbox, helper copy describing the saved default PRD prompt, and a generate action that appends the textbox note after that saved prompt.
+* **Empty spec generation:** When the active spec content is empty and a PRD is available, the spec pane must show a textbox and generate button that append the user's note after the saved default spec prompt and include the current PRD content.
* **Blocked spec state:** When both the PRD and spec are empty in preview mode, the spec pane must explain that a PRD is required before generation while still allowing an existing spec to be loaded.
-* **Generated spec persistence:** After generation succeeds in the desktop runtime, the markdown must be saved into the same folder as the active PRD using a sibling `SPEC.md` or `spec.md` file before the pane updates.
+* **Generated document persistence:** After PRD/spec generation succeeds in the desktop runtime, the markdown must be saved into the configured project-relative Markdown path from `.specforge/settings.json` before the pane updates.
### 4.2. Workspace Review
@@ -54,6 +53,7 @@ Today the product focuses on **review, import, diff inspection, and approval UX*
* **Environment scan:** The app must surface Claude CLI, Codex CLI, and Git availability plus optional manual override paths.
* **Manual override behavior:** A manual path is only considered healthy after the backend successfully probes it as an executable.
* **Theme controls:** The workspace must support Dracula, Light, and System themes.
+* **Project-scoped AI settings:** Model selection, reasoning profile, PRD prompt, spec prompt, and configured document paths must be saved per project in `.specforge/settings.json`.
* **Git diff visibility:** The review diff should include staged, unstaged, and untracked changes when a repository is available. Sample diff content is acceptable only when the repository is effectively clean or when running in browser fallback mode.
### 4.4. Approval and Execution UX
diff --git a/docs/SPEC.md b/docs/SPEC.md
index f66f4f6..ab0182e 100644
--- a/docs/SPEC.md
+++ b/docs/SPEC.md
@@ -32,20 +32,17 @@ The webview must never execute shell commands or arbitrary file reads directly.
## 3. Default State And Stores
-### 3.1. Bundled review docs
+### 3.1. Setup-first startup
-On startup, the app loads:
+On startup, the app routes to a project configuration screen. The user selects a workspace folder, and the desktop runtime either loads an existing `.specforge/settings.json` or prepares default project settings that can be saved into that file.
-* `docs/PRD.md`
-* `docs/SPEC.md`
-
-These bundled documents are the default contents of the PRD and spec panes until the user imports replacements or opens a workspace that clears one of those documents.
+The review workspace no longer boots with bundled `docs/PRD.md` / `docs/SPEC.md` content by default.
### 3.2. Zustand stores
-* **`useProjectStore`:** PRD/spec content, approval mode, selected model, selected range, annotations, and open workspace file tabs.
+* **`useProjectStore`:** PRD/spec content, approval mode, selected model/reasoning, saved prompt templates, configured document paths, annotations, and open workspace file tabs.
* **`useAgentStore`:** Simulated run status, streamed output, current milestone, pending diff, and summary text.
-* **`useSettingsStore`:** Theme, CLI override paths, environment scan results, and the current workspace tree entries.
+* **`useSettingsStore`:** Theme, CLI override paths, last opened project path, environment scan results, and the current workspace tree entries.
## 4. Import And Workspace Flows
@@ -56,35 +53,29 @@ The desktop runtime currently exposes two import paths:
* **User-facing import:** `pick_document()` opens a native file picker for `.md` and `.pdf`, parses the chosen file in Rust, and returns a `WorkspaceDocument`. The PRD and spec panes trigger this from their own header controls.
* **Reserved path import:** `parse_document(filePath)` still accepts only repository-relative paths that stay inside the project root, but it is not currently surfaced in the main review UI.
-### 4.2. Browser import fallback
-
-Browser mode keeps a file-input fallback:
-
-* Direct document import supports **Markdown only**.
-* Browser-side folder import can discover PRD/spec matches, but PDF parsing is intentionally unavailable there.
+### 4.2. Project setup, workspace scan, and file opens
-### 4.3. Workspace scan and file opens
-
-* `open_workspace_folder()` opens a native folder picker, walks the chosen directory with `.gitignore` awareness, and returns workspace entries plus detected PRD/spec documents.
+* `pick_project_folder()` opens a native folder picker, walks the chosen directory with `.gitignore` awareness, loads `.specforge/settings.json` when it exists, and returns a project-context payload for the setup flow.
+* `load_project_context(folderPath)` reloads an already-known project folder and rehydrates the workspace plus saved project settings.
+* `save_project_settings(folderPath, settings)` writes `.specforge/settings.json` inside the selected project.
* The backend stores the active workspace root and its relative-path-to-file map in shared state.
* `read_workspace_file(filePath)` now treats `filePath` as a **workspace-relative path only** and resolves it through the active workspace map.
* Files outside the active workspace must be rejected even if the frontend passes an absolute path or traversal sequence.
-* When a scanned workspace does not contain `PRD.md`/`PRD.pdf` or `spec.md`/`spec.pdf`, the frontend must clear the prior document content instead of leaving stale content visible.
+* When the configured PRD/spec files do not exist yet, the frontend clears the prior document content instead of leaving stale content visible.
### 4.4. Empty document and spec generation flow
* When `prdContent` is empty and the PRD pane is in preview mode, the left pane swaps to a dedicated PRD empty state while preserving preview/load/edit controls in the header.
+* The PRD empty state includes a note field, shows the saved default PRD prompt from `.specforge/settings.json`, and explains that the note is appended after that prompt before generation.
+* `generate_prd_document(...)` writes Markdown to the configured PRD path inside the workspace.
* When `specContent` is empty, the spec pane keeps the same preview/load/edit controls in its header area.
* If `specContent` is empty and `prdContent` is present, the spec pane swaps to a dedicated generation state with a prompt textarea and generate button.
* If both `prdContent` and `specContent` are empty in preview mode, the spec pane shows a blocked state that asks for a PRD before generation while still allowing `Load Spec`.
-* The generate action sends the current PRD, the user's note, the selected model, and the selected reasoning profile through `src/lib/runtime.ts`.
-* `generate_spec_document(...)` runs the selected Claude CLI or Codex CLI in non-interactive mode from a temporary folder, resolves the active PRD path, and writes the returned markdown into a sibling `SPEC.md`/`spec.md` file beside that PRD.
+* The spec empty state shows the saved default spec prompt from `.specforge/settings.json` and explains that the note is appended after that prompt before generation.
+* The generate actions send the current prompt template, note, selected model, selected reasoning profile, and configured output path through `src/lib/runtime.ts`.
+* `generate_spec_document(...)` runs the selected Claude CLI or Codex CLI in non-interactive mode from a temporary folder and writes the returned markdown into the configured spec path inside the workspace.
* The saved spec document metadata is returned to the frontend so the spec pane reflects the on-disk path immediately; execution remains a separate simulated flow.
-### 4.5. Browser `.gitignore` behavior
-
-Browser folder imports normalize root-prefixed paths and apply root plus nested `.gitignore` rules before building the workspace tree.
-
## 5. Tauri Command Surface
The current Tauri commands are:
@@ -92,11 +83,15 @@ The current Tauri commands are:
* `run_environment_scan(claudePath?: string, codexPath?: string)`
* `parse_document(filePath: string)`
* `pick_document()`
+* `pick_project_folder()`
+* `load_project_context(folderPath: string)`
+* `save_project_settings(folderPath: string, settings: ProjectSettings)`
* `open_workspace_folder()`
* `read_workspace_file(filePath: string)`
* `get_workspace_snapshot()`
* `git_get_diff()`
-* `generate_spec_document(prdPath: string, prdContent: string, userPrompt: string, provider: string, model: string, reasoning: string, claudePath?: string, codexPath?: string)`
+* `generate_prd_document(workspaceRoot: string, outputPath: string, promptTemplate: string, userPrompt: string, provider: string, model: string, reasoning: string, claudePath?: string, codexPath?: string)`
+* `generate_spec_document(workspaceRoot: string, outputPath: string, prdContent: string, promptTemplate: string, userPrompt: string, provider: string, model: string, reasoning: string, claudePath?: string, codexPath?: string)`
* `spawn_cli_agent(specPayload: string, mode: string, model: string, reasoning: string)`
* `approve_action()`
* `kill_agent_process()`
@@ -126,18 +121,19 @@ The current execution runtime is **simulated**:
This is a review-and-approval shell, not a real CLI orchestration engine yet.
-The spec generation flow is separate: it uses the configured Claude/Codex CLI to draft markdown, saves that markdown next to the active PRD, and loads the saved file into the spec pane. It does not replace the simulated execution loop.
+The PRD/spec generation flows are separate from execution: they use the configured Claude/Codex CLI to draft markdown, save that markdown to the configured project-relative Markdown targets, and load the saved file into the review pane. They do not replace the simulated execution loop.
## 7. Environment And Settings
* CLI health is derived from executable probing, not just path existence.
* Manual override paths can be relative to the repo or absolute on disk.
-* Theme preference is stored in browser local storage and resolved into Dracula, Light, or System behavior in the webview.
+* Theme preference plus CLI override paths are stored in browser local storage.
+* The last opened project path is stored in browser local storage so the desktop app can restore project setup on the next launch.
+* Project-specific model/reasoning defaults, prompt templates, and document paths are stored in `.specforge/settings.json` inside the selected workspace.
* The review sidebar now presents only agent configuration controls plus an MCP summary list derived from the current runtime/tool health.
## 8. Known Limits
* Opened workspace file tabs are editable in-memory only; there is no save-to-disk flow.
-* Browser mode does not parse PDFs.
-* Browser mode does not support AI-backed spec generation.
+* The current project-setup flow expects the desktop runtime for real `.specforge/settings.json` persistence.
* The app presents model and approval controls, but the current run loop is simulated rather than connected to real workspace-mutating Claude/Codex execution.
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 76c063c..a9bf34d 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -1,7 +1,7 @@
use git2::{DiffFormat, DiffOptions, Repository};
use ignore::WalkBuilder;
use lopdf::Document;
-use serde::Serialize;
+use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs,
@@ -73,6 +73,32 @@ struct WorkspaceDocument {
file_name: String,
}
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct ProjectSettings {
+ selected_model: String,
+ selected_reasoning: String,
+ prd_prompt: String,
+ spec_prompt: String,
+ prd_path: String,
+ spec_path: String,
+ supporting_document_paths: Vec<String>,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct ProjectContextPayload {
+ root_name: String,
+ root_path: String,
+ settings_path: String,
+ has_saved_settings: bool,
+ settings: ProjectSettings,
+ entries: Vec<WorkspaceEntry>,
+ ignored_file_count: usize,
+ prd_document: Option<WorkspaceDocument>,
+ spec_document: Option<WorkspaceDocument>,
+}
+
#[derive(Clone, Serialize)]
#[serde(rename_all = "camelCase")]
struct WorkspaceScanResult {
@@ -125,6 +151,36 @@ index 0000000..forge42 100644
+ Introduce PRD/spec review workspace with execution controls
+ Add Dracula-first theme tokens and persisted preferences
+ Surface CLI health, diff approvals, and terminal streaming"#;
+const SPECFORGE_SETTINGS_RELATIVE_PATH: &str = ".specforge/settings.json";
+const DEFAULT_PROJECT_PRD_PATH: &str = "docs/PRD.md";
+const DEFAULT_PROJECT_SPEC_PATH: &str = "docs/SPEC.md";
+const DEFAULT_PRD_PROMPT: &str = r#"Act as an Expert Senior Product Manager. Your goal is to help me write a comprehensive, well-structured Product Requirements Document (PRD) for a new [product / feature / app] called [Project Name].
+
+I have some initial ideas, but I want to make sure the PRD is thorough. Before you draft the full document, please ask me a series of clarifying questions to gather the necessary context.
+
+Please ask about:
+- The core problem we are solving
+- The target audience/user personas
+- Key features and user flows
+- Success metrics (KPIs)
+- Technical or timeline constraints
+
+Ask me these questions one or two at a time so I do not get overwhelmed. Once you have enough context, we will move on to drafting the actual PRD."#;
+const DEFAULT_SPEC_PROMPT: &str = r#"Act as an Expert Software Architect and Tech Lead. I have attached the Product Requirements Document (PRD) for our upcoming project.
+
+Your task is to analyze this PRD and draft a comprehensive Technical Specification Document.
+
+Please structure the spec with the following sections:
+
+1. High-Level Architecture: A conceptual overview of how the system components will interact.
+2. Tech Stack & Tooling: Define the frontend, backend, and infrastructure.
+3. Data Models & Database Schema: Define the core entities, their attributes, and relationships.
+4. API Contracts: Outline the primary endpoints (methods, routes, request/response structures) needed to support the user flows.
+5. Component & State Management: How data will flow through the application and how the UI will be structured.
+6. Security & Edge Cases: Potential vulnerabilities, error handling, and performance bottlenecks.
+7. Engineering Milestones: Break the implementation down into logical, phased deliverables.
+
+Before writing the full document, please provide a brief bulleted summary of your proposed technical approach, and ask me up to 3 clarifying questions about any technical constraints or non-functional requirements that might be missing from the PRD."#;
#[tauri::command]
fn run_environment_scan(
@@ -170,6 +226,74 @@ fn pick_document() -> Result<Option<WorkspaceDocument>, String> {
}))
}
+#[tauri::command]
+fn pick_project_folder(state: State<'_, AppState>) -> Result<Option<ProjectContextPayload>, String> {
+ let Some(folder_path) = rfd::FileDialog::new().pick_folder() else {
+ return Ok(None);
+ };
+
+ load_project_context_from_folder(&state, &folder_path).map(Some)
+}
+
+#[tauri::command]
+fn load_project_context(
+ state: State<'_, AppState>,
+ folder_path: String,
+) -> Result<ProjectContextPayload, String> {
+ let trimmed_path = folder_path.trim();
+
+ if trimmed_path.is_empty() {
+ return Err(String::from("A workspace folder path is required."));
+ }
+
+ load_project_context_from_folder(&state, &PathBuf::from(trimmed_path))
+}
+
+#[tauri::command]
+fn save_project_settings(
+ folder_path: String,
+ settings: ProjectSettings,
+) -> Result<ProjectSettings, String> {
+ let trimmed_path = folder_path.trim();
+
+ if trimmed_path.is_empty() {
+ return Err(String::from("A workspace folder path is required."));
+ }
+
+ let workspace_root =
+ canonicalize_existing_path(&PathBuf::from(trimmed_path)).map_err(|error| {
+ format!(
+ "Unable to resolve the selected workspace folder {}: {error}",
+ trimmed_path
+ )
+ })?;
+ let default_settings = build_default_project_settings(&workspace_root, None, None);
+ let normalized_settings =
+ normalize_project_settings(&workspace_root, default_settings, Some(settings))?;
+ let settings_path = workspace_root.join(SPECFORGE_SETTINGS_RELATIVE_PATH);
+ let settings_directory = settings_path
+ .parent()
+ .ok_or_else(|| String::from("Unable to resolve the .specforge directory."))?;
+
+ fs::create_dir_all(settings_directory).map_err(|error| {
+ format!(
+ "Unable to create the project settings directory {}: {error}",
+ settings_directory.display()
+ )
+ })?;
+ let settings_json = serde_json::to_string_pretty(&normalized_settings)
+ .map_err(|error| format!("Unable to encode project settings: {error}"))?;
+
+ fs::write(&settings_path, settings_json.as_bytes()).map_err(|error| {
+ format!(
+ "Unable to write project settings to {}: {error}",
+ settings_path.display()
+ )
+ })?;
+
+ Ok(normalized_settings)
+}
+
#[tauri::command]
fn open_workspace_folder(state: State<'_, AppState>) -> Result<Option<WorkspaceScanResult>, String> {
let Some(folder_path) = rfd::FileDialog::new().pick_folder() else {
@@ -369,6 +493,235 @@ fn scan_workspace_folder(root: &Path) -> Result {
})
}
+fn load_project_context_from_folder(
+ state: &State<'_, AppState>,
+ folder_path: &Path,
+) -> Result<ProjectContextPayload, String> {
+ let scanned_workspace = scan_workspace_folder(folder_path)?;
+ let settings_path = scanned_workspace
+ .context
+ .root
+ .join(SPECFORGE_SETTINGS_RELATIVE_PATH);
+ let default_settings = build_default_project_settings(
+ &scanned_workspace.context.root,
+ scanned_workspace.result.prd_document.as_ref(),
+ scanned_workspace.result.spec_document.as_ref(),
+ );
+ let (settings, has_saved_settings) = read_project_settings(
+ &settings_path,
+ &scanned_workspace.context.root,
+ default_settings,
+ )?;
+ let prd_document =
+ load_configured_workspace_document(&scanned_workspace.context.root, &settings.prd_path)?;
+ let spec_document =
+ load_configured_workspace_document(&scanned_workspace.context.root, &settings.spec_path)?;
+ let mut active_workspace = state
+ .workspace
+ .lock()
+ .map_err(|_| String::from("Workspace lock was poisoned."))?;
+ *active_workspace = Some(scanned_workspace.context);
+
+ Ok(ProjectContextPayload {
+ root_name: scanned_workspace.result.root_name,
+ root_path: active_workspace
+ .as_ref()
+ .map(|workspace| workspace.root.display().to_string())
+ .unwrap_or_default(),
+ settings_path: settings_path.display().to_string(),
+ has_saved_settings,
+ settings,
+ entries: scanned_workspace.result.entries,
+ ignored_file_count: scanned_workspace.result.ignored_file_count,
+ prd_document,
+ spec_document,
+ })
+}
+
+fn build_default_project_settings(
+ workspace_root: &Path,
+ prd_document: Option<&WorkspaceDocument>,
+ spec_document: Option<&WorkspaceDocument>,
+) -> ProjectSettings {
+ ProjectSettings {
+ selected_model: String::from("gpt-5.4"),
+ selected_reasoning: String::from("medium"),
+ prd_prompt: String::from(DEFAULT_PRD_PROMPT),
+ spec_prompt: String::from(DEFAULT_SPEC_PROMPT),
+ prd_path: derive_default_document_path(
+ workspace_root,
+ prd_document,
+ DEFAULT_PROJECT_PRD_PATH,
+ ),
+ spec_path: derive_default_document_path(
+ workspace_root,
+ spec_document,
+ DEFAULT_PROJECT_SPEC_PATH,
+ ),
+ supporting_document_paths: Vec::new(),
+ }
+}
+
+fn read_project_settings(
+ settings_path: &Path,
+ workspace_root: &Path,
+ defaults: ProjectSettings,
+) -> Result<(ProjectSettings, bool), String> {
+ if !settings_path.exists() {
+ return Ok((defaults, false));
+ }
+
+ let raw_settings = fs::read_to_string(settings_path).map_err(|error| {
+ format!(
+ "Unable to read project settings {}: {error}",
+ settings_path.display()
+ )
+ })?;
+ let parsed_settings =
+ serde_json::from_str::<ProjectSettings>(&raw_settings).map_err(|error| {
+ format!(
+ "Unable to parse project settings {}: {error}",
+ settings_path.display()
+ )
+ })?;
+
+ Ok((
+ normalize_project_settings(workspace_root, defaults, Some(parsed_settings))?,
+ true,
+ ))
+}
+
+fn normalize_project_settings(
+ workspace_root: &Path,
+ defaults: ProjectSettings,
+ provided: Option<ProjectSettings>,
+) -> Result<ProjectSettings, String> {
+ let Some(provided) = provided else {
+ return Ok(defaults);
+ };
+
+ let selected_model =
+ normalize_project_model(&provided.selected_model, &defaults.selected_model);
+ let selected_reasoning =
+ normalize_project_reasoning(&provided.selected_reasoning, &defaults.selected_reasoning);
+ let normalized_prd_path =
+ normalize_project_path_or_default(workspace_root, &provided.prd_path, &defaults.prd_path)?;
+ let normalized_spec_path = normalize_project_path_or_default(
+ workspace_root,
+ &provided.spec_path,
+ &defaults.spec_path,
+ )?;
+ let supporting_document_paths = provided
+ .supporting_document_paths
+ .iter()
+ .filter_map(|entry| normalize_relative_path(entry).ok())
+ .collect::<Vec<_>>();
+
+ Ok(ProjectSettings {
+ selected_model,
+ selected_reasoning,
+ prd_prompt: if provided.prd_prompt.trim().is_empty() {
+ defaults.prd_prompt
+ } else {
+ provided.prd_prompt.trim().to_string()
+ },
+ spec_prompt: if provided.spec_prompt.trim().is_empty() {
+ defaults.spec_prompt
+ } else {
+ provided.spec_prompt.trim().to_string()
+ },
+ prd_path: normalized_prd_path,
+ spec_path: normalized_spec_path,
+ supporting_document_paths,
+ })
+}
+
+fn normalize_project_model(value: &str, fallback: &str) -> String {
+ const VALID_MODELS: &[&str] = &[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.3-codex",
+ "gpt-5.2",
+ "claude-opus-4-1-20250805",
+ "claude-opus-4-20250514",
+ "claude-sonnet-4-20250514",
+ "claude-3-7-sonnet-20250219",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-haiku-20241022",
+ "claude-3-haiku-20240307",
+ ];
+
+ if VALID_MODELS.contains(&value.trim()) {
+ return value.trim().to_string();
+ }
+
+ fallback.to_string()
+}
+
+fn normalize_project_reasoning(value: &str, fallback: &str) -> String {
+ match value.trim() {
+ "low" | "medium" | "high" | "max" => value.trim().to_string(),
+ _ => fallback.to_string(),
+ }
+}
+
+fn normalize_project_path_or_default(
+ workspace_root: &Path,
+ value: &str,
+ fallback: &str,
+) -> Result<String, String> {
+ if value.trim().is_empty() {
+ return Ok(fallback.to_string());
+ }
+
+ normalize_relative_path(value).map_err(|error| {
+ format!(
+ "Invalid project document path for workspace {}: {error}",
+ workspace_root.display()
+ )
+ })
+}
+
+fn derive_default_document_path(
+ workspace_root: &Path,
+ document: Option<&WorkspaceDocument>,
+ fallback: &str,
+) -> String {
+ document
+ .and_then(|entry| {
+ PathBuf::from(&entry.source_path)
+ .strip_prefix(workspace_root)
+ .ok()
+ .map(|path| path.to_string_lossy().replace('\\', "/"))
+ })
+ .unwrap_or_else(|| fallback.to_string())
+}
+
+fn load_configured_workspace_document(
+ workspace_root: &Path,
+ relative_path: &str,
+) -> Result<Option<WorkspaceDocument>, String> {
+ let resolved_path = resolve_relative_path_under_root(workspace_root, relative_path)?;
+
+ if !resolved_path.exists() {
+ return Ok(None);
+ }
+
+ let content = parse_workspace_document(&resolved_path)?;
+ let file_name = resolved_path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or("Document")
+ .to_string();
+
+ Ok(Some(WorkspaceDocument {
+ content,
+ source_path: resolved_path.display().to_string(),
+ file_name,
+ }))
+}
+
#[tauri::command]
fn git_get_diff() -> Result<String, String> {
let repository = Repository::discover(project_root())
@@ -408,10 +761,50 @@ fn git_get_diff() -> Result {
Ok(rendered)
}
+#[tauri::command]
+fn generate_prd_document(
+ workspace_root: String,
+ output_path: String,
+ prompt_template: String,
+ user_prompt: String,
+ provider: String,
+ model: String,
+ reasoning: String,
+ claude_path: Option,
+ codex_path: Option,
+) -> Result<WorkspaceDocument, String> {
+ let trimmed_prompt = user_prompt.trim();
+
+ if trimmed_prompt.is_empty() {
+ return Err(String::from(
+ "Add the product context you want the AI to consider.",
+ ));
+ }
+
+ let prompt_payload = build_generation_prompt(&prompt_template, trimmed_prompt, &[]);
+ let generated_prd = run_generation_request(
+ &provider,
+ &model,
+ &reasoning,
+ claude_path.as_deref(),
+ codex_path.as_deref(),
+ &prompt_payload,
+ )?;
+
+ write_generated_workspace_document(
+ &workspace_root,
+ &output_path,
+ generated_prd,
+ "PRD output path",
+ )
+}
+
#[tauri::command]
fn generate_spec_document(
- prd_path: String,
+ workspace_root: String,
+ output_path: String,
prd_content: String,
+ prompt_template: String,
user_prompt: String,
provider: String,
model: String,
@@ -434,53 +827,26 @@ fn generate_spec_document(
));
}
- let prompt_payload = build_spec_generation_prompt(trimmed_prd, trimmed_prompt);
- let generated_spec = match provider.as_str() {
- "codex" => run_codex_spec_generation(
- &resolve_cli_binary("codex", codex_path.as_deref())?,
- &model,
- &reasoning,
- &prompt_payload,
- )?,
- "claude" => run_claude_spec_generation(
- &resolve_cli_binary("claude", claude_path.as_deref())?,
- &model,
- &reasoning,
- &prompt_payload,
- )?,
- _ => return Err(format!("Unsupported model provider: {provider}")),
- };
-
- let normalized_spec = strip_wrapping_code_fence(generated_spec.trim());
- let rendered_spec = format!("{}\n", normalized_spec.trim());
-
- if rendered_spec.trim().is_empty() {
- return Err(String::from(
- "The AI returned an empty specification. Adjust the prompt and try again.",
- ));
- }
-
- let resolved_prd_path = resolve_existing_document_path(&prd_path)?;
- let saved_spec_path = build_generated_spec_path(&resolved_prd_path);
-
- fs::write(&saved_spec_path, rendered_spec.as_bytes()).map_err(|error| {
- format!(
- "Unable to save the generated spec to {}: {error}",
- saved_spec_path.display()
- )
- })?;
-
- let file_name = saved_spec_path
- .file_name()
- .and_then(|value| value.to_str())
- .unwrap_or("SPEC.md")
- .to_string();
-
- Ok(WorkspaceDocument {
- content: rendered_spec,
- source_path: saved_spec_path.display().to_string(),
- file_name,
- })
+ let prompt_payload = build_generation_prompt(
+ &prompt_template,
+ trimmed_prompt,
+ &[("Attached Product Requirements Document (PRD)", trimmed_prd)],
+ );
+ let generated_spec = run_generation_request(
+ &provider,
+ &model,
+ &reasoning,
+ claude_path.as_deref(),
+ codex_path.as_deref(),
+ &prompt_payload,
+ )?;
+
+ write_generated_workspace_document(
+ &workspace_root,
+ &output_path,
+ generated_spec,
+ "SPEC output path",
+ )
}
#[tauri::command]
@@ -543,10 +909,14 @@ pub fn run() {
run_environment_scan,
parse_document,
pick_document,
+ pick_project_folder,
+ load_project_context,
+ save_project_settings,
open_workspace_folder,
read_workspace_file,
get_workspace_snapshot,
git_get_diff,
+ generate_prd_document,
generate_spec_document,
spawn_cli_agent,
approve_action,
@@ -790,54 +1160,74 @@ fn resolve_workspace_file_path(
Ok(canonical_path)
}
-fn resolve_existing_document_path(path_value: &str) -> Result {
- let trimmed_value = path_value.trim();
+fn resolve_relative_path_under_root(root: &Path, relative_path: &str) -> Result {
+ let normalized_path = normalize_relative_path(relative_path)?;
+ Ok(root.join(normalized_path))
+}
- if trimmed_value.is_empty() {
+fn write_generated_workspace_document(
+ workspace_root: &str,
+ output_path: &str,
+ generated_content: String,
+ field_name: &str,
+) -> Result {
+ let trimmed_root = workspace_root.trim();
+
+ if trimmed_root.is_empty() {
+ return Err(String::from("A workspace root is required."));
+ }
+
+ let canonical_root = canonicalize_existing_path(&PathBuf::from(trimmed_root))
+ .map_err(|error| format!("Unable to resolve workspace root {}: {error}", trimmed_root))?;
+ let resolved_output_path = resolve_relative_path_under_root(&canonical_root, output_path)
+ .map_err(|error| format!("{field_name} is invalid: {error}"))?;
+ let rendered_document = format!(
+ "{}\n",
+ strip_wrapping_code_fence(generated_content.trim()).trim()
+ );
+
+ if rendered_document.trim().is_empty() {
return Err(String::from(
- "A PRD path is required to save the generated spec.",
+ "The AI returned an empty document. Adjust the prompt and try again.",
));
}
- let candidate = PathBuf::from(trimmed_value);
+ if resolved_output_path
+ .extension()
+ .and_then(|value| value.to_str())
+ .map(|value| !value.eq_ignore_ascii_case("md"))
+ .unwrap_or(true)
+ {
+ return Err(format!(
+ "{field_name} must point to a Markdown file inside the selected workspace."
+ ));
+ }
- if candidate.is_absolute() {
- return canonicalize_existing_path(&candidate).map_err(|error| {
+ if let Some(parent_directory) = resolved_output_path.parent() {
+ fs::create_dir_all(parent_directory).map_err(|error| {
format!(
- "Unable to resolve PRD path {}: {error}",
- candidate.display()
+ "Unable to create the document folder {}: {error}",
+ parent_directory.display()
)
- });
+ })?;
}
- resolve_project_document_path(trimmed_value)
-}
-
-fn build_generated_spec_path(prd_path: &Path) -> PathBuf {
- let file_name = prd_path
- .file_name()
- .and_then(|value| value.to_str())
- .map(derive_spec_file_name)
- .unwrap_or_else(|| String::from("SPEC.md"));
-
- prd_path
- .parent()
- .unwrap_or_else(|| Path::new("."))
- .join(file_name)
-}
-
-fn derive_spec_file_name(prd_file_name: &str) -> String {
- let normalized = prd_file_name.to_ascii_lowercase();
-
- if normalized == "prd.md" || normalized == "prd.pdf" {
- return if prd_file_name == prd_file_name.to_ascii_lowercase() {
- String::from("spec.md")
- } else {
- String::from("SPEC.md")
- };
- }
+ fs::write(&resolved_output_path, rendered_document.as_bytes()).map_err(|error| {
+ format!(
+ "Unable to save the generated document to {}: {error}",
+ resolved_output_path.display()
+ )
+ })?;
- String::from("SPEC.md")
+ Ok(WorkspaceDocument {
+ content: rendered_document,
+ source_path: resolved_output_path.display().to_string(),
+ file_name: resolved_output_path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or("Document.md")
+ .to_string(),
+ })
}
fn resolve_override_path(path_value: &str) -> PathBuf {
@@ -957,25 +1347,59 @@ fn resolve_cli_binary(binary_name: &str, override_path: Option<&str>) -> Result<
})
}
-fn build_spec_generation_prompt(prd_content: &str, user_prompt: &str) -> String {
- format!(
- concat!(
- "You are drafting a technical specification document from a PRD and operator notes.\n",
- "Return only the final markdown for the SPEC document.\n",
- "Do not use shell commands, tools, or external files. Work only from the provided text.\n",
- "Use concrete technical detail, clear section headings, and explicit assumptions when the PRD leaves a gap.\n",
- "Include architecture, workflows, data/contracts, constraints, edge cases, and acceptance criteria when relevant.\n\n",
- "# Operator Notes\n",
- "{user_prompt}\n\n",
- "# PRD\n",
- "{prd_content}\n"
+fn build_generation_prompt(
+ prompt_template: &str,
+ user_prompt: &str,
+ attachments: &[(&str, &str)],
+) -> String {
+ let mut prompt = String::new();
+ prompt.push_str(prompt_template.trim());
+ prompt.push_str("\n\n");
+ prompt.push_str("Additional operator context:\n");
+ prompt.push_str(user_prompt.trim());
+
+ for (label, content) in attachments {
+ let trimmed_content = content.trim();
+
+ if trimmed_content.is_empty() {
+ continue;
+ }
+
+ prompt.push_str("\n\n");
+ prompt.push_str(label);
+ prompt.push_str(":\n");
+ prompt.push_str(trimmed_content);
+ }
+
+ prompt
+}
+
+fn run_generation_request(
+ provider: &str,
+ model: &str,
+ reasoning: &str,
+ claude_path: Option<&str>,
+ codex_path: Option<&str>,
+ prompt_payload: &str,
+) -> Result {
+ match provider {
+ "codex" => run_codex_generation(
+ &resolve_cli_binary("codex", codex_path)?,
+ model,
+ reasoning,
+ prompt_payload,
),
- user_prompt = user_prompt,
- prd_content = prd_content
- )
+ "claude" => run_claude_generation(
+ &resolve_cli_binary("claude", claude_path)?,
+ model,
+ reasoning,
+ prompt_payload,
+ ),
+ _ => Err(format!("Unsupported model provider: {provider}")),
+ }
}
-fn run_codex_spec_generation(
+fn run_codex_generation(
binary_path: &Path,
model: &str,
reasoning: &str,
@@ -1030,7 +1454,7 @@ fn run_codex_spec_generation(
result
}
-fn run_claude_spec_generation(
+fn run_claude_generation(
binary_path: &Path,
model: &str,
reasoning: &str,
@@ -1044,9 +1468,7 @@ fn run_claude_spec_generation(
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.arg("--print")
- .arg(
- "Using the piped PRD and operator notes, write a complete technical specification in markdown and return only the specification.",
- )
+ .arg("Respond to the request provided on stdin.")
.arg("--model")
.arg(model)
.arg("--output-format")
diff --git a/src/App.tsx b/src/App.tsx
index 0dc6d75..b5771f1 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -1,6 +1,6 @@
import {
- useCallback,
startTransition,
+ useCallback,
useDeferredValue,
useEffect,
useMemo,
@@ -12,74 +12,79 @@ import {
Navigate,
Route,
Routes,
- useLocation
+ useLocation,
+ useNavigate
} from "react-router-dom";
-import bundledPrd from "../docs/PRD.md?raw";
-import bundledSpec from "../docs/SPEC.md?raw";
import { AppRail } from "./components/AppRail";
import {
- DocumentTarget,
FallbackStep,
WorkspaceFileSource,
- WorkspaceSelectionPayload,
buildFallbackSteps,
clearFallbackTimer,
- collectWorkspaceFiles,
filterWorkspaceEntries,
- getDirectoryPicker,
- isDirectoryPickerAbort,
isOpenableWorkspacePath,
- normalizeWorkspacePath,
resolveTheme,
runFallbackStep,
- stampLog
+ stampLog,
+ type DocumentTarget
} from "./lib/appShell";
import {
getModelLabel,
- getModelOptions,
getModelProvider,
getReasoningLabel
} from "./lib/agentConfig";
+import {
+ DEFAULT_PROJECT_PRD_PATH,
+ DEFAULT_PROJECT_SPEC_PATH,
+ SPECFORGE_SETTINGS_RELATIVE_PATH,
+ formatSupportingDocumentPaths,
+ normalizeProjectRelativePath,
+ normalizeProjectSettings,
+ parseSupportingDocumentPaths
+} from "./lib/projectConfig";
import {
DEFAULT_PENDING_DIFF,
approveAgentAction,
emergencyStop,
+ generatePrdDocument,
generateSpecDocument,
getGitDiff,
getWorkspaceSnapshot,
isTauriRuntime,
- openWorkspaceFolder,
- parseDocument,
+ loadProjectContext,
pickDocument,
+ pickProjectFolder,
readWorkspaceFile,
runEnvironmentScan,
+ saveProjectSettings,
startAgentRun,
subscribeToAgentEvents
} from "./lib/runtime";
import {
- buildWorkspaceImportSnapshot,
- filterWorkspaceFiles,
- findProjectDocuments,
isOpenableTextFile,
parseWorkspaceDocument,
parseWorkspaceTextFile,
type ImportableFile
} from "./lib/workspaceImport";
+import { ConfigurationScreen } from "./screens/ConfigurationScreen";
+import { PrdScreen } from "./screens/PrdScreen";
+import { SettingsScreen } from "./screens/SettingsScreen";
import { useAgentStore } from "./store/useAgentStore";
import { useProjectStore } from "./store/useProjectStore";
import { useSettingsStore } from "./store/useSettingsStore";
-import { PrdScreen } from "./screens/PrdScreen";
-import { SettingsScreen } from "./screens/SettingsScreen";
import type {
EnvironmentStatus,
- ModelProvider
+ ModelProvider,
+ ProjectContext
} from "./types";
function App() {
const location = useLocation();
- const isSettingsRoute = location.pathname === "/settings";
+ const navigate = useNavigate();
+ const isReviewRoute = location.pathname === "/review";
const desktopRuntime = isTauriRuntime();
+
const agentStatus = useAgentStore((state) => state.status);
const terminalOutput = useAgentStore((state) => state.terminalOutput);
const pendingDiff = useAgentStore((state) => state.pendingDiff);
@@ -95,61 +100,80 @@ function App() {
const annotations = useProjectStore((state) => state.annotations);
const activeTab = useProjectStore((state) => state.activeTab);
const autonomyMode = useProjectStore((state) => state.autonomyMode);
+ const configuredPrdPath = useProjectStore((state) => state.configuredPrdPath);
+ const configuredSpecPath = useProjectStore((state) => state.configuredSpecPath);
const isSpecApproved = useProjectStore((state) => state.isSpecApproved);
const openEditorTabs = useProjectStore((state) => state.openEditorTabs);
const prdContent = useProjectStore((state) => state.prdContent);
const prdPaneMode = useProjectStore((state) => state.prdPaneMode);
const prdPath = useProjectStore((state) => state.prdPath);
- const reviewPrompt = useProjectStore((state) => state.reviewPrompt);
+ const prdPromptTemplate = useProjectStore((state) => state.prdPromptTemplate);
const selectedModel = useProjectStore((state) => state.selectedModel);
const selectedReasoning = useProjectStore((state) => state.selectedReasoning);
const selectedSpecRange = useProjectStore((state) => state.selectedSpecRange);
const specContent = useProjectStore((state) => state.specContent);
const specPaneMode = useProjectStore((state) => state.specPaneMode);
const specPath = useProjectStore((state) => state.specPath);
+ const specPromptTemplate = useProjectStore((state) => state.specPromptTemplate);
+ const supportingDocumentPaths = useProjectStore((state) => state.supportingDocumentPaths);
const approveSpec = useProjectStore((state) => state.approveSpec);
- const applyRefinement = useProjectStore((state) => state.applyRefinement);
const closeEditorTab = useProjectStore((state) => state.closeEditorTab);
const openEditorTab = useProjectStore((state) => state.openEditorTab);
const resetWorkspaceContext = useProjectStore((state) => state.resetWorkspaceContext);
const setActiveTab = useProjectStore((state) => state.setActiveTab);
const setAutonomyMode = useProjectStore((state) => state.setAutonomyMode);
+ const setConfiguredPrdPath = useProjectStore((state) => state.setConfiguredPrdPath);
+ const setConfiguredSpecPath = useProjectStore((state) => state.setConfiguredSpecPath);
const setPrdContent = useProjectStore((state) => state.setPrdContent);
const setPrdPaneMode = useProjectStore((state) => state.setPrdPaneMode);
+ const setPrdPromptTemplate = useProjectStore((state) => state.setPrdPromptTemplate);
+ const setProjectSettings = useProjectStore((state) => state.setProjectSettings);
const setReasoningProfile = useProjectStore((state) => state.setReasoningProfile);
- const setReviewPrompt = useProjectStore((state) => state.setReviewPrompt);
const setSelectedModel = useProjectStore((state) => state.setSelectedModel);
const setSelectedSpecRange = useProjectStore((state) => state.setSelectedSpecRange);
const setSpecContent = useProjectStore((state) => state.setSpecContent);
const setSpecPaneMode = useProjectStore((state) => state.setSpecPaneMode);
+ const setSpecPromptTemplate = useProjectStore((state) => state.setSpecPromptTemplate);
+ const setSupportingDocumentPaths = useProjectStore((state) => state.setSupportingDocumentPaths);
const updateEditorTabContent = useProjectStore((state) => state.updateEditorTabContent);
const claudePath = useSettingsStore((state) => state.claudePath);
const codexPath = useSettingsStore((state) => state.codexPath);
const environment = useSettingsStore((state) => state.environment);
+ const lastProjectPath = useSettingsStore((state) => state.lastProjectPath);
const theme = useSettingsStore((state) => state.theme);
const workspaceEntries = useSettingsStore((state) => state.workspaceEntries);
const setClaudePath = useSettingsStore((state) => state.setClaudePath);
const setCodexPath = useSettingsStore((state) => state.setCodexPath);
const setEnvironment = useSettingsStore((state) => state.setEnvironment);
+ const setLastProjectPath = useSettingsStore((state) => state.setLastProjectPath);
const setTheme = useSettingsStore((state) => state.setTheme);
const setWorkspaceEntries = useSettingsStore((state) => state.setWorkspaceEntries);
+
const [commandSearch, setCommandSearch] = useState("");
- const [importPath, setImportPath] = useState("docs/PRD.md");
- const [importTarget, setImportTarget] = useState("prd");
- const [importError, setImportError] = useState("");
const [isImporting, setIsImporting] = useState(false);
const [isSearchOpen, setIsSearchOpen] = useState(false);
+ const [isProjectLoading, setIsProjectLoading] = useState(false);
+ const [isProjectSaving, setIsProjectSaving] = useState(false);
const [latestDiff, setLatestDiff] = useState(DEFAULT_PENDING_DIFF);
- const [systemPrefersDark, setSystemPrefersDark] = useState(true);
- const [workspaceRootName, setWorkspaceRootName] = useState("SpecForge");
+ const [projectConfigPath, setProjectConfigPath] = useState("");
+ const [projectErrorMessage, setProjectErrorMessage] = useState("");
+ const [projectRootName, setProjectRootName] = useState("No project selected");
+ const [projectRootPath, setProjectRootPath] = useState("");
+ const [projectStatusMessage, setProjectStatusMessage] = useState("");
const [workspaceNotice, setWorkspaceNotice] = useState(
- "Open a folder to scan for PRD/spec files and build the workspace tree."
+ "Finish the setup flow to load a project workspace."
);
- const [hasOpenedWorkspaceFolder, setHasOpenedWorkspaceFolder] = useState(false);
+ const [hasSavedProjectSettings, setHasSavedProjectSettings] = useState(false);
+ const [hasSelectedProject, setHasSelectedProject] = useState(false);
+ const [hasAttemptedProjectRestore, setHasAttemptedProjectRestore] = useState(!desktopRuntime);
+ const [systemPrefersDark, setSystemPrefersDark] = useState(true);
const [workspaceFiles, setWorkspaceFiles] = useState>({});
+ const [prdGenerationPrompt, setPrdGenerationPrompt] = useState("");
+ const [prdGenerationError, setPrdGenerationError] = useState("");
const [specGenerationPrompt, setSpecGenerationPrompt] = useState("");
const [specGenerationError, setSpecGenerationError] = useState("");
+
const searchInputRef = useRef(null);
const fileInputRef = useRef(null);
const folderInputRef = useRef(null);
@@ -157,9 +181,11 @@ function App() {
const fallbackTimerRef = useRef(null);
const fallbackStepsRef = useRef([]);
const fallbackIndexRef = useRef(0);
- const hasInitializedDocumentsRef = useRef(false);
const hasScannedEnvironmentRef = useRef(false);
+ const projectSaveTimerRef = useRef(null);
+ const pendingProjectReloadRef = useRef(false);
const deferredSearch = useDeferredValue(commandSearch);
+
const filteredWorkspaceEntries = useMemo(
() => filterWorkspaceEntries(workspaceEntries, deferredSearch),
[deferredSearch, workspaceEntries]
@@ -168,10 +194,7 @@ function App() {
() => getModelProvider(selectedModel),
[selectedModel]
);
- const selectedSpecText = useMemo(
- () => selectedSpecRange?.text.trim() || "",
- [selectedSpecRange]
- );
+ const isGeneratingPrd = agentStatus === "generating_prd";
const isGeneratingSpec = agentStatus === "generating_spec";
const visibleDiff = pendingDiff ?? latestDiff;
const resolvedTheme = useMemo(
@@ -193,55 +216,155 @@ function App() {
}, [environment.claude.status, environment.codex.status]);
const mcpItems = useMemo(
() => [
- {
- name: environment.codex.name,
- detail: environment.codex.detail,
- status: environment.codex.status
- },
- {
- name: environment.claude.name,
- detail: environment.claude.detail,
- status: environment.claude.status
- },
- {
- name: environment.git.name,
- detail: environment.git.detail,
- status: environment.git.status
- }
+ { name: environment.codex.name, detail: environment.codex.detail, status: environment.codex.status },
+ { name: environment.claude.name, detail: environment.claude.detail, status: environment.claude.status },
+ { name: environment.git.name, detail: environment.git.detail, status: environment.git.status }
],
[environment]
);
- const selectedProviderStatus = selectedModelProvider === "claude" ? environment.claude : environment.codex;
+ const selectedProviderStatus =
+ selectedModelProvider === "claude" ? environment.claude : environment.codex;
+ const currentProjectSettings = useMemo(
+ () =>
+ normalizeProjectSettings({
+ selectedModel,
+ selectedReasoning,
+ prdPrompt: prdPromptTemplate,
+ specPrompt: specPromptTemplate,
+ prdPath: configuredPrdPath || DEFAULT_PROJECT_PRD_PATH,
+ specPath: configuredSpecPath || DEFAULT_PROJECT_SPEC_PATH,
+ supportingDocumentPaths
+ }),
+ [
+ configuredPrdPath,
+ configuredSpecPath,
+ prdPromptTemplate,
+ selectedModel,
+ selectedReasoning,
+ specPromptTemplate,
+ supportingDocumentPaths
+ ]
+ );
+ const configPathDisplay = useMemo(() => {
+ if (projectConfigPath.trim()) {
+ return projectConfigPath;
+ }
+
+ if (projectRootPath.trim()) {
+ return `${projectRootPath.replace(/\\/g, "/")}/${SPECFORGE_SETTINGS_RELATIVE_PATH}`;
+ }
+
+ return SPECFORGE_SETTINGS_RELATIVE_PATH;
+ }, [projectConfigPath, projectRootPath]);
+ const supportingDocumentsValue = useMemo(
+ () => formatSupportingDocumentPaths(supportingDocumentPaths),
+ [supportingDocumentPaths]
+ );
+
+ const canGeneratePrd = useMemo(
+ () =>
+ desktopRuntime &&
+ !isGeneratingPrd &&
+ projectRootPath.trim().length > 0 &&
+ configuredPrdPath.trim().length > 0 &&
+ prdGenerationPrompt.trim().length > 0,
+ [
+ configuredPrdPath,
+ desktopRuntime,
+ isGeneratingPrd,
+ prdGenerationPrompt,
+ projectRootPath
+ ]
+ );
const canGenerateSpec = useMemo(
() =>
desktopRuntime &&
!isGeneratingSpec &&
+ projectRootPath.trim().length > 0 &&
prdContent.trim().length > 0 &&
+ configuredSpecPath.trim().length > 0 &&
specGenerationPrompt.trim().length > 0,
- [desktopRuntime, isGeneratingSpec, prdContent, specGenerationPrompt]
+ [
+ configuredSpecPath,
+ desktopRuntime,
+ isGeneratingSpec,
+ prdContent,
+ projectRootPath,
+ specGenerationPrompt
+ ]
);
+ const prdGenerationHelperText = useMemo(() => {
+ if (!desktopRuntime) {
+ return "AI PRD generation requires the desktop runtime.";
+ }
+
+ if (!projectRootPath.trim()) {
+ return "Choose a project folder in setup before generating a PRD.";
+ }
+
+ if (!configuredPrdPath.trim()) {
+ return "Configure a PRD path in setup or settings first.";
+ }
+
+ if (!configuredPrdPath.toLowerCase().endsWith(".md")) {
+ return "Configure the PRD path as a Markdown file if you want generated output saved into the workspace.";
+ }
+
+ if (!prdGenerationPrompt.trim()) {
+ return "Add the product context you want to append after the saved PRD prompt.";
+ }
+
+ if (selectedProviderStatus.status !== "found") {
+ return `${selectedProviderStatus.name} is not currently marked ready. Update its path in Settings and refresh if generation fails.`;
+ }
+
+ return `This appends your note after the saved PRD prompt from ${configPathDisplay}, runs ${getModelLabel(selectedModel)}, and writes markdown to ${configuredPrdPath}.`;
+ }, [
+ configPathDisplay,
+ configuredPrdPath,
+ desktopRuntime,
+ prdGenerationPrompt,
+ projectRootPath,
+ selectedModel,
+ selectedProviderStatus.name,
+ selectedProviderStatus.status
+ ]);
const specGenerationHelperText = useMemo(() => {
if (!desktopRuntime) {
return "AI spec generation requires the desktop runtime.";
}
+ if (!projectRootPath.trim()) {
+ return "Choose a project folder in setup before generating a spec.";
+ }
+
if (!prdContent.trim()) {
- return "Load or write a PRD first. The generator combines that PRD with your note.";
+ return "Load or generate a PRD first. The spec generator appends your note after the saved spec prompt and includes the current PRD content.";
+ }
+
+ if (!configuredSpecPath.trim()) {
+ return "Configure a spec path in setup or settings first.";
+ }
+
+ if (!configuredSpecPath.toLowerCase().endsWith(".md")) {
+ return "Configure the spec path as a Markdown file if you want generated output saved into the workspace.";
}
if (!specGenerationPrompt.trim()) {
- return "Add the technical guidance you want the AI to consider.";
+ return "Add the technical guidance you want to append after the saved spec prompt.";
}
if (selectedProviderStatus.status !== "found") {
- return `${selectedProviderStatus.name} is not currently marked ready. If generation fails, update its path in Settings and refresh.`;
+ return `${selectedProviderStatus.name} is not currently marked ready. Update its path in Settings and refresh if generation fails.`;
}
- return `This sends the current PRD and your note to ${getModelLabel(selectedModel)}, saves the generated markdown next to the PRD, and loads that saved file into the spec pane.`;
+ return `This appends your note after the saved spec prompt from ${configPathDisplay}, includes the current PRD content, and writes markdown to ${configuredSpecPath}.`;
}, [
+ configPathDisplay,
+ configuredSpecPath,
desktopRuntime,
prdContent,
- selectedModel,
+ projectRootPath,
selectedProviderStatus.name,
selectedProviderStatus.status,
specGenerationPrompt
@@ -254,13 +377,15 @@ function App() {
claudePath,
codexPath
}).catch(() => previousEnvironment ?? environment),
- getWorkspaceSnapshot().catch(() => workspaceEntries),
+ hasSelectedProject
+ ? Promise.resolve(workspaceEntries)
+ : getWorkspaceSnapshot().catch(() => workspaceEntries),
getGitDiff().catch(() => DEFAULT_PENDING_DIFF)
]);
setEnvironment(nextEnvironment);
- if (!hasOpenedWorkspaceFolder) {
+ if (!hasSelectedProject) {
setWorkspaceEntries(snapshotEntries);
}
@@ -270,7 +395,7 @@ function App() {
claudePath,
codexPath,
environment,
- hasOpenedWorkspaceFolder,
+ hasSelectedProject,
setEnvironment,
setWorkspaceEntries,
workspaceEntries
@@ -282,161 +407,194 @@ function App() {
startTransition(() => {
if (target === "prd") {
setPrdContent(content, path);
+ setPrdPaneMode("preview");
return;
}
setSpecContent(content, path);
- setSpecPaneMode("edit");
+ setSpecPaneMode("preview");
});
- if (target === "spec") {
- setSpecGenerationPrompt("");
- setSpecGenerationError("");
+ if (target === "prd") {
+ setPrdGenerationPrompt("");
+ setPrdGenerationError("");
+ return;
}
+
+ setSpecGenerationPrompt("");
+ setSpecGenerationError("");
},
- [setPrdContent, setSpecContent, setSpecPaneMode]
+ [setPrdContent, setPrdPaneMode, setSpecContent, setSpecPaneMode]
);
- const applyWorkspaceSelection = useCallback(
- ({
- rootName,
- entries,
- ignoredFileCount,
- files,
- prdDocument,
- specDocument
- }: WorkspaceSelectionPayload) => {
- const loadedDocuments: string[] = [];
+ const applyProjectContext = useCallback(
+ (context: ProjectContext, options?: { navigateToReview?: boolean }) => {
+ const nextWorkspaceFiles = Object.fromEntries(
+ context.entries
+ .filter((entry) => entry.kind === "file")
+ .map((entry) => [
+ entry.path,
+ {
+ kind: "desktop",
+ fileName: entry.name
+ } satisfies WorkspaceFileSource
+ ])
+ );
resetWorkspaceContext();
- setWorkspaceEntries(entries);
- setWorkspaceRootName(rootName);
- setHasOpenedWorkspaceFolder(true);
- setWorkspaceFiles(files);
+ setProjectRootName(context.rootName);
+ setProjectRootPath(context.rootPath);
+ setProjectConfigPath(context.settingsPath);
+ setHasSelectedProject(true);
+ setHasSavedProjectSettings(context.hasSavedSettings);
+ setWorkspaceEntries(context.entries);
+ setWorkspaceFiles(nextWorkspaceFiles);
+ setLastProjectPath(context.rootPath);
+ setProjectSettings(context.settings);
+ setPrdGenerationPrompt("");
+ setPrdGenerationError("");
setSpecGenerationPrompt("");
setSpecGenerationError("");
+ setProjectStatusMessage(
+ context.hasSavedSettings
+ ? `Loaded project settings from ${context.settingsPath}.`
+ : `Selected ${context.rootName}. Save the setup to create ${context.settingsPath}.`
+ );
+ setProjectErrorMessage("");
+ setWorkspaceNotice(buildWorkspaceNotice(context));
startTransition(() => {
- setPrdContent(prdDocument?.content ?? "", prdDocument?.sourcePath ?? "PRD.md");
- setSpecContent(specDocument?.content ?? "", specDocument?.sourcePath ?? "spec.md");
- setSpecPaneMode(specDocument ? "edit" : "preview");
+ setPrdContent(context.prdDocument?.content ?? "", context.prdDocument?.sourcePath ?? context.settings.prdPath);
+ setSpecContent(context.specDocument?.content ?? "", context.specDocument?.sourcePath ?? context.settings.specPath);
+ setPrdPaneMode("preview");
+ setSpecPaneMode("preview");
});
- if (prdDocument) {
- loadedDocuments.push(prdDocument.fileName);
- }
-
- if (specDocument) {
- loadedDocuments.push(specDocument.fileName);
- }
-
- const missingDocuments = [
- prdDocument ? null : "PRD",
- specDocument ? null : "spec"
- ].filter((value): value is string => value !== null);
- const missingDocumentNotice = formatMissingWorkspaceDocuments(missingDocuments);
-
- if (loadedDocuments.length > 0) {
- setWorkspaceNotice(
- `Loaded ${loadedDocuments.join(" and ")} from ${rootName}.${missingDocumentNotice}${ignoredFileCount > 0 ? ` Ignored ${ignoredFileCount} file(s) from .gitignore.` : ""}`
- );
- appendTerminalOutput(
- stampLog(
- "workspace",
- `Loaded workspace folder ${rootName} and detected ${loadedDocuments.join(", ")}.${missingDocumentNotice}`
- )
- );
- return;
+ if (options?.navigateToReview) {
+ navigate("/review");
}
-
- setWorkspaceNotice(
- `${rootName} opened successfully, but no matching PRD/spec files were found.${ignoredFileCount > 0 ? ` Ignored ${ignoredFileCount} file(s) from .gitignore.` : ""}`
- );
},
[
- appendTerminalOutput,
+ navigate,
resetWorkspaceContext,
+ setLastProjectPath,
setPrdContent,
+ setPrdPaneMode,
+ setProjectSettings,
setSpecContent,
setSpecPaneMode,
setWorkspaceEntries
]
);
- const importWorkspaceFiles = useCallback(
- async (files: ImportableFile[]) => {
- if (files.length === 0) {
+ const saveCurrentProjectSettings = useCallback(
+ async ({
+ reloadProject = false,
+ navigateToReview = false
+ }: {
+ reloadProject?: boolean;
+ navigateToReview?: boolean;
+ } = {}) => {
+ if (!desktopRuntime) {
+ setProjectErrorMessage("Project configuration requires the desktop runtime.");
return;
}
- const filteredFiles = await filterWorkspaceFiles(files);
- const snapshot = buildWorkspaceImportSnapshot(filteredFiles);
- const matches = findProjectDocuments(filteredFiles);
- const ignoredFileCount = files.length - filteredFiles.length;
- const nextWorkspaceFiles = filteredFiles.reduce>(
- (accumulator, file) => {
- const normalizedPath = normalizeWorkspacePath(
- file.webkitRelativePath || file.name,
- snapshot.rootName
- );
- accumulator[normalizedPath] = {
- kind: "browser",
- file
- };
- return accumulator;
- },
- {}
- );
+ if (!projectRootPath.trim()) {
+ setProjectErrorMessage("Choose a project folder before saving.");
+ return;
+ }
+
+ setProjectErrorMessage("");
+ setProjectStatusMessage("");
+ setIsProjectSaving(true);
try {
- const [prdDocument, specDocument] = await Promise.all([
- matches.prdFile ? parseWorkspaceDocument(matches.prdFile) : Promise.resolve(null),
- matches.specFile ? parseWorkspaceDocument(matches.specFile) : Promise.resolve(null)
- ]);
-
- applyWorkspaceSelection({
- rootName: snapshot.rootName,
- entries: snapshot.entries,
- ignoredFileCount,
- files: nextWorkspaceFiles,
- prdDocument: prdDocument
- ? {
- content: prdDocument.content,
- sourcePath: prdDocument.sourcePath,
- fileName: matches.prdFile?.name ?? prdDocument.sourcePath
- }
- : null,
- specDocument: specDocument
- ? {
- content: specDocument.content,
- sourcePath: specDocument.sourcePath,
- fileName: matches.specFile?.name ?? specDocument.sourcePath
- }
- : null
+ const savedSettings = await saveProjectSettings({
+ folderPath: projectRootPath,
+ settings: currentProjectSettings
});
+
+ setProjectSettings(savedSettings);
+ setHasSavedProjectSettings(true);
+ setProjectStatusMessage(`Saved project settings to ${configPathDisplay}.`);
+
+ if (reloadProject || navigateToReview) {
+ const reloadedContext = await loadProjectContext(projectRootPath);
+ applyProjectContext(reloadedContext, { navigateToReview });
+ }
} catch (error) {
- setWorkspaceNotice(
- error instanceof Error
- ? `${snapshot.rootName} opened, but document parsing failed: ${error.message}`
- : `${snapshot.rootName} opened, but one of the detected documents could not be parsed.`
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to save the current project settings."
);
+ } finally {
+ setIsProjectSaving(false);
+ }
+ },
+ [
+ applyProjectContext,
+ configPathDisplay,
+ currentProjectSettings,
+ desktopRuntime,
+ projectRootPath,
+ setProjectSettings
+ ]
+ );
+
+ const scheduleProjectSettingsSave = useCallback(
+ (reloadProject = false) => {
+ if (!desktopRuntime || !hasSavedProjectSettings || !projectRootPath.trim()) {
+ return;
+ }
+
+ pendingProjectReloadRef.current = pendingProjectReloadRef.current || reloadProject;
+
+ if (projectSaveTimerRef.current !== null) {
+ window.clearTimeout(projectSaveTimerRef.current);
}
+
+ projectSaveTimerRef.current = window.setTimeout(() => {
+ const shouldReload = pendingProjectReloadRef.current;
+ pendingProjectReloadRef.current = false;
+ projectSaveTimerRef.current = null;
+ void saveCurrentProjectSettings({ reloadProject: shouldReload });
+ }, 700);
},
- [applyWorkspaceSelection]
+ [desktopRuntime, hasSavedProjectSettings, projectRootPath, saveCurrentProjectSettings]
);
- const handlePathImport = useCallback(async () => {
- setIsImporting(true);
- setImportError("");
+ const handlePickProjectFolder = useCallback(async () => {
+ if (!desktopRuntime) {
+ setProjectErrorMessage("Project configuration requires the desktop runtime.");
+ return;
+ }
+
+ setProjectErrorMessage("");
+ setProjectStatusMessage("");
+ setIsProjectLoading(true);
try {
- assignDocument(importTarget, await parseDocument(importPath), importPath);
+ const nextProjectContext = await pickProjectFolder();
+
+ if (!nextProjectContext) {
+ return;
+ }
+
+ applyProjectContext(nextProjectContext, {
+ navigateToReview: nextProjectContext.hasSavedSettings
+ });
+
+ if (!nextProjectContext.hasSavedSettings) {
+ navigate("/");
+ }
} catch (error) {
- setImportError(error instanceof Error ? error.message : "Unable to parse the requested document.");
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to open the selected project folder."
+ );
} finally {
- setIsImporting(false);
+ setIsProjectLoading(false);
}
- }, [assignDocument, importPath, importTarget]);
+ }, [applyProjectContext, desktopRuntime, navigate]);
const handleFileSelection = useCallback(
async (event: ChangeEvent) => {
@@ -449,11 +607,15 @@ function App() {
try {
const document = await parseWorkspaceDocument(file);
assignDocument(pendingImportTargetRef.current, document.content, document.sourcePath);
- setImportError("");
} catch (error) {
- setImportError(
- error instanceof Error ? error.message : "The selected file could not be imported."
- );
+ const message =
+ error instanceof Error ? error.message : "The selected file could not be imported.";
+
+ if (pendingImportTargetRef.current === "prd") {
+ setPrdGenerationError(message);
+ } else {
+ setSpecGenerationError(message);
+ }
} finally {
event.target.value = "";
}
@@ -462,14 +624,8 @@ function App() {
);
const handleWorkspaceFolderSelection = useCallback(
- async (event: ChangeEvent) => {
- try {
- await importWorkspaceFiles(Array.from(event.target.files ?? []) as ImportableFile[]);
- } finally {
- event.target.value = "";
- }
- },
- [importWorkspaceFiles]
+ (_event: ChangeEvent) => undefined,
+ []
);
const handleWorkspaceFileOpen = useCallback(
@@ -519,6 +675,40 @@ function App() {
[openEditorTab, workspaceFiles]
);
+ const handleOpenImportFile = useCallback(
+ async (target: DocumentTarget) => {
+ pendingImportTargetRef.current = target;
+
+ if (desktopRuntime) {
+ setIsImporting(true);
+
+ try {
+ const document = await pickDocument();
+
+ if (document) {
+ assignDocument(target, document.content, document.sourcePath);
+ }
+ } catch (error) {
+ const message =
+ error instanceof Error ? error.message : "The selected file could not be imported.";
+
+ if (target === "prd") {
+ setPrdGenerationError(message);
+ } else {
+ setSpecGenerationError(message);
+ }
+ } finally {
+ setIsImporting(false);
+ }
+
+ return;
+ }
+
+ fileInputRef.current?.click();
+ },
+ [assignDocument, desktopRuntime]
+ );
+
const handleApproveSpec = useCallback(() => {
if (!specContent.trim()) {
return;
@@ -565,14 +755,14 @@ function App() {
}, [
appendTerminalOutput,
autonomyMode,
+ desktopRuntime,
isSpecApproved,
resetRun,
+ selectedModel,
+ selectedReasoning,
setActiveTab,
setAgentStatus,
setCurrentMilestone,
- desktopRuntime,
- selectedModel,
- selectedReasoning,
specContent
]);
@@ -630,19 +820,21 @@ function App() {
appendTerminalOutput(stampLog("halt", "Emergency stop triggered. Agent loop is paused."));
}, [appendTerminalOutput, desktopRuntime, setAgentStatus, setExecutionSummary, setPendingDiff]);
- const handleCommandSearchChange = useCallback(
- (event: ChangeEvent) => setCommandSearch(event.target.value),
- []
+ const handlePrdContentChange = useCallback(
+ (value: string) => setPrdContent(value, prdPath),
+ [prdPath, setPrdContent]
);
- const closeWorkspaceSearch = useCallback(() => {
- setIsSearchOpen(false);
- setCommandSearch("");
- }, []);
+ const handleSpecContentChange = useCallback(
+ (value: string) => {
+ if (value.trim()) {
+ setSpecGenerationError("");
+ }
- const handleImportTargetChange = useCallback((target: DocumentTarget) => {
- setImportTarget(target);
- }, []);
+ setSpecContent(value, specPath);
+ },
+ [setSpecContent, specPath]
+ );
const handleSpecSelect = useCallback(
(event: ChangeEvent) => {
@@ -660,156 +852,109 @@ function App() {
[setSelectedSpecRange]
);
- const handleOpenImportFile = useCallback(async (target: DocumentTarget) => {
- pendingImportTargetRef.current = target;
- setImportTarget(target);
-
- if (desktopRuntime) {
- setIsImporting(true);
- setImportError("");
-
- try {
- const document = await pickDocument();
+ const handlePrdGenerationPromptChange = useCallback((value: string) => {
+ setPrdGenerationPrompt(value);
- if (document) {
- assignDocument(target, document.content, document.sourcePath);
- }
- } catch (error) {
- setImportError(
- error instanceof Error ? error.message : "The selected file could not be imported."
- );
- } finally {
- setIsImporting(false);
- }
-
- return;
+ if (prdGenerationError) {
+ setPrdGenerationError("");
}
- fileInputRef.current?.click();
- }, [assignDocument, desktopRuntime]);
-
- const handleOpenWorkspaceFolder = useCallback(async () => {
- if (desktopRuntime) {
- try {
- const workspaceFolder = await openWorkspaceFolder();
-
- if (!workspaceFolder) {
- return;
- }
-
- const nextWorkspaceFiles = Object.fromEntries(
- workspaceFolder.entries
- .filter((entry) => entry.kind === "file")
- .map((entry) => [
- entry.path,
- {
- kind: "desktop",
- fileName: entry.name
- } satisfies WorkspaceFileSource
- ])
- );
-
- applyWorkspaceSelection({
- rootName: workspaceFolder.rootName,
- entries: workspaceFolder.entries,
- ignoredFileCount: workspaceFolder.ignoredFileCount,
- files: nextWorkspaceFiles,
- prdDocument: workspaceFolder.prdDocument,
- specDocument: workspaceFolder.specDocument
- });
- return;
- } catch (error) {
- setWorkspaceNotice(
- error instanceof Error
- ? `Workspace import failed: ${error.message}`
- : "Workspace import failed."
- );
- return;
- }
+ if (agentStatus === "error") {
+ setAgentStatus("idle");
}
+ }, [agentStatus, prdGenerationError, setAgentStatus]);
- const pickDirectory = getDirectoryPicker();
+ const handleSpecGenerationPromptChange = useCallback((value: string) => {
+ setSpecGenerationPrompt(value);
- if (pickDirectory) {
- try {
- const directoryHandle = await pickDirectory({ mode: "read" });
- await importWorkspaceFiles(await collectWorkspaceFiles(directoryHandle));
- return;
- } catch (error) {
- if (isDirectoryPickerAbort(error)) {
- return;
- }
- }
+ if (specGenerationError) {
+ setSpecGenerationError("");
}
- folderInputRef.current?.click();
- }, [applyWorkspaceSelection, desktopRuntime, importWorkspaceFiles]);
-
- const handleRefresh = useCallback(() => {
- void refreshDiagnostics();
- }, [refreshDiagnostics]);
-
- const handlePathImportClick = useCallback(() => {
- void handlePathImport();
- }, [handlePathImport]);
-
- const handleOpenPrdImportClick = useCallback(() => {
- void handleOpenImportFile("prd");
- }, [handleOpenImportFile]);
-
- const handleOpenSpecImportClick = useCallback(() => {
- void handleOpenImportFile("spec");
- }, [handleOpenImportFile]);
-
- const handleStartBuildClick = useCallback(() => {
- void handleStartBuild();
- }, [handleStartBuild]);
-
- const handleApproveExecutionGateClick = useCallback(() => {
- void handleApproveExecutionGate();
- }, [handleApproveExecutionGate]);
+ if (agentStatus === "error") {
+ setAgentStatus("idle");
+ }
+ }, [agentStatus, setAgentStatus, specGenerationError]);
- const handleEmergencyStopClick = useCallback(() => {
- void handleEmergencyStop();
- }, [handleEmergencyStop]);
+ const handleGeneratePrd = useCallback(async () => {
+ const trimmedPrompt = prdGenerationPrompt.trim();
- const handleWorkspaceFileOpenClick = useCallback(
- (path: string) => {
- void handleWorkspaceFileOpen(path);
- },
- [handleWorkspaceFileOpen]
- );
+ if (!desktopRuntime) {
+ setPrdGenerationError("AI PRD generation requires the desktop runtime.");
+ return;
+ }
- const handlePrdContentChange = useCallback(
- (value: string) => setPrdContent(value, prdPath),
- [prdPath, setPrdContent]
- );
+ if (!projectRootPath.trim()) {
+ setPrdGenerationError("Choose a project folder before generating a PRD.");
+ return;
+ }
- const handleSpecContentChange = useCallback(
- (value: string) => {
- if (value.trim()) {
- setSpecGenerationError("");
- }
+ if (!currentProjectSettings.prdPath.toLowerCase().endsWith(".md")) {
+ setPrdGenerationError("Configure the PRD path as a Markdown file before generating.");
+ return;
+ }
- setSpecContent(value, specPath);
- },
- [setSpecContent, specPath]
- );
+ if (!trimmedPrompt) {
+ setPrdGenerationError("Add the product context you want the AI to consider.");
+ return;
+ }
- const handleSpecGenerationPromptChange = useCallback(
- (value: string) => {
- setSpecGenerationPrompt(value);
+ setPrdGenerationError("");
+ setAgentStatus("generating_prd");
+ appendTerminalOutput(
+ stampLog(
+ "prd",
+ `Generating a PRD draft with ${getModelLabel(selectedModel)} (${getReasoningLabel(selectedModel, selectedReasoning)} reasoning).`
+ )
+ );
- if (specGenerationError) {
- setSpecGenerationError("");
- }
+ try {
+ const generatedPrd = await generatePrdDocument({
+ workspaceRoot: projectRootPath,
+ outputPath: currentProjectSettings.prdPath,
+ promptTemplate: currentProjectSettings.prdPrompt,
+ userPrompt: trimmedPrompt,
+ provider: selectedModelProvider,
+ model: selectedModel,
+ reasoning: selectedReasoning,
+ claudePath,
+ codexPath
+ });
- if (agentStatus === "error") {
- setAgentStatus("idle");
- }
- },
- [agentStatus, setAgentStatus, specGenerationError]
- );
+ startTransition(() => {
+ setPrdContent(generatedPrd.content, generatedPrd.sourcePath);
+ setPrdPaneMode("preview");
+ });
+ setPrdGenerationPrompt("");
+ setAgentStatus("idle");
+ appendTerminalOutput(
+ stampLog(
+ "prd",
+ `PRD draft generated, saved to ${generatedPrd.fileName}, and loaded into the review pane.`
+ )
+ );
+ } catch (error) {
+ const message = error instanceof Error ? error.message : "Unable to generate a PRD.";
+ setPrdGenerationError(message);
+ setAgentStatus("error");
+ appendTerminalOutput(stampLog("error", message));
+ }
+ }, [
+ appendTerminalOutput,
+ claudePath,
+ codexPath,
+ currentProjectSettings.prdPath,
+ currentProjectSettings.prdPrompt,
+ desktopRuntime,
+ prdGenerationPrompt,
+ projectRootPath,
+ selectedModel,
+ selectedModelProvider,
+ selectedReasoning,
+ setAgentStatus,
+ setPrdContent,
+ setPrdPaneMode
+ ]);
const handleGenerateSpec = useCallback(async () => {
const trimmedPrompt = specGenerationPrompt.trim();
@@ -819,8 +964,18 @@ function App() {
return;
}
+ if (!projectRootPath.trim()) {
+ setSpecGenerationError("Choose a project folder before generating a spec.");
+ return;
+ }
+
if (!prdContent.trim()) {
- setSpecGenerationError("Load or write a PRD before generating a specification.");
+ setSpecGenerationError("Load or generate a PRD before drafting a specification.");
+ return;
+ }
+
+ if (!currentProjectSettings.specPath.toLowerCase().endsWith(".md")) {
+ setSpecGenerationError("Configure the spec path as a Markdown file before generating.");
return;
}
@@ -840,8 +995,10 @@ function App() {
try {
const generatedSpec = await generateSpecDocument({
- prdPath,
+ workspaceRoot: projectRootPath,
+ outputPath: currentProjectSettings.specPath,
prdContent,
+ promptTemplate: currentProjectSettings.specPrompt,
userPrompt: trimmedPrompt,
provider: selectedModelProvider,
model: selectedModel,
@@ -865,7 +1022,6 @@ function App() {
} catch (error) {
const message =
error instanceof Error ? error.message : "Unable to generate a specification.";
-
setSpecGenerationError(message);
setAgentStatus("error");
appendTerminalOutput(stampLog("error", message));
@@ -874,9 +1030,11 @@ function App() {
appendTerminalOutput,
claudePath,
codexPath,
+ currentProjectSettings.specPath,
+ currentProjectSettings.specPrompt,
desktopRuntime,
- prdPath,
prdContent,
+ projectRootPath,
selectedModel,
selectedModelProvider,
selectedReasoning,
@@ -886,22 +1044,90 @@ function App() {
specGenerationPrompt
]);
+ const handleProjectModelChange = useCallback((model: typeof selectedModel) => {
+ setSelectedModel(model);
+ scheduleProjectSettingsSave(false);
+ }, [scheduleProjectSettingsSave, setSelectedModel]);
+
+ const handleProjectReasoningChange = useCallback((reasoning: typeof selectedReasoning) => {
+ setReasoningProfile(reasoning);
+ scheduleProjectSettingsSave(false);
+ }, [scheduleProjectSettingsSave, setReasoningProfile]);
+
+ const handlePrdPromptTemplateChange = useCallback((value: string) => {
+ setPrdPromptTemplate(value);
+ scheduleProjectSettingsSave(false);
+ }, [scheduleProjectSettingsSave, setPrdPromptTemplate]);
+
+ const handleSpecPromptTemplateChange = useCallback((value: string) => {
+ setSpecPromptTemplate(value);
+ scheduleProjectSettingsSave(false);
+ }, [scheduleProjectSettingsSave, setSpecPromptTemplate]);
+
+ const handleConfiguredPrdPathChange = useCallback((value: string) => {
+ setConfiguredPrdPath(normalizeProjectRelativePath(value));
+ scheduleProjectSettingsSave(true);
+ }, [scheduleProjectSettingsSave, setConfiguredPrdPath]);
+
+ const handleConfiguredSpecPathChange = useCallback((value: string) => {
+ setConfiguredSpecPath(normalizeProjectRelativePath(value));
+ scheduleProjectSettingsSave(true);
+ }, [scheduleProjectSettingsSave, setConfiguredSpecPath]);
+
+ const handleSupportingDocumentsChange = useCallback((value: string) => {
+ setSupportingDocumentPaths(parseSupportingDocumentPaths(value));
+ scheduleProjectSettingsSave(false);
+ }, [scheduleProjectSettingsSave, setSupportingDocumentPaths]);
+
+ const handleCommandSearchChange = useCallback(
+ (event: ChangeEvent) => setCommandSearch(event.target.value),
+ []
+ );
+
+ const closeWorkspaceSearch = useCallback(() => {
+ setIsSearchOpen(false);
+ setCommandSearch("");
+ }, []);
+
+ const handleRefresh = useCallback(() => {
+ void refreshDiagnostics();
+ }, [refreshDiagnostics]);
+
+ const handleOpenPrdImportClick = useCallback(() => {
+ void handleOpenImportFile("prd");
+ }, [handleOpenImportFile]);
+
+ const handleOpenSpecImportClick = useCallback(() => {
+ void handleOpenImportFile("spec");
+ }, [handleOpenImportFile]);
+
+ const handleStartBuildClick = useCallback(() => {
+ void handleStartBuild();
+ }, [handleStartBuild]);
+
+ const handleApproveExecutionGateClick = useCallback(() => {
+ void handleApproveExecutionGate();
+ }, [handleApproveExecutionGate]);
+
+ const handleEmergencyStopClick = useCallback(() => {
+ void handleEmergencyStop();
+ }, [handleEmergencyStop]);
+
+ const handleWorkspaceFileOpenClick = useCallback((path: string) => {
+ void handleWorkspaceFileOpen(path);
+ }, [handleWorkspaceFileOpen]);
+
+ const handleGeneratePrdClick = useCallback(() => {
+ void handleGeneratePrd();
+ }, [handleGeneratePrd]);
+
const handleGenerateSpecClick = useCallback(() => {
void handleGenerateSpec();
}, [handleGenerateSpec]);
- useEffect(() => {
- if (hasInitializedDocumentsRef.current) {
- return;
- }
-
- hasInitializedDocumentsRef.current = true;
- startTransition(() => {
- setPrdContent(bundledPrd, "docs/PRD.md");
- setSpecContent(bundledSpec, "docs/SPEC.md");
- });
- }, [setPrdContent, setSpecContent]);
-
+ const handleSaveConfigurationAndContinue = useCallback(() => {
+ void saveCurrentProjectSettings({ reloadProject: true, navigateToReview: true });
+ }, [saveCurrentProjectSettings]);
useEffect(() => {
if (typeof window === "undefined" || !window.matchMedia) {
return;
@@ -919,24 +1145,6 @@ function App() {
document.documentElement.classList.toggle("dark", resolvedTheme === "dracula");
}, [resolvedTheme]);
- useEffect(() => {
- if (configuredModelProviders.length !== 1) {
- return;
- }
-
- const onlyConfiguredProvider = configuredModelProviders[0];
-
- if (getModelProvider(selectedModel) === onlyConfiguredProvider) {
- return;
- }
-
- const nextModel = getModelOptions(onlyConfiguredProvider)[0]?.value;
-
- if (nextModel) {
- setSelectedModel(nextModel);
- }
- }, [configuredModelProviders, selectedModel, setSelectedModel]);
-
useEffect(() => {
const handleKeyDown = (event: globalThis.KeyboardEvent) => {
if (event.defaultPrevented || event.isComposing) {
@@ -950,19 +1158,19 @@ function App() {
event.key.toLowerCase() === "f";
if (isFindShortcut) {
- event.preventDefault();
-
- if (!isSettingsRoute) {
- setIsSearchOpen((currentValue) => {
- if (currentValue) {
- setCommandSearch("");
- return false;
- }
-
- return true;
- });
+ if (!isReviewRoute) {
+ return;
}
+ event.preventDefault();
+ setIsSearchOpen((currentValue) => {
+ if (currentValue) {
+ setCommandSearch("");
+ return false;
+ }
+
+ return true;
+ });
return;
}
@@ -974,16 +1182,16 @@ function App() {
window.addEventListener("keydown", handleKeyDown);
return () => window.removeEventListener("keydown", handleKeyDown);
- }, [closeWorkspaceSearch, isSearchOpen, isSettingsRoute]);
+ }, [closeWorkspaceSearch, isReviewRoute, isSearchOpen]);
useEffect(() => {
- if (isSettingsRoute && isSearchOpen) {
+ if (!isReviewRoute && isSearchOpen) {
closeWorkspaceSearch();
}
- }, [closeWorkspaceSearch, isSearchOpen, isSettingsRoute]);
+ }, [closeWorkspaceSearch, isReviewRoute, isSearchOpen]);
useEffect(() => {
- if (!isSearchOpen || isSettingsRoute) {
+ if (!isSearchOpen || !isReviewRoute) {
return;
}
@@ -993,7 +1201,7 @@ function App() {
});
return () => window.cancelAnimationFrame(focusFrame);
- }, [isSearchOpen, isSettingsRoute]);
+ }, [isReviewRoute, isSearchOpen]);
useEffect(() => {
if (hasScannedEnvironmentRef.current) {
@@ -1004,9 +1212,58 @@ function App() {
void refreshDiagnostics(environment);
}, [environment, refreshDiagnostics]);
+ useEffect(() => {
+ if (hasAttemptedProjectRestore || !desktopRuntime) {
+ return;
+ }
+
+ if (!lastProjectPath.trim()) {
+ setHasAttemptedProjectRestore(true);
+ return;
+ }
+
+ let isDisposed = false;
+ setIsProjectLoading(true);
+
+ void loadProjectContext(lastProjectPath)
+ .then((context) => {
+ if (isDisposed) {
+ return;
+ }
+
+ applyProjectContext(context);
+ })
+ .catch(() => {
+ if (isDisposed) {
+ return;
+ }
+
+ setLastProjectPath("");
+ })
+ .finally(() => {
+ if (isDisposed) {
+ return;
+ }
+
+ setIsProjectLoading(false);
+ setHasAttemptedProjectRestore(true);
+ });
+
+ return () => {
+ isDisposed = true;
+ };
+ }, [
+ applyProjectContext,
+ desktopRuntime,
+ hasAttemptedProjectRestore,
+ lastProjectPath,
+ setLastProjectPath
+ ]);
+
useEffect(() => {
let unlisten: (() => void) | undefined;
let isDisposed = false;
+
void subscribeToAgentEvents({
onLine: appendTerminalOutput,
onState: (payload) => {
@@ -1028,12 +1285,147 @@ function App() {
isDisposed = true;
unlisten?.();
clearFallbackTimer(fallbackTimerRef);
+
+ if (projectSaveTimerRef.current !== null) {
+ window.clearTimeout(projectSaveTimerRef.current);
+ projectSaveTimerRef.current = null;
+ }
};
}, [appendTerminalOutput, applyAgentEvent]);
+ const reviewScreen = hasSavedProjectSettings ? (
+ 0,
+ onFileOpen: handleWorkspaceFileOpenClick,
+ onFolderChange: handleWorkspaceFolderSelection,
+ onOpenFolder: handlePickProjectFolder,
+ workspaceEntries: filteredWorkspaceEntries,
+ workspaceNotice,
+ workspaceRootName: projectRootName
+ }}
+ isSearchOpen={isSearchOpen}
+ isSpecApproved={isSpecApproved}
+ mainWorkspaceProps={{
+ activeTab,
+ agentStatus,
+ canGeneratePrd,
+ canGenerateSpec,
+ configPath: configPathDisplay,
+ executionSummary,
+ isGeneratingPrd,
+ isGeneratingSpec,
+ isSpecApproved,
+ onActiveTabChange: setActiveTab,
+ onApproveExecutionGate: handleApproveExecutionGateClick,
+ onApproveSpec: handleApproveSpec,
+ onEditorTabChange: updateEditorTabContent,
+ onEditorTabClose: closeEditorTab,
+ onEmergencyStop: handleEmergencyStopClick,
+ onGeneratePrd: handleGeneratePrdClick,
+ onGenerateSpec: handleGenerateSpecClick,
+ onLoadPrd: handleOpenPrdImportClick,
+ onLoadSpec: handleOpenSpecImportClick,
+ onPrdContentChange: handlePrdContentChange,
+ onPrdGenerationPromptChange: handlePrdGenerationPromptChange,
+ onPrdPaneModeChange: setPrdPaneMode,
+ onSpecContentChange: handleSpecContentChange,
+ onSpecGenerationPromptChange: handleSpecGenerationPromptChange,
+ onSpecPaneModeChange: setSpecPaneMode,
+ onSpecSelect: handleSpecSelect,
+ openEditorTabs,
+ prdContent,
+ prdGenerationError,
+ prdGenerationHelperText,
+ prdGenerationPrompt,
+ prdPaneMode,
+ prdPath,
+ prdPromptTemplate,
+ specContent,
+ specGenerationError,
+ specGenerationHelperText,
+ specGenerationPrompt,
+ specPaneMode,
+ specPath,
+ specPromptTemplate,
+ terminalOutput,
+ visibleDiff,
+ workspaceRootName: projectRootName
+ }}
+ onCommandSearchChange={handleCommandSearchChange}
+ onRefresh={handleRefresh}
+ onStartBuild={handleStartBuildClick}
+ searchInputRef={searchInputRef}
+ workspaceRootName={projectRootName}
+ />
+ ) : hasAttemptedProjectRestore ? (
+
+ ) : (
+
+ Loading project configuration...
+
+ );
+
+ const settingsScreen = hasSavedProjectSettings ? (
+
+ ) : hasAttemptedProjectRestore ? (
+
+ ) : (
+
+ Loading project configuration...
+
+ );
+
return (
-
+
+
0,
- onFileOpen: handleWorkspaceFileOpenClick,
- onFolderChange: handleWorkspaceFolderSelection,
- onOpenFolder: handleOpenWorkspaceFolder,
- workspaceEntries: filteredWorkspaceEntries,
- workspaceNotice,
- workspaceRootName
- }}
- isSearchOpen={isSearchOpen}
- isSpecApproved={isSpecApproved}
- mainWorkspaceProps={{
- activeTab,
- agentStatus,
- canGenerateSpec,
- executionSummary,
- isSpecApproved,
- isGeneratingSpec,
- onApproveSpec: handleApproveSpec,
- onEditorTabChange: updateEditorTabContent,
- onEditorTabClose: closeEditorTab,
- onActiveTabChange: setActiveTab,
- onApproveExecutionGate: handleApproveExecutionGateClick,
- onEmergencyStop: handleEmergencyStopClick,
- onGenerateSpec: handleGenerateSpecClick,
- onLoadPrd: handleOpenPrdImportClick,
- onLoadSpec: handleOpenSpecImportClick,
- openEditorTabs,
- onPrdContentChange: handlePrdContentChange,
- onPrdPaneModeChange: setPrdPaneMode,
- onSpecContentChange: handleSpecContentChange,
- onSpecGenerationPromptChange: handleSpecGenerationPromptChange,
- onSpecPaneModeChange: setSpecPaneMode,
- onSpecSelect: handleSpecSelect,
- prdContent,
- prdPaneMode,
- prdPath,
- specGenerationError,
- specGenerationHelperText,
- specGenerationPrompt,
- specContent,
- specPaneMode,
- specPath,
- terminalOutput,
- visibleDiff,
- workspaceRootName
- }}
- onCommandSearchChange={handleCommandSearchChange}
+
}
path="/"
/>
-
- }
- path="/settings"
- />
+
+
} path="*" />
@@ -1147,15 +1486,15 @@ function App() {
export default App;
-function formatMissingWorkspaceDocuments(missingDocuments: string[]) {
- if (missingDocuments.length === 0) {
- return "";
- }
+function buildWorkspaceNotice(context: ProjectContext) {
+ const loadedDocuments = [
+ context.prdDocument?.fileName ? `PRD: ${context.prdDocument.fileName}` : null,
+ context.specDocument?.fileName ? `SPEC: ${context.specDocument.fileName}` : null
+ ].filter((value): value is string => value !== null);
- if (missingDocuments.length === 1) {
- return ` No matching ${missingDocuments[0]} file was found.`;
+ if (loadedDocuments.length === 0) {
+ return `${context.rootName} is configured. No document exists yet at ${context.settings.prdPath} or ${context.settings.specPath}.`;
}
- const finalDocument = missingDocuments[missingDocuments.length - 1];
- return ` No matching ${missingDocuments.slice(0, -1).join(" or ")} or ${finalDocument} files were found.`;
+ return `${context.rootName} is configured. Loaded ${loadedDocuments.join(" and ")} from the saved project paths.`;
}
diff --git a/src/components/AppRail.tsx b/src/components/AppRail.tsx
index b9b3e34..ea078a2 100644
--- a/src/components/AppRail.tsx
+++ b/src/components/AppRail.tsx
@@ -7,17 +7,35 @@ import {
} from "iconoir-react";
import { NavLink } from "react-router-dom";
-export function AppRail() {
+interface AppRailProps {
+ hasProjectConfigured: boolean;
+}
+
+export function AppRail({ hasProjectConfigured }: AppRailProps) {
return (
SF
-
-
+
+
+ {hasProjectConfigured ? (
+
+
+
+ ) : (
+
+
+
+ )}
+
diff --git a/src/components/CliHealthCard.tsx b/src/components/CliHealthCard.tsx
new file mode 100644
index 0000000..f18a373
--- /dev/null
+++ b/src/components/CliHealthCard.tsx
@@ -0,0 +1,44 @@
+import { CheckCircle, WarningTriangle } from "iconoir-react";
+import { memo } from "react";
+
+import type { CliStatus } from "../types";
+
+interface CliHealthCardProps {
+ entry: CliStatus;
+}
+
+export const CliHealthCard = memo(function CliHealthCard({ entry }: CliHealthCardProps) {
+ return (
+
+
+ {entry.status === "found" ? (
+
+ ) : (
+
+ )}
+
+
{entry.name}
+
{formatCliHealth(entry.status)}
+
+
+ {entry.detail}
+ {entry.path ? (
+
+ {entry.path}
+
+ ) : null}
+
+ );
+});
+
+function formatCliHealth(status: CliStatus["status"]) {
+ if (status === "found") {
+ return "Ready";
+ }
+
+ if (status === "unauthorized") {
+ return "Needs authentication";
+ }
+
+ return "Missing";
+}
diff --git a/src/components/MainWorkspace.tsx b/src/components/MainWorkspace.tsx
index 6d15c91..f432bb5 100644
--- a/src/components/MainWorkspace.tsx
+++ b/src/components/MainWorkspace.tsx
@@ -8,6 +8,7 @@ import { DocumentActionBar } from "./DocumentActionBar";
import { DocumentEmptyState } from "./DocumentEmptyState";
import { DocumentPane } from "./DocumentPane";
import { ExecutionPanel } from "./ExecutionPanel";
+import { PrdEmptyState } from "./PrdEmptyState";
import { SpecEmptyState } from "./SpecEmptyState";
import { WorkspaceTabBar } from "./WorkspaceTabBar";
import type {
@@ -28,11 +29,19 @@ interface MainWorkspaceProps {
prdPaneMode: PaneMode;
specPaneMode: PaneMode;
isSpecApproved: boolean;
+ canGeneratePrd: boolean;
+ isGeneratingPrd: boolean;
+ prdGenerationPrompt: string;
+ prdGenerationError: string;
+ prdGenerationHelperText: string;
canGenerateSpec: boolean;
isGeneratingSpec: boolean;
specGenerationPrompt: string;
specGenerationError: string;
specGenerationHelperText: string;
+ prdPromptTemplate: string;
+ specPromptTemplate: string;
+ configPath: string;
terminalOutput: string[];
executionSummary: string | null;
visibleDiff: string;
@@ -45,6 +54,8 @@ interface MainWorkspaceProps {
onLoadPrd: () => void;
onLoadSpec: () => void;
onApproveSpec: () => void;
+ onPrdGenerationPromptChange: (value: string) => void;
+ onGeneratePrd: () => void;
onSpecGenerationPromptChange: (value: string) => void;
onGenerateSpec: () => void;
onSpecSelect: (event: ChangeEvent) => void;
@@ -65,11 +76,19 @@ export const MainWorkspace = memo(function MainWorkspace({
prdPaneMode,
specPaneMode,
isSpecApproved,
+ canGeneratePrd,
+ isGeneratingPrd,
+ prdGenerationPrompt,
+ prdGenerationError,
+ prdGenerationHelperText,
canGenerateSpec,
isGeneratingSpec,
specGenerationPrompt,
specGenerationError,
specGenerationHelperText,
+ prdPromptTemplate,
+ specPromptTemplate,
+ configPath,
terminalOutput,
executionSummary,
visibleDiff,
@@ -82,6 +101,8 @@ export const MainWorkspace = memo(function MainWorkspace({
onLoadPrd,
onLoadSpec,
onApproveSpec,
+ onPrdGenerationPromptChange,
+ onGeneratePrd,
onSpecGenerationPromptChange,
onGenerateSpec,
onSpecSelect,
@@ -177,10 +198,16 @@ export const MainWorkspace = memo(function MainWorkspace({
/>
{showPrdEmptyState ? (
- }
+
) : (
@@ -224,12 +251,14 @@ export const MainWorkspace = memo(function MainWorkspace({
) : (
)}
diff --git a/src/components/PrdEmptyState.tsx b/src/components/PrdEmptyState.tsx
new file mode 100644
index 0000000..9cdaaea
--- /dev/null
+++ b/src/components/PrdEmptyState.tsx
@@ -0,0 +1,71 @@
+import { Page } from "iconoir-react";
+import { memo } from "react";
+
+import { DocumentEmptyState } from "./DocumentEmptyState";
+
+interface PrdEmptyStateProps {
+ prompt: string;
+ error: string;
+ helperText: string;
+ isGenerating: boolean;
+ canGenerate: boolean;
+ templatePrompt: string;
+ configPath: string;
+ onPromptChange: (value: string) => void;
+ onGenerate: () => void;
+}
+
+export const PrdEmptyState = memo(function PrdEmptyState({
+ prompt,
+ error,
+ helperText,
+ isGenerating,
+ canGenerate,
+ templatePrompt,
+ configPath,
+ onPromptChange,
+ onGenerate
+}: PrdEmptyStateProps) {
+ return (
+ }
+ >
+
- Environment and Theme Setup
+ Machine and Project Preferences
- Configure Claude CLI, Codex CLI, and the active theme in one place. The workspace
- view stays focused on review and execution.
+ CLI overrides and theme stay local to this machine. Prompt templates, AI defaults, and
+ document paths are saved inside the selected project at{" "}
+ {configPath || ".specforge/settings.json"}.
+ {projectStatusMessage ? (
+
+ {projectStatusMessage}
+
+ ) : null}
+ {projectErrorMessage ? (
+
+ {projectErrorMessage}
+
+ ) : null}
@@ -54,7 +108,7 @@ export const SettingsView = memo(function SettingsView({
Claude CLI
-
+
Binary path override
@@ -71,10 +125,10 @@ export const SettingsView = memo(function SettingsView({
-
+
Codex CLI
-
+
Binary path override
@@ -101,9 +155,7 @@ export const SettingsView = memo(function SettingsView({
{ id: "system", label: "System", meta: "Follow the OS appearance" }
].map((entry) => (
onThemeChange(entry.id as ThemeMode)}
type="button"
@@ -115,40 +167,43 @@ export const SettingsView = memo(function SettingsView({
-
-
-
-
- Review Flow Defaults
-
-
-
-
PRD and spec files are picked separately from the control deck.
-
Stepped and milestone modes pause execution at approval boundaries.
-
God Mode runs end to end unless a fatal error stops the agent loop.
-
The Dracula theme remains the workspace default and is managed here.
-
-
+
- Workspace Conventions
+ Workspace Notes
-
- When you open a workspace folder from the right sidebar, SpecForge scans for the first
- matching document set using this priority:
-
-
`PRD.md`, then `PRD.pdf`
-
`spec.md`, then `spec.pdf`
+
Project-specific AI settings live inside the selected workspace.
+
Manual PRD/spec edits still remain in-memory until a generate action writes a file.
+
CLI overrides and theme remain machine-local and do not touch `.specforge/settings.json`.
-
- Matching files are loaded directly into the review panes so the workspace is ready
- without a second import step.
-
{annotations.length > 0 ? (
@@ -178,46 +233,6 @@ export const SettingsView = memo(function SettingsView({
);
});
-const EnvironmentCard = memo(function EnvironmentCard({
- entry
-}: {
- entry: EnvironmentStatus["claude"];
-}) {
- return (
-
-
- {entry.status === "found" ? (
-
- ) : (
-
- )}
-
-
{entry.name}
-
{formatHealth(entry.status)}
-
-
- {entry.detail}
- {entry.path ? (
-
- {entry.path}
-
- ) : null}
-
- );
-});
-
-function formatHealth(status: EnvironmentStatus["claude"]["status"]) {
- if (status === "found") {
- return "Ready";
- }
-
- if (status === "unauthorized") {
- return "Needs authentication";
- }
-
- return "Missing";
-}
-
function getAnnotationClassName(tone: "info" | "warning" | "success") {
const toneClass =
tone === "info"
diff --git a/src/components/SpecEmptyState.tsx b/src/components/SpecEmptyState.tsx
index d3e09b2..160caf1 100644
--- a/src/components/SpecEmptyState.tsx
+++ b/src/components/SpecEmptyState.tsx
@@ -9,6 +9,8 @@ interface SpecEmptyStateProps {
helperText: string;
isGenerating: boolean;
canGenerate: boolean;
+ templatePrompt: string;
+ configPath: string;
onPromptChange: (value: string) => void;
onGenerate: () => void;
}
@@ -19,6 +21,8 @@ export const SpecEmptyState = memo(function SpecEmptyState({
helperText,
isGenerating,
canGenerate,
+ templatePrompt,
+ configPath,
onPromptChange,
onGenerate
}: SpecEmptyStateProps) {
@@ -28,27 +32,37 @@ export const SpecEmptyState = memo(function SpecEmptyState({
heading="No spec file detected"
icon={ }
>
- onPromptChange(event.target.value)}
- placeholder="Tell the AI about architecture, integrations, constraints, edge cases, or anything the PRD does not spell out."
- value={prompt}
- />
+ onPromptChange(event.target.value)}
+ placeholder="Describe the preferred tech stack, architecture boundaries, integrations, data constraints, and any non-functional requirements the PRD does not fully cover."
+ value={prompt}
+ />
-
-
{helperText}
-
-
- {isGenerating ? "Generating..." : "Generate Spec"}
-
-
+
+
+ The text above is appended after the saved default spec prompt from{" "}
+ {configPath || ".specforge/settings.json"}.
+
+
+ {templatePrompt}
+
+
- {error ? {error}
: null}
+
+
{helperText}
+
+
+ {isGenerating ? "Generating..." : "Generate Spec"}
+
+
+
+ {error ? {error}
: null}
);
});
diff --git a/src/components/StatusPill.tsx b/src/components/StatusPill.tsx
index ef89fe9..8b8426b 100644
--- a/src/components/StatusPill.tsx
+++ b/src/components/StatusPill.tsx
@@ -9,6 +9,7 @@ interface StatusPillProps {
const STATUS_CLASS_MAP: Record = {
idle: "text-[var(--text-subtle)]",
+ generating_prd: "text-[var(--accent-2)]",
generating_spec: "text-[var(--accent-2)]",
executing: "text-[var(--accent-2)]",
awaiting_approval: "text-[var(--warning)]",
diff --git a/src/lib/appShell.ts b/src/lib/appShell.ts
index c421934..8a41102 100644
--- a/src/lib/appShell.ts
+++ b/src/lib/appShell.ts
@@ -103,6 +103,14 @@ export function formatAgentStatus(status: AgentStatus) {
return "Awaiting approval";
}
+ if (status === "generating_prd") {
+ return "Generating PRD";
+ }
+
+ if (status === "generating_spec") {
+ return "Generating spec";
+ }
+
return `${status[0]?.toUpperCase()}${status.slice(1).replace("_", " ")}`;
}
diff --git a/src/lib/projectConfig.ts b/src/lib/projectConfig.ts
new file mode 100644
index 0000000..b87ec2f
--- /dev/null
+++ b/src/lib/projectConfig.ts
@@ -0,0 +1,114 @@
+import { DEFAULT_MODEL_ID, DEFAULT_REASONING_PROFILE, normalizeReasoningProfile } from "./agentConfig";
+import type { ModelId, ProjectSettings, ReasoningProfileId } from "../types";
+
// Name of the per-project configuration directory created at the workspace root.
export const SPECFORGE_DIRECTORY_NAME = ".specforge";
// File inside that directory that holds the project-scoped settings JSON.
export const SPECFORGE_SETTINGS_FILE_NAME = "settings.json";
// Workspace-relative path to the settings file (".specforge/settings.json").
export const SPECFORGE_SETTINGS_RELATIVE_PATH = `${SPECFORGE_DIRECTORY_NAME}/${SPECFORGE_SETTINGS_FILE_NAME}`;
// Default workspace-relative locations for the PRD and spec documents.
export const DEFAULT_PROJECT_PRD_PATH = "docs/PRD.md";
export const DEFAULT_PROJECT_SPEC_PATH = "docs/SPEC.md";
+
+export const DEFAULT_PRD_PROMPT = `Act as an Expert Senior Product Manager. Your goal is to help me write a comprehensive, well-structured Product Requirements Document (PRD) for a new [product / feature / app] called [Project Name].
+
+I have some initial ideas, but I want to make sure the PRD is thorough. Before you draft the full document, please ask me a series of clarifying questions to gather the necessary context.
+
+Please ask about:
+- The core problem we are solving
+- The target audience/user personas
+- Key features and user flows
+- Success metrics (KPIs)
+- Technical or timeline constraints
+
+Ask me these questions one or two at a time so I do not get overwhelmed. Once you have enough context, we will move on to drafting the actual PRD.`;
+
+export const DEFAULT_SPEC_PROMPT = `Act as an Expert Software Architect and Tech Lead. I have attached the Product Requirements Document (PRD) for our upcoming project.
+
+Your task is to analyze this PRD and draft a comprehensive Technical Specification Document.
+
+Please structure the spec with the following sections:
+
+1. High-Level Architecture: A conceptual overview of how the system components will interact.
+2. Tech Stack & Tooling: Define the frontend, backend, and infrastructure.
+3. Data Models & Database Schema: Define the core entities, their attributes, and relationships.
+4. API Contracts: Outline the primary endpoints (methods, routes, request/response structures) needed to support the user flows.
+5. Component & State Management: How data will flow through the application and how the UI will be structured.
+6. Security & Edge Cases: Potential vulnerabilities, error handling, and performance bottlenecks.
+7. Engineering Milestones: Break the implementation down into logical, phased deliverables.
+
+Before writing the full document, please provide a brief bulleted summary of your proposed technical approach, and ask me up to 3 clarifying questions about any technical constraints or non-functional requirements that might be missing from the PRD.`;
+
+const VALID_REASONING_PROFILES = new Set(["low", "medium", "high", "max"]);
+const VALID_MODEL_IDS = new Set([
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.3-codex",
+ "gpt-5.2",
+ "claude-opus-4-1-20250805",
+ "claude-opus-4-20250514",
+ "claude-sonnet-4-20250514",
+ "claude-3-7-sonnet-20250219",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-haiku-20241022",
+ "claude-3-haiku-20240307"
+]);
+
+export function buildDefaultProjectSettings(): ProjectSettings {
+ return {
+ selectedModel: DEFAULT_MODEL_ID,
+ selectedReasoning: DEFAULT_REASONING_PROFILE,
+ prdPrompt: DEFAULT_PRD_PROMPT,
+ specPrompt: DEFAULT_SPEC_PROMPT,
+ prdPath: DEFAULT_PROJECT_PRD_PATH,
+ specPath: DEFAULT_PROJECT_SPEC_PATH,
+ supportingDocumentPaths: []
+ };
+}
+
+export function normalizeProjectSettings(
+ value?: Partial | null
+): ProjectSettings {
+ const defaults = buildDefaultProjectSettings();
+ const selectedModel = isModelId(value?.selectedModel) ? value.selectedModel : defaults.selectedModel;
+ const selectedReasoning = normalizeReasoningProfile(
+ selectedModel,
+ isReasoningProfileId(value?.selectedReasoning)
+ ? value.selectedReasoning
+ : defaults.selectedReasoning
+ );
+
+ return {
+ selectedModel,
+ selectedReasoning,
+ prdPrompt: value?.prdPrompt?.trim() || defaults.prdPrompt,
+ specPrompt: value?.specPrompt?.trim() || defaults.specPrompt,
+ prdPath: normalizeProjectRelativePath(value?.prdPath) || defaults.prdPath,
+ specPath: normalizeProjectRelativePath(value?.specPath) || defaults.specPath,
+ supportingDocumentPaths: normalizeSupportingDocumentPaths(value?.supportingDocumentPaths)
+ };
+}
+
+export function normalizeProjectRelativePath(value?: string | null) {
+ return value?.trim().replace(/\\/g, "/").replace(/^\/+/, "") ?? "";
+}
+
+export function normalizeSupportingDocumentPaths(value?: string[] | null) {
+ return (value ?? [])
+ .map((entry) => normalizeProjectRelativePath(entry))
+ .filter((entry, index, entries) => entry.length > 0 && entries.indexOf(entry) === index);
+}
+
/** Serializes supporting-document paths into the newline-separated textarea format. */
export function formatSupportingDocumentPaths(paths: string[]) {
  return paths.join("\n");
}
+
/**
 * Parses a newline-separated textarea value (CRLF tolerated) back into a
 * normalized, de-duplicated list of supporting-document paths.
 */
export function parseSupportingDocumentPaths(value: string) {
  return normalizeSupportingDocumentPaths(value.split(/\r?\n/));
}
+
+function isModelId(value?: string | null): value is ModelId {
+ return Boolean(value && VALID_MODEL_IDS.has(value as ModelId));
+}
+
+function isReasoningProfileId(value?: string | null): value is ReasoningProfileId {
+ return Boolean(value && VALID_REASONING_PROFILES.has(value as ReasoningProfileId));
+}
diff --git a/src/lib/runtime.ts b/src/lib/runtime.ts
index 37e6371..7a938ed 100644
--- a/src/lib/runtime.ts
+++ b/src/lib/runtime.ts
@@ -7,6 +7,8 @@ import type {
EnvironmentStatus,
ModelId,
ModelProvider,
+ ProjectContext,
+ ProjectSettings,
ReasoningProfileId,
WorkspaceDocument,
WorkspaceScanResult,
@@ -84,6 +86,36 @@ export async function pickDocument(): Promise {
return invoke("pick_document");
}
+export async function pickProjectFolder(): Promise {
+ if (!isTauriRuntime()) {
+ return null;
+ }
+
+ return invoke("pick_project_folder");
+}
+
+export async function loadProjectContext(folderPath: string): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Project configuration requires the desktop runtime.");
+ }
+
+ return invoke("load_project_context", { folderPath });
+}
+
+export async function saveProjectSettings(payload: {
+ folderPath: string;
+ settings: ProjectSettings;
+}): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Project configuration requires the desktop runtime.");
+ }
+
+ return invoke("save_project_settings", {
+ folderPath: payload.folderPath,
+ settings: payload.settings
+ });
+}
+
export async function openWorkspaceFolder(): Promise {
if (!isTauriRuntime()) {
return null;
@@ -134,9 +166,39 @@ export async function startAgentRun(
await invoke("spawn_cli_agent", { specPayload, mode, model, reasoning });
}
+export async function generatePrdDocument(payload: {
+ workspaceRoot: string;
+ outputPath: string;
+ promptTemplate: string;
+ userPrompt: string;
+ provider: ModelProvider;
+ model: ModelId;
+ reasoning: ReasoningProfileId;
+ claudePath?: string;
+ codexPath?: string;
+}): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("AI PRD generation requires the desktop runtime.");
+ }
+
+ return invoke("generate_prd_document", {
+ workspaceRoot: payload.workspaceRoot,
+ outputPath: payload.outputPath,
+ promptTemplate: payload.promptTemplate,
+ userPrompt: payload.userPrompt,
+ provider: payload.provider,
+ model: payload.model,
+ reasoning: payload.reasoning,
+ claudePath: emptyToNull(payload.claudePath),
+ codexPath: emptyToNull(payload.codexPath)
+ });
+}
+
export async function generateSpecDocument(payload: {
- prdPath: string;
+ workspaceRoot: string;
+ outputPath: string;
prdContent: string;
+ promptTemplate: string;
userPrompt: string;
provider: ModelProvider;
model: ModelId;
@@ -149,8 +211,10 @@ export async function generateSpecDocument(payload: {
}
return invoke("generate_spec_document", {
- prdPath: payload.prdPath,
+ workspaceRoot: payload.workspaceRoot,
+ outputPath: payload.outputPath,
prdContent: payload.prdContent,
+ promptTemplate: payload.promptTemplate,
userPrompt: payload.userPrompt,
provider: payload.provider,
model: payload.model,
diff --git a/src/screens/ConfigurationScreen.tsx b/src/screens/ConfigurationScreen.tsx
new file mode 100644
index 0000000..f2c9832
--- /dev/null
+++ b/src/screens/ConfigurationScreen.tsx
@@ -0,0 +1,276 @@
+import { Folder, Refresh, Terminal } from "iconoir-react";
+
+import { CliHealthCard } from "../components/CliHealthCard";
+import { ProjectAiSettingsCard } from "../components/ProjectAiSettingsCard";
+import { ProjectDocumentsCard } from "../components/ProjectDocumentsCard";
+import type { EnvironmentStatus, ModelId, ReasoningProfileId } from "../types";
+
+interface ConfigurationScreenProps {
+ desktopRuntime: boolean;
+ environment: EnvironmentStatus;
+ claudePath: string;
+ codexPath: string;
+ workspaceRootName: string;
+ workspaceRootPath: string;
+ settingsPath: string;
+ hasSavedSettings: boolean;
+ isProjectLoading: boolean;
+ isSaving: boolean;
+ statusMessage: string;
+ errorMessage: string;
+ selectedModel: ModelId;
+ selectedReasoning: ReasoningProfileId;
+ prdPrompt: string;
+ specPrompt: string;
+ prdPath: string;
+ specPath: string;
+ supportingDocumentsValue: string;
+ onPickFolder: () => void;
+ onRefresh: () => void;
+ onContinue: () => void;
+ onClaudePathChange: (value: string) => void;
+ onCodexPathChange: (value: string) => void;
+ onModelChange: (model: ModelId) => void;
+ onReasoningChange: (reasoning: ReasoningProfileId) => void;
+ onPrdPromptChange: (value: string) => void;
+ onSpecPromptChange: (value: string) => void;
+ onPrdPathChange: (value: string) => void;
+ onSpecPathChange: (value: string) => void;
+ onSupportingDocumentsChange: (value: string) => void;
+}
+
+export function ConfigurationScreen({
+ desktopRuntime,
+ environment,
+ claudePath,
+ codexPath,
+ workspaceRootName,
+ workspaceRootPath,
+ settingsPath,
+ hasSavedSettings,
+ isProjectLoading,
+ isSaving,
+ statusMessage,
+ errorMessage,
+ selectedModel,
+ selectedReasoning,
+ prdPrompt,
+ specPrompt,
+ prdPath,
+ specPath,
+ supportingDocumentsValue,
+ onPickFolder,
+ onRefresh,
+ onContinue,
+ onClaudePathChange,
+ onCodexPathChange,
+ onModelChange,
+ onReasoningChange,
+ onPrdPromptChange,
+ onSpecPromptChange,
+ onPrdPathChange,
+ onSpecPathChange,
+ onSupportingDocumentsChange
+}: ConfigurationScreenProps) {
+ const canContinue = desktopRuntime && workspaceRootPath.length > 0 && !isSaving;
+
+ return (
+
+
+
+
+
+
+ Project Setup
+
+
+ Configure SpecForge Before Review Starts
+
+
+ Choose the project folder, verify the available CLIs, set the default AI prompts
+ and model behavior, and point SpecForge at the PRD/spec files you want this
+ workspace to use.
+
+
+
+
+
+ Refresh
+
+
+
+ {statusMessage ? (
+ {statusMessage}
+ ) : null}
+ {errorMessage ? (
+ {errorMessage}
+ ) : null}
+
+
+
+
+
+
+
+
+ {isProjectLoading ? "Opening..." : "Select Folder"}
+
+
+
+
+
Workspace: {workspaceRootName || "No folder selected yet"}
+
Path: {workspaceRootPath || "Pick a folder to begin"}
+
Settings file: {settingsPath || ".specforge/settings.json"}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Claude CLI override
+ onClaudePathChange(event.target.value)}
+ placeholder="Optional manual path"
+ value={claudePath}
+ />
+
+
+
+ Codex CLI override
+ onCodexPathChange(event.target.value)}
+ placeholder="Optional manual path"
+ value={codexPath}
+ />
+
+
+
+
+
+
+
+
+
+
+
+
+ Ready To Continue
+
+
+
+ {desktopRuntime
+ ? "Save the current project settings into `.specforge/settings.json` and continue into the review workspace."
+ : "The desktop runtime is required to create `.specforge/settings.json` inside the selected project folder."}
+
+
+
+
+ {isSaving
+ ? "Saving..."
+ : hasSavedSettings
+ ? "Save Changes and Continue"
+ : "Create .specforge and Continue"}
+
+
+
+
+
+ );
+}
+
+function StepHeading({
+ number,
+ title,
+ description
+}: {
+ number: string;
+ title: string;
+ description: string;
+}) {
+ return (
+
+
+ {number}
+
+
+
{title}
+
{description}
+
+
+ );
+}
+
+const PANEL_CLASS =
+ "grid gap-4 rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
+
+const FIELD_LABEL_CLASS =
+ "text-sm font-medium leading-6 text-[var(--text-subtle)]";
+
+const INPUT_CLASS =
+ "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition focus:border-[var(--accent)]";
+
+const SECONDARY_BUTTON_CLASS =
+ "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
+
+const PRIMARY_BUTTON_CLASS =
+ "inline-flex items-center justify-center gap-2 rounded-[1rem] border-0 bg-[linear-gradient(135deg,var(--accent),#ff79c6)] px-4 py-3 font-semibold text-[#15131c] transition hover:-translate-y-0.5 hover:opacity-95";
diff --git a/src/store/useProjectStore.ts b/src/store/useProjectStore.ts
index 5640933..1383143 100644
--- a/src/store/useProjectStore.ts
+++ b/src/store/useProjectStore.ts
@@ -7,11 +7,13 @@ import {
getReasoningLabel,
normalizeReasoningProfile
} from "../lib/agentConfig";
+import { buildDefaultProjectSettings, normalizeProjectSettings } from "../lib/projectConfig";
import type {
AutonomyMode,
EditorTab,
ModelId,
PaneMode,
+ ProjectSettings,
ReasoningProfileId,
SelectionRange,
SpecAnnotation,
@@ -23,6 +25,11 @@ interface ProjectState {
specContent: string;
prdPath: string;
specPath: string;
+ configuredPrdPath: string;
+ configuredSpecPath: string;
+ supportingDocumentPaths: string[];
+ prdPromptTemplate: string;
+ specPromptTemplate: string;
selectedModel: ModelId;
selectedReasoning: ReasoningProfileId;
autonomyMode: AutonomyMode;
@@ -34,6 +41,12 @@ interface ProjectState {
annotations: SpecAnnotation[];
isSpecApproved: boolean;
openEditorTabs: EditorTab[];
+ setProjectSettings: (settings: Partial) => void;
+ setConfiguredPrdPath: (path: string) => void;
+ setConfiguredSpecPath: (path: string) => void;
+ setSupportingDocumentPaths: (paths: string[]) => void;
+ setPrdPromptTemplate: (prompt: string) => void;
+ setSpecPromptTemplate: (prompt: string) => void;
setPrdContent: (content: string, path?: string) => void;
setSpecContent: (content: string, path?: string) => void;
setSelectedModel: (model: ModelId) => void;
@@ -110,12 +123,9 @@ function createEditorTabId(path: string) {
}
export const useProjectStore = create((set, get) => ({
+ ...buildInitialProjectState(),
prdContent: "",
specContent: "",
- prdPath: "docs/PRD.md",
- specPath: "docs/SPEC.md",
- selectedModel: DEFAULT_MODEL_ID,
- selectedReasoning: DEFAULT_REASONING_PROFILE,
autonomyMode: "milestone",
activeTab: "review",
prdPaneMode: "preview",
@@ -125,6 +135,34 @@ export const useProjectStore = create((set, get) => ({
annotations: buildInitialAnnotations(),
isSpecApproved: false,
openEditorTabs: [],
+ setProjectSettings: (settings) =>
+ set((state) => {
+ const nextSettings = normalizeProjectSettings({
+ selectedModel: state.selectedModel,
+ selectedReasoning: state.selectedReasoning,
+ prdPrompt: state.prdPromptTemplate,
+ specPrompt: state.specPromptTemplate,
+ prdPath: state.configuredPrdPath,
+ specPath: state.configuredSpecPath,
+ supportingDocumentPaths: state.supportingDocumentPaths,
+ ...settings
+ });
+
+ return {
+ configuredPrdPath: nextSettings.prdPath,
+ configuredSpecPath: nextSettings.specPath,
+ prdPromptTemplate: nextSettings.prdPrompt,
+ specPromptTemplate: nextSettings.specPrompt,
+ selectedModel: nextSettings.selectedModel,
+ selectedReasoning: nextSettings.selectedReasoning,
+ supportingDocumentPaths: nextSettings.supportingDocumentPaths
+ };
+ }),
+ setConfiguredPrdPath: (configuredPrdPath) => set({ configuredPrdPath }),
+ setConfiguredSpecPath: (configuredSpecPath) => set({ configuredSpecPath }),
+ setSupportingDocumentPaths: (supportingDocumentPaths) => set({ supportingDocumentPaths }),
+ setPrdPromptTemplate: (prdPromptTemplate) => set({ prdPromptTemplate }),
+ setSpecPromptTemplate: (specPromptTemplate) => set({ specPromptTemplate }),
setPrdContent: (prdContent, path) =>
set({
prdContent,
@@ -236,3 +274,19 @@ export const useProjectStore = create((set, get) => ({
});
}
}));
+
+function buildInitialProjectState() {
+ const defaults = normalizeProjectSettings();
+
+ return {
+ configuredPrdPath: defaults.prdPath,
+ configuredSpecPath: defaults.specPath,
+ prdPath: defaults.prdPath,
+ specPath: defaults.specPath,
+ prdPromptTemplate: defaults.prdPrompt,
+ specPromptTemplate: defaults.specPrompt,
+ selectedModel: DEFAULT_MODEL_ID,
+ selectedReasoning: DEFAULT_REASONING_PROFILE,
+ supportingDocumentPaths: defaults.supportingDocumentPaths
+ };
+}
diff --git a/src/store/useSettingsStore.ts b/src/store/useSettingsStore.ts
index 681a756..63d1e54 100644
--- a/src/store/useSettingsStore.ts
+++ b/src/store/useSettingsStore.ts
@@ -6,11 +6,13 @@ interface SettingsState {
theme: ThemeMode;
claudePath: string;
codexPath: string;
+ lastProjectPath: string;
environment: EnvironmentStatus;
workspaceEntries: WorkspaceEntry[];
setTheme: (theme: ThemeMode) => void;
setClaudePath: (path: string) => void;
setCodexPath: (path: string) => void;
+ setLastProjectPath: (path: string) => void;
setEnvironment: (environment: EnvironmentStatus) => void;
setWorkspaceEntries: (entries: WorkspaceEntry[]) => void;
}
@@ -19,6 +21,7 @@ interface PersistedSettings {
theme: ThemeMode;
claudePath: string;
codexPath: string;
+ lastProjectPath: string;
}
const SETTINGS_STORAGE_KEY = "specforge.settings";
@@ -52,7 +55,8 @@ function readPersistedSettings(): PersistedSettings {
return {
theme: "dracula",
claudePath: "",
- codexPath: ""
+ codexPath: "",
+ lastProjectPath: ""
};
}
@@ -63,7 +67,8 @@ function readPersistedSettings(): PersistedSettings {
return {
theme: "dracula",
claudePath: "",
- codexPath: ""
+ codexPath: "",
+ lastProjectPath: ""
};
}
@@ -72,13 +77,15 @@ function readPersistedSettings(): PersistedSettings {
return {
theme: parsedValue.theme ?? "dracula",
claudePath: parsedValue.claudePath ?? "",
- codexPath: parsedValue.codexPath ?? ""
+ codexPath: parsedValue.codexPath ?? "",
+ lastProjectPath: parsedValue.lastProjectPath ?? ""
};
} catch {
return {
theme: "dracula",
claudePath: "",
- codexPath: ""
+ codexPath: "",
+ lastProjectPath: ""
};
}
}
@@ -97,6 +104,7 @@ export const useSettingsStore = create((set, get) => ({
theme: persistedSettings.theme,
claudePath: persistedSettings.claudePath,
codexPath: persistedSettings.codexPath,
+ lastProjectPath: persistedSettings.lastProjectPath,
environment: createEnvironmentPlaceholder(),
workspaceEntries: [],
setTheme: (theme) => {
@@ -104,7 +112,8 @@ export const useSettingsStore = create((set, get) => ({
persistSettings({
theme,
claudePath: get().claudePath,
- codexPath: get().codexPath
+ codexPath: get().codexPath,
+ lastProjectPath: get().lastProjectPath
});
},
setClaudePath: (claudePath) => {
@@ -112,7 +121,8 @@ export const useSettingsStore = create((set, get) => ({
persistSettings({
theme: get().theme,
claudePath,
- codexPath: get().codexPath
+ codexPath: get().codexPath,
+ lastProjectPath: get().lastProjectPath
});
},
setCodexPath: (codexPath) => {
@@ -120,7 +130,17 @@ export const useSettingsStore = create((set, get) => ({
persistSettings({
theme: get().theme,
claudePath: get().claudePath,
- codexPath
+ codexPath,
+ lastProjectPath: get().lastProjectPath
+ });
+ },
+ setLastProjectPath: (lastProjectPath) => {
+ set({ lastProjectPath });
+ persistSettings({
+ theme: get().theme,
+ claudePath: get().claudePath,
+ codexPath: get().codexPath,
+ lastProjectPath
});
},
setEnvironment: (environment) => set({ environment }),
diff --git a/src/types.ts b/src/types.ts
index 394c651..fc154d2 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -20,6 +20,7 @@ export type WorkspaceTab = WorkspaceBaseTab | `file:${string}`;
export type PaneMode = "preview" | "edit";
export type AgentStatus =
| "idle"
+ | "generating_prd"
| "generating_spec"
| "executing"
| "awaiting_approval"
@@ -69,6 +70,28 @@ export interface WorkspaceDocument {
fileName: string;
}
/** Project-scoped AI and document configuration persisted in `.specforge/settings.json`. */
export interface ProjectSettings {
  // Model and reasoning profile used for AI document generation.
  selectedModel: ModelId;
  selectedReasoning: ReasoningProfileId;
  // Default prompt templates for PRD and spec generation.
  prdPrompt: string;
  specPrompt: string;
  // Workspace-relative locations of the PRD/spec documents.
  prdPath: string;
  specPath: string;
  // Additional workspace-relative documents supplied as generation context.
  supportingDocumentPaths: string[];
}
+
/** Everything the backend reports about a selected project folder. */
export interface ProjectContext {
  // Folder name and absolute path of the workspace root.
  rootName: string;
  rootPath: string;
  // Absolute path of the `.specforge/settings.json` file.
  settingsPath: string;
  // True when a settings file already existed on disk.
  hasSavedSettings: boolean;
  // Normalized settings (saved values or defaults).
  settings: ProjectSettings;
  // Scanned workspace entries; ignoredFileCount tracks filtered-out files.
  entries: WorkspaceEntry[];
  ignoredFileCount: number;
  // Loaded PRD/spec documents, or null when the configured file is absent.
  prdDocument: WorkspaceDocument | null;
  specDocument: WorkspaceDocument | null;
}
+
export interface WorkspaceScanResult {
rootName: string;
entries: WorkspaceEntry[];
From dfb83690635c18fb790a0ce98665aca444c660c4 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sat, 11 Apr 2026 11:04:50 -0300
Subject: [PATCH 02/32] Refactor Tauri lib into focused Rust modules
---
src-tauri/src/agent.rs | 339 ++++++
src-tauri/src/constants.rs | 40 +
src-tauri/src/documents.rs | 209 ++++
src-tauri/src/environment.rs | 121 +++
src-tauri/src/generation.rs | 317 ++++++
src-tauri/src/git.rs | 53 +
src-tauri/src/lib.rs | 1872 +---------------------------------
src-tauri/src/models.rs | 103 ++
src-tauri/src/paths.rs | 93 ++
src-tauri/src/project.rs | 284 ++++++
src-tauri/src/state.rs | 35 +
src-tauri/src/workspace.rs | 334 ++++++
src/App.tsx | 4 +-
13 files changed, 1951 insertions(+), 1853 deletions(-)
create mode 100644 src-tauri/src/agent.rs
create mode 100644 src-tauri/src/constants.rs
create mode 100644 src-tauri/src/documents.rs
create mode 100644 src-tauri/src/environment.rs
create mode 100644 src-tauri/src/generation.rs
create mode 100644 src-tauri/src/git.rs
create mode 100644 src-tauri/src/models.rs
create mode 100644 src-tauri/src/paths.rs
create mode 100644 src-tauri/src/project.rs
create mode 100644 src-tauri/src/state.rs
create mode 100644 src-tauri/src/workspace.rs
diff --git a/src-tauri/src/agent.rs b/src-tauri/src/agent.rs
new file mode 100644
index 0000000..d5ac04f
--- /dev/null
+++ b/src-tauri/src/agent.rs
@@ -0,0 +1,339 @@
+use crate::constants::SAMPLE_DIFF;
+use crate::models::{AgentStateEvent, ApprovalWaitOutcome, SimulatedStep, StopState};
+use crate::state::{ExecutionRuntime, SharedState};
+use std::sync::Arc;
+use std::thread;
+use std::time::Duration;
+use tauri::{AppHandle, Emitter, State};
+
+#[tauri::command]
+pub(crate) fn spawn_cli_agent(
+ app: AppHandle,
+ state: State,
+ spec_payload: String,
+ mode: String,
+ model: String,
+ reasoning: String,
+) -> Result<(), String> {
+ let runtime = state.runtime.clone();
+ let run_id = {
+ let mut control = runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Execution lock was poisoned."))?;
+ control.run_id = control.run_id.wrapping_add(1);
+ control.awaiting_approval = false;
+ control.stop_requested = false;
+ control.run_id
+ };
+
+ thread::spawn(move || {
+ run_simulated_agent(app, runtime, run_id, spec_payload, mode, model, reasoning);
+ });
+
+ Ok(())
+}
+
+#[tauri::command]
+pub(crate) fn approve_action(state: State) -> Result<(), String> {
+ let mut control = state
+ .runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Execution lock was poisoned."))?;
+ control.awaiting_approval = false;
+ state.runtime.signal.notify_all();
+ Ok(())
+}
+
+#[tauri::command]
+pub(crate) fn kill_agent_process(state: State) -> Result<(), String> {
+ let mut control = state
+ .runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Execution lock was poisoned."))?;
+ control.stop_requested = true;
+ control.awaiting_approval = false;
+ state.runtime.signal.notify_all();
+ Ok(())
+}
+
+pub(crate) fn run_simulated_agent(
+ app: AppHandle,
+ runtime: Arc,
+ run_id: u64,
+ spec_payload: String,
+ mode: String,
+ model: String,
+ reasoning: String,
+) {
+ let heading_count = spec_payload
+ .lines()
+ .filter(|line| line.trim_start().starts_with('#'))
+ .count();
+ let steps = build_simulated_steps(heading_count, &mode, &model, &reasoning);
+ emit_state(&app, "executing", Some("Pre-flight Check"), None, None);
+
+ for step in steps {
+ match stop_state(&runtime, run_id) {
+ StopState::Continue => {}
+ StopState::StopRequested => {
+ emit_line(
+ &app,
+ "Execution interrupted before the next step could run.",
+ );
+ emit_state(
+ &app,
+ "halted",
+ Some(step.milestone),
+ None,
+ Some("Execution interrupted by the operator."),
+ );
+ return;
+ }
+ StopState::Replaced => return,
+ }
+
+ thread::sleep(Duration::from_millis(step.delay_ms));
+ match stop_state(&runtime, run_id) {
+ StopState::Continue => {}
+ StopState::StopRequested => {
+ emit_line(
+ &app,
+ "Execution interrupted before the next step could run.",
+ );
+ emit_state(
+ &app,
+ "halted",
+ Some(step.milestone),
+ None,
+ Some("Execution interrupted by the operator."),
+ );
+ return;
+ }
+ StopState::Replaced => return,
+ }
+ emit_state(&app, "executing", Some(step.milestone), None, None);
+ emit_line(&app, &step.line);
+
+ if step.gate {
+ let summary = if mode == "stepped" {
+ "Stepped approval required before the next write action."
+ } else {
+ "Milestone boundary reached. Review the diff before execution resumes."
+ };
+
+ match wait_for_approval(&app, &runtime, run_id, step.milestone, summary) {
+ Ok(ApprovalWaitOutcome::Approved) => {}
+ Ok(ApprovalWaitOutcome::StopRequested) => {
+ emit_line(&app, "Execution interrupted during approval gate.");
+ emit_state(
+ &app,
+ "halted",
+ Some(step.milestone),
+ None,
+ Some("Execution interrupted by the operator."),
+ );
+ return;
+ }
+ Ok(ApprovalWaitOutcome::Replaced) => return,
+ Err(message) => {
+ emit_line(&app, &message);
+ emit_state(
+ &app,
+ "error",
+ Some(step.milestone),
+ None,
+ Some("Approval synchronization failed."),
+ );
+ return;
+ }
+ }
+
+ emit_line(&app, "Approval received. Resuming the agent loop.");
+ }
+ }
+
+ if !matches!(stop_state(&runtime, run_id), StopState::Continue) {
+ return;
+ }
+
+ emit_line(
+ &app,
+ "Execution complete. Final diff is ready for inspection.",
+ );
+ emit_state(
+ &app,
+ "completed",
+ Some("Execution Complete"),
+ Some(SAMPLE_DIFF),
+ Some("Simulated agent execution completed successfully."),
+ );
+}
+
+pub(crate) fn build_simulated_steps(
+ heading_count: usize,
+ mode: &str,
+ model: &str,
+ reasoning: &str,
+) -> Vec {
+ let mut steps = vec![
+ SimulatedStep {
+ delay_ms: 450,
+ line: format!(
+ "Loaded approved specification with {heading_count} markdown headings into {model} using the {reasoning} reasoning profile."
+ ),
+ milestone: "Pre-flight Check",
+ gate: false,
+ },
+ SimulatedStep {
+ delay_ms: 650,
+ line: String::from(
+ "Scanning CLI availability and staging the current repository diff.",
+ ),
+ milestone: "Pre-flight Check",
+ gate: false,
+ },
+ SimulatedStep {
+ delay_ms: 750,
+ line: String::from(
+ "Mapping milestones for review UI, Zustand stores, and Tauri commands.",
+ ),
+ milestone: "Milestone Planning",
+ gate: false,
+ },
+ ];
+
+ if mode == "stepped" {
+ steps.push(SimulatedStep {
+ delay_ms: 650,
+ line: String::from(
+ "A write action is ready to execute against the approved specification.",
+ ),
+ milestone: "Stepped Approval",
+ gate: true,
+ });
+ }
+
+ steps.extend([
+ SimulatedStep {
+ delay_ms: 700,
+ line: String::from(
+ "Applying Dracula theme tokens and composing the review workspace shell.",
+ ),
+ milestone: "Compose Review Workspace",
+ gate: false,
+ },
+ SimulatedStep {
+ delay_ms: 650,
+ line: String::from(
+ "Wiring project, settings, and agent stores into the execution dashboard.",
+ ),
+ milestone: "Compose Review Workspace",
+ gate: false,
+ },
+ ]);
+
+ if mode == "milestone" {
+ steps.push(SimulatedStep {
+ delay_ms: 650,
+ line: String::from("The first milestone is complete and ready for diff review."),
+ milestone: "Milestone Approval",
+ gate: true,
+ });
+ }
+
+ steps.extend([
+ SimulatedStep {
+ delay_ms: 650,
+ line: String::from("Streaming terminal telemetry and enabling approval controls."),
+ milestone: "Execution Dashboard",
+ gate: false,
+ },
+ SimulatedStep {
+ delay_ms: 550,
+ line: String::from("Packaging a final summary for IDE handoff."),
+ milestone: "Execution Dashboard",
+ gate: false,
+ },
+ ]);
+
+ steps
+}
+
+pub(crate) fn wait_for_approval(
+ app: &AppHandle,
+ runtime: &Arc,
+ run_id: u64,
+ milestone: &str,
+ summary: &str,
+) -> Result {
+ emit_state(
+ app,
+ "awaiting_approval",
+ Some(milestone),
+ Some(SAMPLE_DIFF),
+ Some(summary),
+ );
+
+ let mut control = runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Execution lock was poisoned."))?;
+ control.awaiting_approval = true;
+ runtime.signal.notify_all();
+
+ while control.run_id == run_id && control.awaiting_approval && !control.stop_requested {
+ control = runtime
+ .signal
+ .wait(control)
+ .map_err(|_| String::from("Execution lock was poisoned."))?;
+ }
+
+ if control.stop_requested {
+ return Ok(ApprovalWaitOutcome::StopRequested);
+ }
+
+ if control.run_id != run_id {
+ return Ok(ApprovalWaitOutcome::Replaced);
+ }
+
+ Ok(ApprovalWaitOutcome::Approved)
+}
+
+pub(crate) fn stop_state(runtime: &Arc, run_id: u64) -> StopState {
+ runtime
+ .control
+ .lock()
+ .map(|control| {
+ if control.stop_requested {
+ StopState::StopRequested
+ } else if control.run_id != run_id {
+ StopState::Replaced
+ } else {
+ StopState::Continue
+ }
+ })
+ .unwrap_or(StopState::StopRequested)
+}
+
/// Streams one terminal line to the frontend over the `cli-output` event.
/// Emission failures are deliberately ignored (best-effort telemetry).
pub(crate) fn emit_line(app: &AppHandle, line: &str) {
    let _ = app.emit("cli-output", line.to_string());
}
+
+pub(crate) fn emit_state(
+ app: &AppHandle,
+ status: &str,
+ current_milestone: Option<&str>,
+ pending_diff: Option<&str>,
+ summary: Option<&str>,
+) {
+ let payload = AgentStateEvent {
+ status: status.to_string(),
+ current_milestone: current_milestone.map(|value| value.to_string()),
+ pending_diff: pending_diff.map(|value| value.to_string()),
+ summary: summary.map(|value| value.to_string()),
+ };
+ let _ = app.emit("agent-state", payload);
+}
diff --git a/src-tauri/src/constants.rs b/src-tauri/src/constants.rs
new file mode 100644
index 0000000..04ca111
--- /dev/null
+++ b/src-tauri/src/constants.rs
@@ -0,0 +1,40 @@
/// Canned diff shown at approval gates and on completion of the simulated
/// agent run (the simulator does not produce a real diff).
pub(crate) const SAMPLE_DIFF: &str = r#"diff --git a/src/App.tsx b/src/App.tsx
index 0000000..forge42 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@
- Render placeholder starter card
+ Introduce PRD/spec review workspace with execution controls
+ Add Dracula-first theme tokens and persisted preferences
+ Surface CLI health, diff approvals, and terminal streaming"#;

/// Workspace-relative location of the project-scoped settings file.
/// NOTE(review): must stay in sync with the constant of the same name in
/// src/lib/projectConfig.ts on the frontend.
pub(crate) const SPECFORGE_SETTINGS_RELATIVE_PATH: &str = ".specforge/settings.json";
/// Default workspace-relative PRD/spec document locations.
pub(crate) const DEFAULT_PROJECT_PRD_PATH: &str = "docs/PRD.md";
pub(crate) const DEFAULT_PROJECT_SPEC_PATH: &str = "docs/SPEC.md";
+pub(crate) const DEFAULT_PRD_PROMPT: &str = r#"Act as an Expert Senior Product Manager. Your goal is to help me write a comprehensive, well-structured Product Requirements Document (PRD) for a new [product / feature / app] called [Project Name].
+
+I have some initial ideas, but I want to make sure the PRD is thorough. Before you draft the full document, please ask me a series of clarifying questions to gather the necessary context.
+
+Please ask about:
+- The core problem we are solving
+- The target audience/user personas
+- Key features and user flows
+- Success metrics (KPIs)
+- Technical or timeline constraints
+
+Ask me these questions one or two at a time so I do not get overwhelmed. Once you have enough context, we will move on to drafting the actual PRD."#;
+pub(crate) const DEFAULT_SPEC_PROMPT: &str = r#"Act as an Expert Software Architect and Tech Lead. I have attached the Product Requirements Document (PRD) for our upcoming project.
+
+Your task is to analyze this PRD and draft a comprehensive Technical Specification Document.
+
+Please structure the spec with the following sections:
+
+1. High-Level Architecture: A conceptual overview of how the system components will interact.
+2. Tech Stack & Tooling: Define the frontend, backend, and infrastructure.
+3. Data Models & Database Schema: Define the core entities, their attributes, and relationships.
+4. API Contracts: Outline the primary endpoints (methods, routes, request/response structures) needed to support the user flows.
+5. Component & State Management: How data will flow through the application and how the UI will be structured.
+6. Security & Edge Cases: Potential vulnerabilities, error handling, and performance bottlenecks.
+7. Engineering Milestones: Break the implementation down into logical, phased deliverables.
+
+Before writing the full document, please provide a brief bulleted summary of your proposed technical approach, and ask me up to 3 clarifying questions about any technical constraints or non-functional requirements that might be missing from the PRD."#;
diff --git a/src-tauri/src/documents.rs b/src-tauri/src/documents.rs
new file mode 100644
index 0000000..0305108
--- /dev/null
+++ b/src-tauri/src/documents.rs
@@ -0,0 +1,209 @@
+use crate::{
+ models::WorkspaceDocument,
+ paths::{
+ canonicalize_existing_path, resolve_project_document_path, resolve_relative_path_under_root,
+ },
+};
+use lopdf::Document;
+use std::{
+ fs,
+ path::{Path, PathBuf},
+};
+
+#[tauri::command]
+pub(crate) fn parse_document(file_path: String) -> Result {
+ let resolved_path = resolve_project_document_path(&file_path)?;
+ parse_supported_document(&resolved_path)
+}
+
+#[tauri::command]
+pub(crate) fn pick_document() -> Result, String> {
+ let Some(file_path) = rfd::FileDialog::new()
+ .add_filter("Documents", &["md", "pdf"])
+ .pick_file()
+ else {
+ return Ok(None);
+ };
+
+ let resolved_path = canonicalize_existing_path(&file_path)
+ .map_err(|error| format!("Unable to prepare selected document: {error}"))?;
+ let content = parse_supported_document(&resolved_path)?;
+ let file_name = resolved_path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or("Document")
+ .to_string();
+
+ Ok(Some(WorkspaceDocument {
+ content,
+ source_path: resolved_path.display().to_string(),
+ file_name,
+ }))
+}
+
+pub(crate) fn load_configured_workspace_document(
+ workspace_root: &Path,
+ relative_path: &str,
+) -> Result , String> {
+ let resolved_path = resolve_relative_path_under_root(workspace_root, relative_path)?;
+
+ if !resolved_path.exists() {
+ return Ok(None);
+ }
+
+ let content = parse_workspace_document(&resolved_path)?;
+ let file_name = resolved_path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or("Document")
+ .to_string();
+
+ Ok(Some(WorkspaceDocument {
+ content,
+ source_path: resolved_path.display().to_string(),
+ file_name,
+ }))
+}
+
+pub(crate) fn write_generated_workspace_document(
+ workspace_root: &str,
+ output_path: &str,
+ generated_content: String,
+ field_name: &str,
+) -> Result {
+ let trimmed_root = workspace_root.trim();
+
+ if trimmed_root.is_empty() {
+ return Err(String::from("A workspace root is required."));
+ }
+
+ let canonical_root = canonicalize_existing_path(&PathBuf::from(trimmed_root))
+ .map_err(|error| format!("Unable to resolve workspace root {}: {error}", trimmed_root))?;
+ let resolved_output_path = resolve_relative_path_under_root(&canonical_root, output_path)
+ .map_err(|error| format!("{field_name} is invalid: {error}"))?;
+ let rendered_document = format!(
+ "{}\n",
+ strip_wrapping_code_fence(generated_content.trim()).trim()
+ );
+
+ if rendered_document.trim().is_empty() {
+ return Err(String::from(
+ "The AI returned an empty document. Adjust the prompt and try again.",
+ ));
+ }
+
+ if resolved_output_path
+ .extension()
+ .and_then(|value| value.to_str())
+ .map(|value| !value.eq_ignore_ascii_case("md"))
+ .unwrap_or(true)
+ {
+ return Err(format!(
+ "{field_name} must point to a Markdown file inside the selected workspace."
+ ));
+ }
+
+ if let Some(parent_directory) = resolved_output_path.parent() {
+ fs::create_dir_all(parent_directory).map_err(|error| {
+ format!(
+ "Unable to create the document folder {}: {error}",
+ parent_directory.display()
+ )
+ })?;
+ }
+
+ fs::write(&resolved_output_path, rendered_document.as_bytes()).map_err(|error| {
+ format!(
+ "Unable to save the generated document to {}: {error}",
+ resolved_output_path.display()
+ )
+ })?;
+
+ Ok(WorkspaceDocument {
+ content: rendered_document,
+ source_path: resolved_output_path.display().to_string(),
+ file_name: resolved_output_path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or("Document.md")
+ .to_string(),
+ })
+}
+
+pub(crate) fn parse_workspace_document(path: &Path) -> Result {
+ match path
+ .extension()
+ .and_then(|extension| extension.to_str())
+ .map(|extension| extension.to_ascii_lowercase())
+ .as_deref()
+ {
+ Some("pdf") => read_pdf_text(path),
+ _ => fs::read_to_string(path).map_err(|error| {
+ format!(
+ "Unable to read workspace document {}: {error}",
+ path.display()
+ )
+ }),
+ }
+}
+
+pub(crate) fn parse_supported_document(path: &Path) -> Result {
+ match path
+ .extension()
+ .and_then(|extension| extension.to_str())
+ .map(|extension| extension.to_ascii_lowercase())
+ .as_deref()
+ {
+ Some("md") => fs::read_to_string(path).map_err(|error| {
+ format!(
+ "Unable to read markdown document {}: {error}",
+ path.display()
+ )
+ }),
+ Some("pdf") => read_pdf_text(path),
+ _ => Err(String::from("Only .md and .pdf documents are supported.")),
+ }
+}
+
+fn read_pdf_text(path: &Path) -> Result {
+ let document = Document::load(path)
+ .map_err(|error| format!("Unable to open PDF document {}: {error}", path.display()))?;
+ let mut page_numbers = document.get_pages().keys().copied().collect::>();
+ page_numbers.sort_unstable();
+
+ document.extract_text(&page_numbers).map_err(|error| {
+ format!(
+ "Unable to extract PDF text from {}: {error}",
+ path.display()
+ )
+ })
+}
+
+fn strip_wrapping_code_fence(content: &str) -> String {
+ let trimmed = content.trim();
+
+ if !trimmed.starts_with("```") {
+ return trimmed.to_string();
+ }
+
+ let mut lines = trimmed.lines();
+ let Some(first_line) = lines.next() else {
+ return String::new();
+ };
+
+ if !first_line.trim_start().starts_with("```") {
+ return trimmed.to_string();
+ }
+
+ let remaining_lines = lines.collect::>();
+
+ if remaining_lines
+ .last()
+ .map(|line| !line.trim_start().starts_with("```"))
+ .unwrap_or(true)
+ {
+ return trimmed.to_string();
+ }
+
+ remaining_lines[..remaining_lines.len().saturating_sub(1)].join("\n")
+}
diff --git a/src-tauri/src/environment.rs b/src-tauri/src/environment.rs
new file mode 100644
index 0000000..ed7401d
--- /dev/null
+++ b/src-tauri/src/environment.rs
@@ -0,0 +1,121 @@
+use crate::models::{CliStatus, EnvironmentStatus};
+use crate::paths::resolve_override_path;
+use std::path::Path;
+use std::process::Command;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+#[tauri::command]
+pub(crate) fn run_environment_scan(
+ claude_path: Option,
+ codex_path: Option,
+) -> Result {
+ Ok(EnvironmentStatus {
+ scanned_at: current_timestamp(),
+ claude: inspect_binary("Claude CLI", "claude", claude_path.as_deref()),
+ codex: inspect_binary("Codex CLI", "codex", codex_path.as_deref()),
+ git: inspect_binary("Git", "git", None),
+ })
+}
+
+pub(crate) fn current_timestamp() -> String {
+ SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .map(|duration| duration.as_secs().to_string())
+ .unwrap_or_else(|_| String::from("0"))
+}
+
+pub(crate) fn inspect_binary(
+ display_name: &str,
+ binary_name: &str,
+ override_path: Option<&str>,
+) -> CliStatus {
+ let resolved_path = override_path
+ .and_then(|value| {
+ let candidate = resolve_override_path(value);
+ candidate.exists().then_some(candidate)
+ })
+ .or_else(|| which::which(binary_name).ok());
+
+ if let Some(path) = resolved_path {
+ match probe_binary_version(&path) {
+ Ok(version_detail) => {
+ let detail = if override_path.is_some() {
+ format!("Using manual override. {version_detail}")
+ } else {
+ format!("Detected on PATH. {version_detail}")
+ };
+
+ return CliStatus {
+ name: display_name.to_string(),
+ status: String::from("found"),
+ path: Some(path.display().to_string()),
+ detail,
+ };
+ }
+ Err(error) if override_path.is_some() => {
+ return CliStatus {
+ name: display_name.to_string(),
+ status: String::from("missing"),
+ path: None,
+ detail: format!(
+ "Manual override could not be executed at {}: {error}",
+ path.display()
+ ),
+ };
+ }
+ Err(_) => {}
+ }
+ }
+
+ CliStatus {
+ name: display_name.to_string(),
+ status: String::from("missing"),
+ path: None,
+ detail: String::from("Binary not found. Add a manual path or install it on PATH."),
+ }
+}
+
+pub(crate) fn resolve_cli_binary(
+ binary_name: &str,
+ override_path: Option<&str>,
+) -> Result {
+ if let Some(path_value) = override_path {
+ let candidate = resolve_override_path(path_value);
+
+ if !candidate.exists() {
+ return Err(format!(
+ "The configured {binary_name} path does not exist: {}",
+ candidate.display()
+ ));
+ }
+
+ return Ok(candidate);
+ }
+
+ which::which(binary_name).map_err(|_| {
+ format!(
+ "{binary_name} was not found on PATH. Set a manual binary path in Settings and refresh."
+ )
+ })
+}
+
+fn probe_binary_version(path: &Path) -> Result {
+ let output = Command::new(path)
+ .arg("--version")
+ .output()
+ .map_err(|error| error.to_string())?;
+ let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
+ let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
+
+ if !stdout.is_empty() {
+ return Ok(stdout);
+ }
+
+ if !stderr.is_empty() {
+ return Ok(stderr);
+ }
+
+ Ok(String::from(
+ "Binary detected. Version probe returned no output.",
+ ))
+}
diff --git a/src-tauri/src/generation.rs b/src-tauri/src/generation.rs
new file mode 100644
index 0000000..2b6e7e1
--- /dev/null
+++ b/src-tauri/src/generation.rs
@@ -0,0 +1,317 @@
+use crate::documents::write_generated_workspace_document;
+use crate::environment::resolve_cli_binary;
+use crate::models::WorkspaceDocument;
+use std::fs;
+use std::io::Write;
+use std::path::PathBuf;
+use std::process::{Command, Stdio};
+use std::time::{SystemTime, UNIX_EPOCH};
+
+#[tauri::command]
+pub(crate) fn generate_prd_document(
+ workspace_root: String,
+ output_path: String,
+ prompt_template: String,
+ user_prompt: String,
+ provider: String,
+ model: String,
+ reasoning: String,
+ claude_path: Option,
+ codex_path: Option,
+) -> Result {
+ let trimmed_prompt = user_prompt.trim();
+
+ if trimmed_prompt.is_empty() {
+ return Err(String::from(
+ "Add the product context you want the AI to consider.",
+ ));
+ }
+
+ let prompt_payload = build_generation_prompt(&prompt_template, trimmed_prompt, &[]);
+ let generated_prd = run_generation_request(
+ &provider,
+ &model,
+ &reasoning,
+ claude_path.as_deref(),
+ codex_path.as_deref(),
+ &prompt_payload,
+ )?;
+
+ write_generated_workspace_document(
+ &workspace_root,
+ &output_path,
+ generated_prd,
+ "PRD output path",
+ )
+}
+
+#[tauri::command]
+pub(crate) fn generate_spec_document(
+ workspace_root: String,
+ output_path: String,
+ prd_content: String,
+ prompt_template: String,
+ user_prompt: String,
+ provider: String,
+ model: String,
+ reasoning: String,
+ claude_path: Option,
+ codex_path: Option,
+) -> Result {
+ let trimmed_prd = prd_content.trim();
+ let trimmed_prompt = user_prompt.trim();
+
+ if trimmed_prd.is_empty() {
+ return Err(String::from(
+ "Load or write a PRD before generating a specification.",
+ ));
+ }
+
+ if trimmed_prompt.is_empty() {
+ return Err(String::from(
+ "Add the technical guidance you want the AI to consider.",
+ ));
+ }
+
+ let prompt_payload = build_generation_prompt(
+ &prompt_template,
+ trimmed_prompt,
+ &[("Attached Product Requirements Document (PRD)", trimmed_prd)],
+ );
+ let generated_spec = run_generation_request(
+ &provider,
+ &model,
+ &reasoning,
+ claude_path.as_deref(),
+ codex_path.as_deref(),
+ &prompt_payload,
+ )?;
+
+ write_generated_workspace_document(
+ &workspace_root,
+ &output_path,
+ generated_spec,
+ "SPEC output path",
+ )
+}
+
+pub(crate) fn build_generation_prompt(
+ prompt_template: &str,
+ user_prompt: &str,
+ attachments: &[(&str, &str)],
+) -> String {
+ let mut prompt = String::new();
+ prompt.push_str(prompt_template.trim());
+ prompt.push_str("\n\n");
+ prompt.push_str("Additional operator context:\n");
+ prompt.push_str(user_prompt.trim());
+
+ for (label, content) in attachments {
+ let trimmed_content = content.trim();
+
+ if trimmed_content.is_empty() {
+ continue;
+ }
+
+ prompt.push_str("\n\n");
+ prompt.push_str(label);
+ prompt.push_str(":\n");
+ prompt.push_str(trimmed_content);
+ }
+
+ prompt
+}
+
+pub(crate) fn run_generation_request(
+ provider: &str,
+ model: &str,
+ reasoning: &str,
+ claude_path: Option<&str>,
+ codex_path: Option<&str>,
+ prompt_payload: &str,
+) -> Result {
+ match provider {
+ "codex" => run_codex_generation(
+ &resolve_cli_binary("codex", codex_path)?,
+ model,
+ reasoning,
+ prompt_payload,
+ ),
+ "claude" => run_claude_generation(
+ &resolve_cli_binary("claude", claude_path)?,
+ model,
+ reasoning,
+ prompt_payload,
+ ),
+ _ => Err(format!("Unsupported model provider: {provider}")),
+ }
+}
+
+pub(crate) fn run_codex_generation(
+ binary_path: &std::path::Path,
+ model: &str,
+ reasoning: &str,
+ prompt_payload: &str,
+) -> Result {
+ let temp_dir = create_spec_generation_temp_dir("codex")?;
+ let output_path = temp_dir.join("generated-spec.md");
+ let reasoning_effort = map_codex_reasoning(reasoning);
+
+ let mut command = Command::new(binary_path);
+ command
+ .current_dir(&temp_dir)
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .arg("exec")
+ .arg("--color")
+ .arg("never")
+ .arg("--skip-git-repo-check")
+ .arg("--sandbox")
+ .arg("read-only")
+ .arg("--model")
+ .arg(model)
+ .arg("--config")
+ .arg(format!("model_reasoning_effort=\"{reasoning_effort}\""))
+ .arg("--output-last-message")
+ .arg(&output_path);
+
+ let result = run_command_with_stdin(&mut command, "Codex CLI", prompt_payload).and_then(
+ |output| {
+ if !output.status.success() {
+ return Err(format_process_failure("Codex CLI", &output));
+ }
+
+ match fs::read_to_string(&output_path) {
+ Ok(content) => Ok(content),
+ Err(read_error) => {
+ let stdout = String::from_utf8_lossy(&output.stdout).to_string();
+
+ if !stdout.trim().is_empty() {
+ Ok(stdout)
+ } else {
+ Err(format!(
+ "Codex CLI completed, but the generated spec could not be read: {read_error}"
+ ))
+ }
+ }
+ }
+ },
+ );
+
+ let _ = fs::remove_dir_all(&temp_dir);
+ result
+}
+
+pub(crate) fn run_claude_generation(
+ binary_path: &std::path::Path,
+ model: &str,
+ reasoning: &str,
+ prompt_payload: &str,
+) -> Result {
+ let temp_dir = create_spec_generation_temp_dir("claude")?;
+ let mut command = Command::new(binary_path);
+ command
+ .current_dir(&temp_dir)
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .arg("--print")
+ .arg("Respond to the request provided on stdin.")
+ .arg("--model")
+ .arg(model)
+ .arg("--output-format")
+ .arg("text")
+ .arg("--permission-mode")
+ .arg("bypassPermissions")
+ .arg("--tools")
+ .arg("")
+ .arg("--max-turns")
+ .arg("1")
+ .arg("--no-session-persistence")
+ .arg("--effort")
+ .arg(map_claude_reasoning(reasoning));
+
+ let result =
+ run_command_with_stdin(&mut command, "Claude CLI", prompt_payload).and_then(|output| {
+ if !output.status.success() {
+ return Err(format_process_failure("Claude CLI", &output));
+ }
+
+ Ok(String::from_utf8_lossy(&output.stdout).to_string())
+ });
+
+ let _ = fs::remove_dir_all(&temp_dir);
+ result
+}
+
+pub(crate) fn create_spec_generation_temp_dir(prefix: &str) -> Result {
+ let base_dir = std::env::temp_dir().join("specforge");
+ fs::create_dir_all(&base_dir)
+ .map_err(|error| format!("Unable to prepare temporary generation folder: {error}"))?;
+ let unique_suffix = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .map(|duration| duration.as_millis())
+ .unwrap_or_default();
+ let temp_dir = base_dir.join(format!("{prefix}-{unique_suffix}-{}", std::process::id()));
+
+ fs::create_dir_all(&temp_dir)
+ .map_err(|error| format!("Unable to prepare temporary generation folder: {error}"))?;
+
+ Ok(temp_dir)
+}
+
+pub(crate) fn run_command_with_stdin(
+ command: &mut Command,
+ display_name: &str,
+ stdin_payload: &str,
+) -> Result {
+ let mut child = command
+ .spawn()
+ .map_err(|error| format!("Unable to start {display_name}: {error}"))?;
+ let mut stdin = child
+ .stdin
+ .take()
+ .ok_or_else(|| format!("{display_name} did not expose stdin."))?;
+
+ stdin
+ .write_all(stdin_payload.as_bytes())
+ .map_err(|error| format!("Unable to send the prompt to {display_name}: {error}"))?;
+ drop(stdin);
+
+ child
+ .wait_with_output()
+ .map_err(|error| format!("{display_name} exited unexpectedly: {error}"))
+}
+
+pub(crate) fn format_process_failure(display_name: &str, output: &std::process::Output) -> String {
+ let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
+ let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
+ let details = if !stderr.is_empty() {
+ stderr
+ } else if !stdout.is_empty() {
+ stdout
+ } else {
+ format!("{display_name} exited with status {}", output.status)
+ };
+
+ format!("{display_name} failed: {details}")
+}
+
+pub(crate) fn map_codex_reasoning(reasoning: &str) -> &str {
+ match reasoning {
+ "max" => "xhigh",
+ "high" => "high",
+ "low" => "low",
+ _ => "medium",
+ }
+}
+
+pub(crate) fn map_claude_reasoning(reasoning: &str) -> &str {
+ match reasoning {
+ "max" => "high",
+ "high" => "high",
+ "low" => "low",
+ _ => "medium",
+ }
+}
diff --git a/src-tauri/src/git.rs b/src-tauri/src/git.rs
new file mode 100644
index 0000000..52ba0ae
--- /dev/null
+++ b/src-tauri/src/git.rs
@@ -0,0 +1,53 @@
+use git2::{DiffFormat, DiffOptions, Repository};
+
+use crate::{constants::SAMPLE_DIFF, paths::project_root};
+
+#[tauri::command]
+pub(crate) fn git_get_diff() -> Result {
+ let repository = Repository::discover(project_root())
+ .map_err(|error| format!("Unable to discover git repository: {error}"))?;
+ let head_tree = repository
+ .head()
+ .ok()
+ .and_then(|head| head.peel_to_tree().ok());
+ let index = repository
+ .index()
+ .map_err(|error| format!("Unable to inspect git index: {error}"))?;
+ let mut staged_options = DiffOptions::new();
+ let staged_diff = repository
+ .diff_tree_to_index(head_tree.as_ref(), Some(&index), Some(&mut staged_options))
+ .map_err(|error| format!("Unable to inspect staged diff: {error}"))?;
+ let mut workdir_options = DiffOptions::new();
+ workdir_options
+ .include_untracked(true)
+ .recurse_untracked_dirs(true)
+ .show_untracked_content(true);
+ let workdir_diff = repository
+ .diff_index_to_workdir(Some(&index), Some(&mut workdir_options))
+ .map_err(|error| format!("Unable to inspect worktree diff: {error}"))?;
+ let staged_rendered = render_diff(&staged_diff)?;
+ let workdir_rendered = render_diff(&workdir_diff)?;
+ let rendered = match (staged_rendered.trim(), workdir_rendered.trim()) {
+ ("", "") => String::new(),
+ ("", _) => workdir_rendered,
+ (_, "") => staged_rendered,
+ _ => format!("{staged_rendered}\n{workdir_rendered}"),
+ };
+
+ if rendered.trim().is_empty() {
+ return Ok(SAMPLE_DIFF.to_string());
+ }
+
+ Ok(rendered)
+}
+
+fn render_diff(diff: &git2::Diff<'_>) -> Result {
+ let mut rendered = String::new();
+ diff.print(DiffFormat::Patch, |_delta, _hunk, line| {
+ let text = String::from_utf8_lossy(line.content());
+ rendered.push_str(&text);
+ true
+ })
+ .map_err(|error| format!("Unable to render diff: {error}"))?;
+ Ok(rendered)
+}
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index a9bf34d..9b229ca 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -1,906 +1,23 @@
-use git2::{DiffFormat, DiffOptions, Repository};
-use ignore::WalkBuilder;
-use lopdf::Document;
-use serde::{Deserialize, Serialize};
-use std::{
- collections::HashMap,
- fs,
- io::Write,
- path::{Component, Path, PathBuf},
- process::{Command, Stdio},
- sync::{Arc, Condvar, Mutex},
- thread,
- time::{Duration, SystemTime, UNIX_EPOCH},
-};
-use tauri::{AppHandle, Emitter, State};
-
-#[derive(Default)]
-struct SharedState {
- runtime: Arc,
- workspace: Mutex>,
-}
-
-#[derive(Default)]
-struct ExecutionRuntime {
- control: Mutex,
- signal: Condvar,
-}
-
-#[derive(Default)]
-struct ExecutionControl {
- run_id: u64,
- awaiting_approval: bool,
- stop_requested: bool,
-}
-
-struct WorkspaceContext {
- root: PathBuf,
- files: HashMap,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct CliStatus {
- name: String,
- status: String,
- path: Option,
- detail: String,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct EnvironmentStatus {
- scanned_at: String,
- claude: CliStatus,
- codex: CliStatus,
- git: CliStatus,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct WorkspaceEntry {
- name: String,
- path: String,
- kind: String,
- depth: usize,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct WorkspaceDocument {
- content: String,
- source_path: String,
- file_name: String,
-}
-
-#[derive(Clone, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct ProjectSettings {
- selected_model: String,
- selected_reasoning: String,
- prd_prompt: String,
- spec_prompt: String,
- prd_path: String,
- spec_path: String,
- supporting_document_paths: Vec,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct ProjectContextPayload {
- root_name: String,
- root_path: String,
- settings_path: String,
- has_saved_settings: bool,
- settings: ProjectSettings,
- entries: Vec,
- ignored_file_count: usize,
- prd_document: Option,
- spec_document: Option,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct WorkspaceScanResult {
- root_name: String,
- entries: Vec,
- ignored_file_count: usize,
- prd_document: Option,
- spec_document: Option,
-}
-
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct AgentStateEvent {
- status: String,
- current_milestone: Option,
- pending_diff: Option,
- summary: Option,
-}
-
-struct SimulatedStep {
- delay_ms: u64,
- line: String,
- milestone: &'static str,
- gate: bool,
-}
-
-struct ScannedWorkspace {
- result: WorkspaceScanResult,
- context: WorkspaceContext,
-}
-
-enum StopState {
- Continue,
- StopRequested,
- Replaced,
-}
-
-enum ApprovalWaitOutcome {
- Approved,
- StopRequested,
- Replaced,
-}
-
-const SAMPLE_DIFF: &str = r#"diff --git a/src/App.tsx b/src/App.tsx
-index 0000000..forge42 100644
---- a/src/App.tsx
-+++ b/src/App.tsx
-@@
-- Render placeholder starter card
-+ Introduce PRD/spec review workspace with execution controls
-+ Add Dracula-first theme tokens and persisted preferences
-+ Surface CLI health, diff approvals, and terminal streaming"#;
-const SPECFORGE_SETTINGS_RELATIVE_PATH: &str = ".specforge/settings.json";
-const DEFAULT_PROJECT_PRD_PATH: &str = "docs/PRD.md";
-const DEFAULT_PROJECT_SPEC_PATH: &str = "docs/SPEC.md";
-const DEFAULT_PRD_PROMPT: &str = r#"Act as an Expert Senior Product Manager. Your goal is to help me write a comprehensive, well-structured Product Requirements Document (PRD) for a new [product / feature / app] called [Project Name].
-
-I have some initial ideas, but I want to make sure the PRD is thorough. Before you draft the full document, please ask me a series of clarifying questions to gather the necessary context.
-
-Please ask about:
-- The core problem we are solving
-- The target audience/user personas
-- Key features and user flows
-- Success metrics (KPIs)
-- Technical or timeline constraints
-
-Ask me these questions one or two at a time so I do not get overwhelmed. Once you have enough context, we will move on to drafting the actual PRD."#;
-const DEFAULT_SPEC_PROMPT: &str = r#"Act as an Expert Software Architect and Tech Lead. I have attached the Product Requirements Document (PRD) for our upcoming project.
-
-Your task is to analyze this PRD and draft a comprehensive Technical Specification Document.
-
-Please structure the spec with the following sections:
-
-1. High-Level Architecture: A conceptual overview of how the system components will interact.
-2. Tech Stack & Tooling: Define the frontend, backend, and infrastructure.
-3. Data Models & Database Schema: Define the core entities, their attributes, and relationships.
-4. API Contracts: Outline the primary endpoints (methods, routes, request/response structures) needed to support the user flows.
-5. Component & State Management: How data will flow through the application and how the UI will be structured.
-6. Security & Edge Cases: Potential vulnerabilities, error handling, and performance bottlenecks.
-7. Engineering Milestones: Break the implementation down into logical, phased deliverables.
-
-Before writing the full document, please provide a brief bulleted summary of your proposed technical approach, and ask me up to 3 clarifying questions about any technical constraints or non-functional requirements that might be missing from the PRD."#;
-
-#[tauri::command]
-fn run_environment_scan(
- claude_path: Option,
- codex_path: Option,
-) -> Result {
- Ok(EnvironmentStatus {
- scanned_at: current_timestamp(),
- claude: inspect_binary("Claude CLI", "claude", claude_path.as_deref()),
- codex: inspect_binary("Codex CLI", "codex", codex_path.as_deref()),
- git: inspect_binary("Git", "git", None),
- })
-}
-
-#[tauri::command]
-fn parse_document(file_path: String) -> Result {
- let resolved_path = resolve_project_document_path(&file_path)?;
- parse_supported_document(&resolved_path)
-}
-
-#[tauri::command]
-fn pick_document() -> Result, String> {
- let Some(file_path) = rfd::FileDialog::new()
- .add_filter("Documents", &["md", "pdf"])
- .pick_file()
- else {
- return Ok(None);
- };
-
- let resolved_path = canonicalize_existing_path(&file_path)
- .map_err(|error| format!("Unable to prepare selected document: {error}"))?;
- let content = parse_supported_document(&resolved_path)?;
- let file_name = resolved_path
- .file_name()
- .and_then(|value| value.to_str())
- .unwrap_or("Document")
- .to_string();
-
- Ok(Some(WorkspaceDocument {
- content,
- source_path: resolved_path.display().to_string(),
- file_name,
- }))
-}
-
-#[tauri::command]
-fn pick_project_folder(state: State) -> Result, String> {
- let Some(folder_path) = rfd::FileDialog::new().pick_folder() else {
- return Ok(None);
- };
-
- load_project_context_from_folder(&state, &folder_path).map(Some)
-}
-
-#[tauri::command]
-fn load_project_context(
- state: State,
- folder_path: String,
-) -> Result {
- let trimmed_path = folder_path.trim();
-
- if trimmed_path.is_empty() {
- return Err(String::from("A workspace folder path is required."));
- }
-
- load_project_context_from_folder(&state, &PathBuf::from(trimmed_path))
-}
-
-#[tauri::command]
-fn save_project_settings(
- folder_path: String,
- settings: ProjectSettings,
-) -> Result {
- let trimmed_path = folder_path.trim();
-
- if trimmed_path.is_empty() {
- return Err(String::from("A workspace folder path is required."));
- }
-
- let workspace_root =
- canonicalize_existing_path(&PathBuf::from(trimmed_path)).map_err(|error| {
- format!(
- "Unable to resolve the selected workspace folder {}: {error}",
- trimmed_path
- )
- })?;
- let default_settings = build_default_project_settings(&workspace_root, None, None);
- let normalized_settings =
- normalize_project_settings(&workspace_root, default_settings, Some(settings))?;
- let settings_path = workspace_root.join(SPECFORGE_SETTINGS_RELATIVE_PATH);
- let settings_directory = settings_path
- .parent()
- .ok_or_else(|| String::from("Unable to resolve the .specforge directory."))?;
-
- fs::create_dir_all(settings_directory).map_err(|error| {
- format!(
- "Unable to create the project settings directory {}: {error}",
- settings_directory.display()
- )
- })?;
- let settings_json = serde_json::to_string_pretty(&normalized_settings)
- .map_err(|error| format!("Unable to encode project settings: {error}"))?;
-
- fs::write(&settings_path, settings_json.as_bytes()).map_err(|error| {
- format!(
- "Unable to write project settings to {}: {error}",
- settings_path.display()
- )
- })?;
-
- Ok(normalized_settings)
-}
-
-#[tauri::command]
-fn open_workspace_folder(state: State) -> Result, String> {
- let Some(folder_path) = rfd::FileDialog::new().pick_folder() else {
- return Ok(None);
- };
-
- let scanned_workspace = scan_workspace_folder(&folder_path)?;
- let mut active_workspace = state
- .workspace
- .lock()
- .map_err(|_| String::from("Workspace lock was poisoned."))?;
- *active_workspace = Some(scanned_workspace.context);
- Ok(Some(scanned_workspace.result))
-}
-
-#[tauri::command]
-fn read_workspace_file(state: State, file_path: String) -> Result {
- let resolved_path = resolve_workspace_file_path(&state, &file_path)?;
-
- fs::read_to_string(&resolved_path).map_err(|error| {
- format!(
- "Unable to read workspace file {}: {error}",
- resolved_path.display()
- )
- })
-}
-
-#[tauri::command]
-fn get_workspace_snapshot() -> Result, String> {
- let root = project_root();
- let directory = fs::read_dir(&root)
- .map_err(|error| format!("Unable to read workspace root {}: {error}", root.display()))?;
- let mut entries = directory
- .filter_map(Result::ok)
- .map(|entry| {
- let path = entry.path();
- let metadata = entry.metadata().ok();
- let kind = if metadata.as_ref().is_some_and(|item| item.is_dir()) {
- "directory"
- } else {
- "file"
- };
-
- WorkspaceEntry {
- name: entry.file_name().to_string_lossy().to_string(),
- path: path
- .strip_prefix(&root)
- .unwrap_or(path.as_path())
- .to_string_lossy()
- .replace('\\', "/"),
- kind: kind.to_string(),
- depth: 0,
- }
- })
- .collect::>();
-
- entries.sort_by(|left, right| left.kind.cmp(&right.kind).then(left.name.cmp(&right.name)));
- Ok(entries)
-}
-
-fn scan_workspace_folder(root: &Path) -> Result {
- let canonical_root = canonicalize_existing_path(root).map_err(|error| {
- format!(
- "Unable to prepare workspace folder {}: {error}",
- root.display()
- )
- })?;
- let root_name = canonical_root
- .file_name()
- .and_then(|value| value.to_str())
- .unwrap_or("Workspace")
- .to_string();
- let mut directory_entries = HashMap::::new();
- let mut file_entries = Vec::::new();
- let mut file_paths = HashMap::::new();
- let mut file_documents = Vec::<(String, PathBuf)>::new();
-
- let walker = WalkBuilder::new(&canonical_root)
- .hidden(false)
- .git_ignore(true)
- .git_global(true)
- .git_exclude(true)
- .build();
-
- for item in walker {
- let entry = match item {
- Ok(entry) => entry,
- Err(error) => {
- return Err(format!(
- "Unable to walk workspace folder {}: {error}",
- canonical_root.display()
- ));
- }
- };
- let path = entry.path();
-
- if path == canonical_root.as_path() {
- continue;
- }
-
- let relative_path = path
- .strip_prefix(&canonical_root)
- .map_err(|error| {
- format!(
- "Unable to normalize workspace path {}: {error}",
- path.display()
- )
- })?
- .to_string_lossy()
- .replace('\\', "/");
-
- if relative_path.is_empty() {
- continue;
- }
-
- let depth = relative_path.split('/').count().saturating_sub(1);
- let file_name = path
- .file_name()
- .and_then(|value| value.to_str())
- .unwrap_or_default()
- .to_string();
-
- if entry
- .file_type()
- .map(|file_type| file_type.is_dir())
- .unwrap_or(false)
- {
- directory_entries.insert(
- relative_path.clone(),
- WorkspaceEntry {
- name: file_name,
- path: relative_path,
- kind: String::from("directory"),
- depth,
- },
- );
- continue;
- }
-
- for (index, segment) in relative_path
- .split('/')
- .collect::>()
- .iter()
- .enumerate()
- {
- if index == relative_path.split('/').count() - 1 {
- break;
- }
-
- let directory_path = relative_path
- .split('/')
- .take(index + 1)
- .collect::>()
- .join("/");
-
- directory_entries
- .entry(directory_path.clone())
- .or_insert_with(|| WorkspaceEntry {
- name: (*segment).to_string(),
- path: directory_path,
- kind: String::from("directory"),
- depth: index,
- });
- }
-
- file_entries.push(WorkspaceEntry {
- name: file_name.clone(),
- path: relative_path.clone(),
- kind: String::from("file"),
- depth,
- });
- file_paths.insert(relative_path.clone(), path.to_path_buf());
- file_documents.push((relative_path, path.to_path_buf()));
- }
-
- let mut entries = directory_entries
- .into_values()
- .chain(file_entries)
- .collect::>();
- entries.sort_by(compare_workspace_entries);
-
- let prd_document = pick_workspace_document(&file_documents, &["prd.md", "prd.pdf"])?;
- let spec_document = pick_workspace_document(&file_documents, &["spec.md", "spec.pdf"])?;
-
- Ok(ScannedWorkspace {
- result: WorkspaceScanResult {
- root_name,
- entries,
- ignored_file_count: 0,
- prd_document,
- spec_document,
- },
- context: WorkspaceContext {
- root: canonical_root,
- files: file_paths,
- },
- })
-}
-
-fn load_project_context_from_folder(
- state: &State,
- folder_path: &Path,
-) -> Result {
- let scanned_workspace = scan_workspace_folder(folder_path)?;
- let settings_path = scanned_workspace
- .context
- .root
- .join(SPECFORGE_SETTINGS_RELATIVE_PATH);
- let default_settings = build_default_project_settings(
- &scanned_workspace.context.root,
- scanned_workspace.result.prd_document.as_ref(),
- scanned_workspace.result.spec_document.as_ref(),
- );
- let (settings, has_saved_settings) = read_project_settings(
- &settings_path,
- &scanned_workspace.context.root,
- default_settings,
- )?;
- let prd_document =
- load_configured_workspace_document(&scanned_workspace.context.root, &settings.prd_path)?;
- let spec_document =
- load_configured_workspace_document(&scanned_workspace.context.root, &settings.spec_path)?;
- let mut active_workspace = state
- .workspace
- .lock()
- .map_err(|_| String::from("Workspace lock was poisoned."))?;
- *active_workspace = Some(scanned_workspace.context);
-
- Ok(ProjectContextPayload {
- root_name: scanned_workspace.result.root_name,
- root_path: active_workspace
- .as_ref()
- .map(|workspace| workspace.root.display().to_string())
- .unwrap_or_default(),
- settings_path: settings_path.display().to_string(),
- has_saved_settings,
- settings,
- entries: scanned_workspace.result.entries,
- ignored_file_count: scanned_workspace.result.ignored_file_count,
- prd_document,
- spec_document,
- })
-}
-
-fn build_default_project_settings(
- workspace_root: &Path,
- prd_document: Option<&WorkspaceDocument>,
- spec_document: Option<&WorkspaceDocument>,
-) -> ProjectSettings {
- ProjectSettings {
- selected_model: String::from("gpt-5.4"),
- selected_reasoning: String::from("medium"),
- prd_prompt: String::from(DEFAULT_PRD_PROMPT),
- spec_prompt: String::from(DEFAULT_SPEC_PROMPT),
- prd_path: derive_default_document_path(
- workspace_root,
- prd_document,
- DEFAULT_PROJECT_PRD_PATH,
- ),
- spec_path: derive_default_document_path(
- workspace_root,
- spec_document,
- DEFAULT_PROJECT_SPEC_PATH,
- ),
- supporting_document_paths: Vec::new(),
- }
-}
-
-fn read_project_settings(
- settings_path: &Path,
- workspace_root: &Path,
- defaults: ProjectSettings,
-) -> Result<(ProjectSettings, bool), String> {
- if !settings_path.exists() {
- return Ok((defaults, false));
- }
-
- let raw_settings = fs::read_to_string(settings_path).map_err(|error| {
- format!(
- "Unable to read project settings {}: {error}",
- settings_path.display()
- )
- })?;
- let parsed_settings =
- serde_json::from_str::<ProjectSettings>(&raw_settings).map_err(|error| {
- format!(
- "Unable to parse project settings {}: {error}",
- settings_path.display()
- )
- })?;
-
- Ok((
- normalize_project_settings(workspace_root, defaults, Some(parsed_settings))?,
- true,
- ))
-}
-
-fn normalize_project_settings(
- workspace_root: &Path,
- defaults: ProjectSettings,
- provided: Option<ProjectSettings>,
-) -> Result<ProjectSettings, String> {
- let Some(provided) = provided else {
- return Ok(defaults);
- };
-
- let selected_model =
- normalize_project_model(&provided.selected_model, &defaults.selected_model);
- let selected_reasoning =
- normalize_project_reasoning(&provided.selected_reasoning, &defaults.selected_reasoning);
- let normalized_prd_path =
- normalize_project_path_or_default(workspace_root, &provided.prd_path, &defaults.prd_path)?;
- let normalized_spec_path = normalize_project_path_or_default(
- workspace_root,
- &provided.spec_path,
- &defaults.spec_path,
- )?;
- let supporting_document_paths = provided
- .supporting_document_paths
- .iter()
- .filter_map(|entry| normalize_relative_path(entry).ok())
- .collect::<Vec<_>>();
-
- Ok(ProjectSettings {
- selected_model,
- selected_reasoning,
- prd_prompt: if provided.prd_prompt.trim().is_empty() {
- defaults.prd_prompt
- } else {
- provided.prd_prompt.trim().to_string()
- },
- spec_prompt: if provided.spec_prompt.trim().is_empty() {
- defaults.spec_prompt
- } else {
- provided.spec_prompt.trim().to_string()
- },
- prd_path: normalized_prd_path,
- spec_path: normalized_spec_path,
- supporting_document_paths,
- })
-}
-
-fn normalize_project_model(value: &str, fallback: &str) -> String {
- const VALID_MODELS: &[&str] = &[
- "gpt-5.4",
- "gpt-5.4-mini",
- "gpt-5.3-codex",
- "gpt-5.2",
- "claude-opus-4-1-20250805",
- "claude-opus-4-20250514",
- "claude-sonnet-4-20250514",
- "claude-3-7-sonnet-20250219",
- "claude-3-5-sonnet-20241022",
- "claude-3-5-sonnet-20240620",
- "claude-3-5-haiku-20241022",
- "claude-3-haiku-20240307",
- ];
-
- if VALID_MODELS.contains(&value.trim()) {
- return value.trim().to_string();
- }
-
- fallback.to_string()
-}
-
-fn normalize_project_reasoning(value: &str, fallback: &str) -> String {
- match value.trim() {
- "low" | "medium" | "high" | "max" => value.trim().to_string(),
- _ => fallback.to_string(),
- }
-}
-
-fn normalize_project_path_or_default(
- workspace_root: &Path,
- value: &str,
- fallback: &str,
-) -> Result<String, String> {
- if value.trim().is_empty() {
- return Ok(fallback.to_string());
- }
-
- normalize_relative_path(value).map_err(|error| {
- format!(
- "Invalid project document path for workspace {}: {error}",
- workspace_root.display()
- )
- })
-}
-
-fn derive_default_document_path(
- workspace_root: &Path,
- document: Option<&WorkspaceDocument>,
- fallback: &str,
-) -> String {
- document
- .and_then(|entry| {
- PathBuf::from(&entry.source_path)
- .strip_prefix(workspace_root)
- .ok()
- .map(|path| path.to_string_lossy().replace('\\', "/"))
- })
- .unwrap_or_else(|| fallback.to_string())
-}
-
-fn load_configured_workspace_document(
- workspace_root: &Path,
- relative_path: &str,
-) -> Result<Option<WorkspaceDocument>, String> {
- let resolved_path = resolve_relative_path_under_root(workspace_root, relative_path)?;
-
- if !resolved_path.exists() {
- return Ok(None);
- }
-
- let content = parse_workspace_document(&resolved_path)?;
- let file_name = resolved_path
- .file_name()
- .and_then(|value| value.to_str())
- .unwrap_or("Document")
- .to_string();
-
- Ok(Some(WorkspaceDocument {
- content,
- source_path: resolved_path.display().to_string(),
- file_name,
- }))
-}
-
-#[tauri::command]
-fn git_get_diff() -> Result<String, String> {
- let repository = Repository::discover(project_root())
- .map_err(|error| format!("Unable to discover git repository: {error}"))?;
- let head_tree = repository
- .head()
- .ok()
- .and_then(|head| head.peel_to_tree().ok());
- let index = repository
- .index()
- .map_err(|error| format!("Unable to inspect git index: {error}"))?;
- let mut staged_options = DiffOptions::new();
- let staged_diff = repository
- .diff_tree_to_index(head_tree.as_ref(), Some(&index), Some(&mut staged_options))
- .map_err(|error| format!("Unable to inspect staged diff: {error}"))?;
- let mut workdir_options = DiffOptions::new();
- workdir_options
- .include_untracked(true)
- .recurse_untracked_dirs(true)
- .show_untracked_content(true);
- let workdir_diff = repository
- .diff_index_to_workdir(Some(&index), Some(&mut workdir_options))
- .map_err(|error| format!("Unable to inspect worktree diff: {error}"))?;
- let staged_rendered = render_diff(&staged_diff)?;
- let workdir_rendered = render_diff(&workdir_diff)?;
- let rendered = match (staged_rendered.trim(), workdir_rendered.trim()) {
- ("", "") => String::new(),
- ("", _) => workdir_rendered,
- (_, "") => staged_rendered,
- _ => format!("{staged_rendered}\n{workdir_rendered}"),
- };
-
- if rendered.trim().is_empty() {
- return Ok(SAMPLE_DIFF.to_string());
- }
-
- Ok(rendered)
-}
-
-#[tauri::command]
-fn generate_prd_document(
- workspace_root: String,
- output_path: String,
- prompt_template: String,
- user_prompt: String,
- provider: String,
- model: String,
- reasoning: String,
- claude_path: Option<String>,
- codex_path: Option<String>,
-) -> Result<WorkspaceDocument, String> {
- let trimmed_prompt = user_prompt.trim();
-
- if trimmed_prompt.is_empty() {
- return Err(String::from(
- "Add the product context you want the AI to consider.",
- ));
- }
-
- let prompt_payload = build_generation_prompt(&prompt_template, trimmed_prompt, &[]);
- let generated_prd = run_generation_request(
- &provider,
- &model,
- &reasoning,
- claude_path.as_deref(),
- codex_path.as_deref(),
- &prompt_payload,
- )?;
-
- write_generated_workspace_document(
- &workspace_root,
- &output_path,
- generated_prd,
- "PRD output path",
- )
-}
-
-#[tauri::command]
-fn generate_spec_document(
- workspace_root: String,
- output_path: String,
- prd_content: String,
- prompt_template: String,
- user_prompt: String,
- provider: String,
- model: String,
- reasoning: String,
- claude_path: Option<String>,
- codex_path: Option<String>,
-) -> Result<WorkspaceDocument, String> {
- let trimmed_prd = prd_content.trim();
- let trimmed_prompt = user_prompt.trim();
-
- if trimmed_prd.is_empty() {
- return Err(String::from(
- "Load or write a PRD before generating a specification.",
- ));
- }
-
- if trimmed_prompt.is_empty() {
- return Err(String::from(
- "Add the technical guidance you want the AI to consider.",
- ));
- }
-
- let prompt_payload = build_generation_prompt(
- &prompt_template,
- trimmed_prompt,
- &[("Attached Product Requirements Document (PRD)", trimmed_prd)],
- );
- let generated_spec = run_generation_request(
- &provider,
- &model,
- &reasoning,
- claude_path.as_deref(),
- codex_path.as_deref(),
- &prompt_payload,
- )?;
-
- write_generated_workspace_document(
- &workspace_root,
- &output_path,
- generated_spec,
- "SPEC output path",
- )
-}
-
-#[tauri::command]
-fn spawn_cli_agent(
- app: AppHandle,
- state: State,
- spec_payload: String,
- mode: String,
- model: String,
- reasoning: String,
-) -> Result<(), String> {
- let runtime = state.runtime.clone();
- let run_id = {
- let mut control = runtime
- .control
- .lock()
- .map_err(|_| String::from("Execution lock was poisoned."))?;
- control.run_id = control.run_id.wrapping_add(1);
- control.awaiting_approval = false;
- control.stop_requested = false;
- control.run_id
- };
-
- thread::spawn(move || {
- run_simulated_agent(app, runtime, run_id, spec_payload, mode, model, reasoning);
- });
-
- Ok(())
-}
-
-#[tauri::command]
-fn approve_action(state: State) -> Result<(), String> {
- let mut control = state
- .runtime
- .control
- .lock()
- .map_err(|_| String::from("Execution lock was poisoned."))?;
- control.awaiting_approval = false;
- state.runtime.signal.notify_all();
- Ok(())
-}
-
-#[tauri::command]
-fn kill_agent_process(state: State) -> Result<(), String> {
- let mut control = state
- .runtime
- .control
- .lock()
- .map_err(|_| String::from("Execution lock was poisoned."))?;
- control.stop_requested = true;
- control.awaiting_approval = false;
- state.runtime.signal.notify_all();
- Ok(())
-}
+mod agent;
+mod constants;
+mod documents;
+mod environment;
+mod generation;
+mod git;
+mod models;
+mod paths;
+mod project;
+mod state;
+mod workspace;
+
+use agent::{approve_action, kill_agent_process, spawn_cli_agent};
+use documents::{parse_document, pick_document};
+use environment::run_environment_scan;
+use generation::{generate_prd_document, generate_spec_document};
+use git::git_get_diff;
+use project::{load_project_context, pick_project_folder, save_project_settings};
+use state::SharedState;
+use workspace::{get_workspace_snapshot, open_workspace_folder, read_workspace_file};
pub fn run() {
tauri::Builder::default()
@@ -925,952 +42,3 @@ pub fn run() {
.run(tauri::generate_context!())
.expect("error while running tauri application");
}
-
-fn current_timestamp() -> String {
- SystemTime::now()
- .duration_since(UNIX_EPOCH)
- .map(|duration| duration.as_secs().to_string())
- .unwrap_or_else(|_| String::from("0"))
-}
-
-fn inspect_binary(display_name: &str, binary_name: &str, override_path: Option<&str>) -> CliStatus {
- let resolved_path = override_path
- .and_then(|value| {
- let candidate = resolve_override_path(value);
- candidate.exists().then_some(candidate)
- })
- .or_else(|| which::which(binary_name).ok());
-
- if let Some(path) = resolved_path {
- match probe_binary_version(&path) {
- Ok(version_detail) => {
- let detail = if override_path.is_some() {
- format!("Using manual override. {version_detail}")
- } else {
- format!("Detected on PATH. {version_detail}")
- };
-
- return CliStatus {
- name: display_name.to_string(),
- status: String::from("found"),
- path: Some(path.display().to_string()),
- detail,
- };
- }
- Err(error) if override_path.is_some() => {
- return CliStatus {
- name: display_name.to_string(),
- status: String::from("missing"),
- path: None,
- detail: format!(
- "Manual override could not be executed at {}: {error}",
- path.display()
- ),
- };
- }
- Err(_) => {}
- }
- }
-
- CliStatus {
- name: display_name.to_string(),
- status: String::from("missing"),
- path: None,
- detail: String::from("Binary not found. Add a manual path or install it on PATH."),
- }
-}
-
-fn compare_workspace_entries(left: &WorkspaceEntry, right: &WorkspaceEntry) -> std::cmp::Ordering {
- let left_segments = left.path.split('/').collect::<Vec<_>>();
- let right_segments = right.path.split('/').collect::<Vec<_>>();
- let shared_length = left_segments.len().min(right_segments.len());
-
- for index in 0..shared_length {
- if left_segments[index] != right_segments[index] {
- let left_is_directory = index < left_segments.len() - 1 || left.kind == "directory";
- let right_is_directory = index < right_segments.len() - 1 || right.kind == "directory";
-
- if left_is_directory != right_is_directory {
- return if left_is_directory {
- std::cmp::Ordering::Less
- } else {
- std::cmp::Ordering::Greater
- };
- }
-
- return left_segments[index].cmp(right_segments[index]);
- }
- }
-
- if left_segments.len() != right_segments.len() {
- return left_segments.len().cmp(&right_segments.len());
- }
-
- if left.kind != right.kind {
- return if left.kind == "directory" {
- std::cmp::Ordering::Less
- } else {
- std::cmp::Ordering::Greater
- };
- }
-
- left.path.cmp(&right.path)
-}
-
-fn pick_workspace_document(
- files: &[(String, PathBuf)],
- expected_names: &[&str],
-) -> Result<Option<WorkspaceDocument>, String> {
- let mut ranked_files = files
- .iter()
- .filter_map(|(relative_path, absolute_path)| {
- let file_name = absolute_path
- .file_name()
- .and_then(|value| value.to_str())
- .map(|value| value.to_lowercase())?;
-
- expected_names
- .iter()
- .position(|expected_name| *expected_name == file_name)
- .map(|rank| (rank, relative_path, absolute_path))
- })
- .collect::<Vec<_>>();
-
- ranked_files.sort_by(|left, right| {
- left.0
- .cmp(&right.0)
- .then(relative_path_depth(left.1).cmp(&relative_path_depth(right.1)))
- .then(left.1.cmp(right.1))
- });
-
- ranked_files
- .into_iter()
- .next()
- .map(|(_, relative_path, absolute_path)| {
- parse_workspace_document(absolute_path).map(|content| WorkspaceDocument {
- content,
- source_path: absolute_path.display().to_string(),
- file_name: relative_path
- .rsplit('/')
- .next()
- .unwrap_or(relative_path)
- .to_string(),
- })
- })
- .transpose()
-}
-
-fn read_pdf_text(path: &Path) -> Result<String, String> {
- let document = Document::load(path)
- .map_err(|error| format!("Unable to open PDF document {}: {error}", path.display()))?;
- let mut page_numbers = document.get_pages().keys().copied().collect::<Vec<_>>();
- page_numbers.sort_unstable();
-
- document.extract_text(&page_numbers).map_err(|error| {
- format!(
- "Unable to extract PDF text from {}: {error}",
- path.display()
- )
- })
-}
-
-fn parse_workspace_document(path: &Path) -> Result<String, String> {
- match path
- .extension()
- .and_then(|extension| extension.to_str())
- .map(|extension| extension.to_ascii_lowercase())
- .as_deref()
- {
- Some("pdf") => read_pdf_text(path),
- _ => fs::read_to_string(path).map_err(|error| {
- format!(
- "Unable to read workspace document {}: {error}",
- path.display()
- )
- }),
- }
-}
-
-fn relative_path_depth(path: &str) -> usize {
- path.split('/').count()
-}
-
-fn project_root() -> PathBuf {
- let current_directory = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
-
- if current_directory
- .file_name()
- .and_then(|segment| segment.to_str())
- .is_some_and(|segment| segment.eq_ignore_ascii_case("src-tauri"))
- {
- return current_directory
- .parent()
- .map(Path::to_path_buf)
- .unwrap_or(current_directory);
- }
-
- current_directory
-}
-
-fn resolve_project_document_path(path_value: &str) -> Result<PathBuf, String> {
- let normalized_path = normalize_relative_path(path_value)?;
- let project_root = canonicalize_existing_path(&project_root())
- .map_err(|error| format!("Unable to resolve project root: {error}"))?;
- let candidate = project_root.join(&normalized_path);
- let resolved_path = canonicalize_existing_path(&candidate).map_err(|error| {
- format!("Unable to resolve project document {normalized_path}: {error}")
- })?;
-
- if !resolved_path.starts_with(&project_root) {
- return Err(String::from(
- "Project document imports must stay inside the repository root.",
- ));
- }
-
- Ok(resolved_path)
-}
-
-fn resolve_workspace_file_path(
- state: &State,
- file_path: &str,
-) -> Result<PathBuf, String> {
- let normalized_path = normalize_relative_path(file_path)?;
- let workspace = state
- .workspace
- .lock()
- .map_err(|_| String::from("Workspace lock was poisoned."))?;
- let active_workspace = workspace
- .as_ref()
- .ok_or_else(|| String::from("No workspace folder is currently open."))?;
- let resolved_path = active_workspace
- .files
- .get(&normalized_path)
- .ok_or_else(|| {
- format!("The file {normalized_path} is not part of the active workspace.")
- })?;
- let canonical_path = canonicalize_existing_path(resolved_path)
- .map_err(|error| format!("Unable to resolve workspace file {normalized_path}: {error}"))?;
-
- if !canonical_path.starts_with(&active_workspace.root) {
- return Err(format!(
- "Workspace file {normalized_path} resolved outside the active workspace root."
- ));
- }
-
- Ok(canonical_path)
-}
-
-fn resolve_relative_path_under_root(root: &Path, relative_path: &str) -> Result<PathBuf, String> {
- let normalized_path = normalize_relative_path(relative_path)?;
- Ok(root.join(normalized_path))
-}
-
-fn write_generated_workspace_document(
- workspace_root: &str,
- output_path: &str,
- generated_content: String,
- field_name: &str,
-) -> Result<WorkspaceDocument, String> {
- let trimmed_root = workspace_root.trim();
-
- if trimmed_root.is_empty() {
- return Err(String::from("A workspace root is required."));
- }
-
- let canonical_root = canonicalize_existing_path(&PathBuf::from(trimmed_root))
- .map_err(|error| format!("Unable to resolve workspace root {}: {error}", trimmed_root))?;
- let resolved_output_path = resolve_relative_path_under_root(&canonical_root, output_path)
- .map_err(|error| format!("{field_name} is invalid: {error}"))?;
- let rendered_document = format!(
- "{}\n",
- strip_wrapping_code_fence(generated_content.trim()).trim()
- );
-
- if rendered_document.trim().is_empty() {
- return Err(String::from(
- "The AI returned an empty document. Adjust the prompt and try again.",
- ));
- }
-
- if resolved_output_path
- .extension()
- .and_then(|value| value.to_str())
- .map(|value| !value.eq_ignore_ascii_case("md"))
- .unwrap_or(true)
- {
- return Err(format!(
- "{field_name} must point to a Markdown file inside the selected workspace."
- ));
- }
-
- if let Some(parent_directory) = resolved_output_path.parent() {
- fs::create_dir_all(parent_directory).map_err(|error| {
- format!(
- "Unable to create the document folder {}: {error}",
- parent_directory.display()
- )
- })?;
- }
-
- fs::write(&resolved_output_path, rendered_document.as_bytes()).map_err(|error| {
- format!(
- "Unable to save the generated document to {}: {error}",
- resolved_output_path.display()
- )
- })?;
-
- Ok(WorkspaceDocument {
- content: rendered_document,
- source_path: resolved_output_path.display().to_string(),
- file_name: resolved_output_path
- .file_name()
- .and_then(|value| value.to_str())
- .unwrap_or("Document.md")
- .to_string(),
- })
-}
-
-fn resolve_override_path(path_value: &str) -> PathBuf {
- let candidate = PathBuf::from(path_value.trim());
-
- if candidate.is_absolute() {
- return candidate;
- }
-
- project_root().join(candidate)
-}
-
-fn normalize_relative_path(path_value: &str) -> Result<String, String> {
- let trimmed_value = path_value.trim();
-
- if trimmed_value.is_empty() {
- return Err(String::from("A relative path is required."));
- }
-
- let candidate = PathBuf::from(trimmed_value);
- let mut normalized_path = PathBuf::new();
-
- for component in candidate.components() {
- match component {
- Component::CurDir => {}
- Component::Normal(segment) => normalized_path.push(segment),
- Component::ParentDir => {
- return Err(String::from(
- "Parent directory traversal is not allowed for document or workspace reads.",
- ));
- }
- Component::Prefix(_) | Component::RootDir => {
- return Err(String::from("Absolute paths are not allowed here."));
- }
- }
- }
-
- if normalized_path.as_os_str().is_empty() {
- return Err(String::from("A relative path is required."));
- }
-
- Ok(normalized_path.to_string_lossy().replace('\\', "/"))
-}
-
-fn canonicalize_existing_path(path: &Path) -> std::io::Result<PathBuf> {
- fs::canonicalize(path)
-}
-
-fn parse_supported_document(path: &Path) -> Result<String, String> {
- match path
- .extension()
- .and_then(|extension| extension.to_str())
- .map(|extension| extension.to_ascii_lowercase())
- .as_deref()
- {
- Some("md") => fs::read_to_string(path).map_err(|error| {
- format!(
- "Unable to read markdown document {}: {error}",
- path.display()
- )
- }),
- Some("pdf") => read_pdf_text(path),
- _ => Err(String::from("Only .md and .pdf documents are supported.")),
- }
-}
-
-fn render_diff(diff: &git2::Diff<'_>) -> Result<String, String> {
- let mut rendered = String::new();
- diff.print(DiffFormat::Patch, |_delta, _hunk, line| {
- let text = String::from_utf8_lossy(line.content());
- rendered.push_str(&text);
- true
- })
- .map_err(|error| format!("Unable to render diff: {error}"))?;
- Ok(rendered)
-}
-
-fn probe_binary_version(path: &Path) -> Result<String, String> {
- let output = Command::new(path)
- .arg("--version")
- .output()
- .map_err(|error| error.to_string())?;
- let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
- let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
-
- if !stdout.is_empty() {
- return Ok(stdout);
- }
-
- if !stderr.is_empty() {
- return Ok(stderr);
- }
-
- Ok(String::from(
- "Binary detected. Version probe returned no output.",
- ))
-}
-
-fn resolve_cli_binary(binary_name: &str, override_path: Option<&str>) -> Result<PathBuf, String> {
- if let Some(path_value) = override_path {
- let candidate = resolve_override_path(path_value);
-
- if !candidate.exists() {
- return Err(format!(
- "The configured {binary_name} path does not exist: {}",
- candidate.display()
- ));
- }
-
- return Ok(candidate);
- }
-
- which::which(binary_name).map_err(|_| {
- format!(
- "{binary_name} was not found on PATH. Set a manual binary path in Settings and refresh."
- )
- })
-}
-
-fn build_generation_prompt(
- prompt_template: &str,
- user_prompt: &str,
- attachments: &[(&str, &str)],
-) -> String {
- let mut prompt = String::new();
- prompt.push_str(prompt_template.trim());
- prompt.push_str("\n\n");
- prompt.push_str("Additional operator context:\n");
- prompt.push_str(user_prompt.trim());
-
- for (label, content) in attachments {
- let trimmed_content = content.trim();
-
- if trimmed_content.is_empty() {
- continue;
- }
-
- prompt.push_str("\n\n");
- prompt.push_str(label);
- prompt.push_str(":\n");
- prompt.push_str(trimmed_content);
- }
-
- prompt
-}
-
-fn run_generation_request(
- provider: &str,
- model: &str,
- reasoning: &str,
- claude_path: Option<&str>,
- codex_path: Option<&str>,
- prompt_payload: &str,
-) -> Result<String, String> {
- match provider {
- "codex" => run_codex_generation(
- &resolve_cli_binary("codex", codex_path)?,
- model,
- reasoning,
- prompt_payload,
- ),
- "claude" => run_claude_generation(
- &resolve_cli_binary("claude", claude_path)?,
- model,
- reasoning,
- prompt_payload,
- ),
- _ => Err(format!("Unsupported model provider: {provider}")),
- }
-}
-
-fn run_codex_generation(
- binary_path: &Path,
- model: &str,
- reasoning: &str,
- prompt_payload: &str,
-) -> Result<String, String> {
- let temp_dir = create_spec_generation_temp_dir("codex")?;
- let output_path = temp_dir.join("generated-spec.md");
- let reasoning_effort = map_codex_reasoning(reasoning);
-
- let mut command = Command::new(binary_path);
- command
- .current_dir(&temp_dir)
- .stdin(Stdio::piped())
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .arg("exec")
- .arg("--color")
- .arg("never")
- .arg("--skip-git-repo-check")
- .arg("--sandbox")
- .arg("read-only")
- .arg("--model")
- .arg(model)
- .arg("--config")
- .arg(format!("model_reasoning_effort=\"{reasoning_effort}\""))
- .arg("--output-last-message")
- .arg(&output_path);
-
- let result = run_command_with_stdin(&mut command, "Codex CLI", prompt_payload)
- .and_then(|output| {
- if !output.status.success() {
- return Err(format_process_failure("Codex CLI", &output));
- }
-
- match fs::read_to_string(&output_path) {
- Ok(content) => Ok(content),
- Err(read_error) => {
- let stdout = String::from_utf8_lossy(&output.stdout).to_string();
-
- if !stdout.trim().is_empty() {
- Ok(stdout)
- } else {
- Err(format!(
- "Codex CLI completed, but the generated spec could not be read: {read_error}"
- ))
- }
- }
- }
- });
-
- let _ = fs::remove_dir_all(&temp_dir);
- result
-}
-
-fn run_claude_generation(
- binary_path: &Path,
- model: &str,
- reasoning: &str,
- prompt_payload: &str,
-) -> Result<String, String> {
- let temp_dir = create_spec_generation_temp_dir("claude")?;
- let mut command = Command::new(binary_path);
- command
- .current_dir(&temp_dir)
- .stdin(Stdio::piped())
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .arg("--print")
- .arg("Respond to the request provided on stdin.")
- .arg("--model")
- .arg(model)
- .arg("--output-format")
- .arg("text")
- .arg("--permission-mode")
- .arg("bypassPermissions")
- .arg("--tools")
- .arg("")
- .arg("--max-turns")
- .arg("1")
- .arg("--no-session-persistence")
- .arg("--effort")
- .arg(map_claude_reasoning(reasoning));
-
- let result =
- run_command_with_stdin(&mut command, "Claude CLI", prompt_payload).and_then(|output| {
- if !output.status.success() {
- return Err(format_process_failure("Claude CLI", &output));
- }
-
- Ok(String::from_utf8_lossy(&output.stdout).to_string())
- });
-
- let _ = fs::remove_dir_all(&temp_dir);
- result
-}
-
-fn create_spec_generation_temp_dir(prefix: &str) -> Result<PathBuf, String> {
- let base_dir = std::env::temp_dir().join("specforge");
- fs::create_dir_all(&base_dir)
- .map_err(|error| format!("Unable to prepare temporary generation folder: {error}"))?;
- let unique_suffix = SystemTime::now()
- .duration_since(UNIX_EPOCH)
- .map(|duration| duration.as_millis())
- .unwrap_or_default();
- let temp_dir = base_dir.join(format!("{prefix}-{unique_suffix}-{}", std::process::id()));
-
- fs::create_dir_all(&temp_dir)
- .map_err(|error| format!("Unable to prepare temporary generation folder: {error}"))?;
-
- Ok(temp_dir)
-}
-
-fn run_command_with_stdin(
- command: &mut Command,
- display_name: &str,
- stdin_payload: &str,
-) -> Result<std::process::Output, String> {
- let mut child = command
- .spawn()
- .map_err(|error| format!("Unable to start {display_name}: {error}"))?;
- let mut stdin = child
- .stdin
- .take()
- .ok_or_else(|| format!("{display_name} did not expose stdin."))?;
-
- stdin
- .write_all(stdin_payload.as_bytes())
- .map_err(|error| format!("Unable to send the prompt to {display_name}: {error}"))?;
- drop(stdin);
-
- child
- .wait_with_output()
- .map_err(|error| format!("{display_name} exited unexpectedly: {error}"))
-}
-
-fn format_process_failure(display_name: &str, output: &std::process::Output) -> String {
- let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
- let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
- let details = if !stderr.is_empty() {
- stderr
- } else if !stdout.is_empty() {
- stdout
- } else {
- format!("{display_name} exited with status {}", output.status)
- };
-
- format!("{display_name} failed: {details}")
-}
-
-fn map_codex_reasoning(reasoning: &str) -> &str {
- match reasoning {
- "max" => "xhigh",
- "high" => "high",
- "low" => "low",
- _ => "medium",
- }
-}
-
-fn map_claude_reasoning(reasoning: &str) -> &str {
- match reasoning {
- "max" => "high",
- "high" => "high",
- "low" => "low",
- _ => "medium",
- }
-}
-
-fn strip_wrapping_code_fence(content: &str) -> String {
- let trimmed = content.trim();
-
- if !trimmed.starts_with("```") {
- return trimmed.to_string();
- }
-
- let mut lines = trimmed.lines();
- let Some(first_line) = lines.next() else {
- return String::new();
- };
-
- if !first_line.trim_start().starts_with("```") {
- return trimmed.to_string();
- }
-
- let remaining_lines = lines.collect::<Vec<_>>();
-
- if remaining_lines
- .last()
- .map(|line| !line.trim_start().starts_with("```"))
- .unwrap_or(true)
- {
- return trimmed.to_string();
- }
-
- remaining_lines[..remaining_lines.len().saturating_sub(1)].join("\n")
-}
-
-fn run_simulated_agent(
- app: AppHandle,
- runtime: Arc,
- run_id: u64,
- spec_payload: String,
- mode: String,
- model: String,
- reasoning: String,
-) {
- let heading_count = spec_payload
- .lines()
- .filter(|line| line.trim_start().starts_with('#'))
- .count();
- let steps = build_simulated_steps(heading_count, &mode, &model, &reasoning);
- emit_state(&app, "executing", Some("Pre-flight Check"), None, None);
-
- for step in steps {
- match stop_state(&runtime, run_id) {
- StopState::Continue => {}
- StopState::StopRequested => {
- emit_line(
- &app,
- "Execution interrupted before the next step could run.",
- );
- emit_state(
- &app,
- "halted",
- Some(step.milestone),
- None,
- Some("Execution interrupted by the operator."),
- );
- return;
- }
- StopState::Replaced => return,
- }
-
- thread::sleep(Duration::from_millis(step.delay_ms));
- match stop_state(&runtime, run_id) {
- StopState::Continue => {}
- StopState::StopRequested => {
- emit_line(
- &app,
- "Execution interrupted before the next step could run.",
- );
- emit_state(
- &app,
- "halted",
- Some(step.milestone),
- None,
- Some("Execution interrupted by the operator."),
- );
- return;
- }
- StopState::Replaced => return,
- }
- emit_state(&app, "executing", Some(step.milestone), None, None);
- emit_line(&app, &step.line);
-
- if step.gate {
- let summary = if mode == "stepped" {
- "Stepped approval required before the next write action."
- } else {
- "Milestone boundary reached. Review the diff before execution resumes."
- };
-
- match wait_for_approval(&app, &runtime, run_id, step.milestone, summary) {
- Ok(ApprovalWaitOutcome::Approved) => {}
- Ok(ApprovalWaitOutcome::StopRequested) => {
- emit_line(&app, "Execution interrupted during approval gate.");
- emit_state(
- &app,
- "halted",
- Some(step.milestone),
- None,
- Some("Execution interrupted by the operator."),
- );
- return;
- }
- Ok(ApprovalWaitOutcome::Replaced) => return,
- Err(message) => {
- emit_line(&app, &message);
- emit_state(
- &app,
- "error",
- Some(step.milestone),
- None,
- Some("Approval synchronization failed."),
- );
- return;
- }
- }
-
- emit_line(&app, "Approval received. Resuming the agent loop.");
- }
- }
-
- if !matches!(stop_state(&runtime, run_id), StopState::Continue) {
- return;
- }
-
- emit_line(
- &app,
- "Execution complete. Final diff is ready for inspection.",
- );
- emit_state(
- &app,
- "completed",
- Some("Execution Complete"),
- Some(SAMPLE_DIFF),
- Some("Simulated agent execution completed successfully."),
- );
-}
-
-fn build_simulated_steps(
- heading_count: usize,
- mode: &str,
- model: &str,
- reasoning: &str,
-) -> Vec<SimulatedStep> {
- let mut steps = vec![
- SimulatedStep {
- delay_ms: 450,
- line: format!(
- "Loaded approved specification with {heading_count} markdown headings into {model} using the {reasoning} reasoning profile."
- ),
- milestone: "Pre-flight Check",
- gate: false,
- },
- SimulatedStep {
- delay_ms: 650,
- line: String::from(
- "Scanning CLI availability and staging the current repository diff.",
- ),
- milestone: "Pre-flight Check",
- gate: false,
- },
- SimulatedStep {
- delay_ms: 750,
- line: String::from(
- "Mapping milestones for review UI, Zustand stores, and Tauri commands.",
- ),
- milestone: "Milestone Planning",
- gate: false,
- },
- ];
-
- if mode == "stepped" {
- steps.push(SimulatedStep {
- delay_ms: 650,
- line: String::from(
- "A write action is ready to execute against the approved specification.",
- ),
- milestone: "Stepped Approval",
- gate: true,
- });
- }
-
- steps.extend([
- SimulatedStep {
- delay_ms: 700,
- line: String::from(
- "Applying Dracula theme tokens and composing the review workspace shell.",
- ),
- milestone: "Compose Review Workspace",
- gate: false,
- },
- SimulatedStep {
- delay_ms: 650,
- line: String::from(
- "Wiring project, settings, and agent stores into the execution dashboard.",
- ),
- milestone: "Compose Review Workspace",
- gate: false,
- },
- ]);
-
- if mode == "milestone" {
- steps.push(SimulatedStep {
- delay_ms: 650,
- line: String::from("The first milestone is complete and ready for diff review."),
- milestone: "Milestone Approval",
- gate: true,
- });
- }
-
- steps.extend([
- SimulatedStep {
- delay_ms: 650,
- line: String::from("Streaming terminal telemetry and enabling approval controls."),
- milestone: "Execution Dashboard",
- gate: false,
- },
- SimulatedStep {
- delay_ms: 550,
- line: String::from("Packaging a final summary for IDE handoff."),
- milestone: "Execution Dashboard",
- gate: false,
- },
- ]);
-
- steps
-}
-
-fn wait_for_approval(
- app: &AppHandle,
- runtime: &Arc,
- run_id: u64,
- milestone: &str,
- summary: &str,
-) -> Result<ApprovalWaitOutcome, String> {
- emit_state(
- app,
- "awaiting_approval",
- Some(milestone),
- Some(SAMPLE_DIFF),
- Some(summary),
- );
-
- let mut control = runtime
- .control
- .lock()
- .map_err(|_| String::from("Execution lock was poisoned."))?;
- control.awaiting_approval = true;
- runtime.signal.notify_all();
-
- while control.run_id == run_id && control.awaiting_approval && !control.stop_requested {
- control = runtime
- .signal
- .wait(control)
- .map_err(|_| String::from("Execution lock was poisoned."))?;
- }
-
- if control.stop_requested {
- return Ok(ApprovalWaitOutcome::StopRequested);
- }
-
- if control.run_id != run_id {
- return Ok(ApprovalWaitOutcome::Replaced);
- }
-
- Ok(ApprovalWaitOutcome::Approved)
-}
-
-fn stop_state(runtime: &Arc, run_id: u64) -> StopState {
- runtime
- .control
- .lock()
- .map(|control| {
- if control.stop_requested {
- StopState::StopRequested
- } else if control.run_id != run_id {
- StopState::Replaced
- } else {
- StopState::Continue
- }
- })
- .unwrap_or(StopState::StopRequested)
-}
-
-fn emit_line(app: &AppHandle, line: &str) {
- let _ = app.emit("cli-output", line.to_string());
-}
-
-fn emit_state(
- app: &AppHandle,
- status: &str,
- current_milestone: Option<&str>,
- pending_diff: Option<&str>,
- summary: Option<&str>,
-) {
- let payload = AgentStateEvent {
- status: status.to_string(),
- current_milestone: current_milestone.map(ToOwned::to_owned),
- pending_diff: pending_diff.map(ToOwned::to_owned),
- summary: summary.map(ToOwned::to_owned),
- };
- let _ = app.emit("agent-state", payload);
-}
diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs
new file mode 100644
index 0000000..7a01f16
--- /dev/null
+++ b/src-tauri/src/models.rs
@@ -0,0 +1,103 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct CliStatus {
+ pub(crate) name: String,
+ pub(crate) status: String,
+ pub(crate) path: Option<String>,
+ pub(crate) detail: String,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct EnvironmentStatus {
+ pub(crate) scanned_at: String,
+ pub(crate) claude: CliStatus,
+ pub(crate) codex: CliStatus,
+ pub(crate) git: CliStatus,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct WorkspaceEntry {
+ pub(crate) name: String,
+ pub(crate) path: String,
+ pub(crate) kind: String,
+ pub(crate) depth: usize,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct WorkspaceDocument {
+ pub(crate) content: String,
+ pub(crate) source_path: String,
+ pub(crate) file_name: String,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ProjectSettings {
+ pub(crate) selected_model: String,
+ pub(crate) selected_reasoning: String,
+ pub(crate) prd_prompt: String,
+ pub(crate) spec_prompt: String,
+ pub(crate) prd_path: String,
+ pub(crate) spec_path: String,
+ pub(crate) supporting_document_paths: Vec<String>,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ProjectContextPayload {
+ pub(crate) root_name: String,
+ pub(crate) root_path: String,
+ pub(crate) settings_path: String,
+ pub(crate) has_saved_settings: bool,
+ pub(crate) settings: ProjectSettings,
+ pub(crate) entries: Vec<WorkspaceEntry>,
+ pub(crate) ignored_file_count: usize,
+ pub(crate) prd_document: Option<WorkspaceDocument>,
+ pub(crate) spec_document: Option<WorkspaceDocument>,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct WorkspaceScanResult {
+ pub(crate) root_name: String,
+ pub(crate) entries: Vec<WorkspaceEntry>,
+ pub(crate) ignored_file_count: usize,
+ pub(crate) prd_document: Option<WorkspaceDocument>,
+ pub(crate) spec_document: Option<WorkspaceDocument>,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct AgentStateEvent {
+ pub(crate) status: String,
+ pub(crate) current_milestone: Option<String>,
+ pub(crate) pending_diff: Option<String>,
+ pub(crate) summary: Option<String>,
+}
+
+#[derive(Clone)]
+pub(crate) struct SimulatedStep {
+ pub(crate) delay_ms: u64,
+ pub(crate) line: String,
+ pub(crate) milestone: &'static str,
+ pub(crate) gate: bool,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub(crate) enum StopState {
+ Continue,
+ StopRequested,
+ Replaced,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub(crate) enum ApprovalWaitOutcome {
+ Approved,
+ StopRequested,
+ Replaced,
+}
diff --git a/src-tauri/src/paths.rs b/src-tauri/src/paths.rs
new file mode 100644
index 0000000..2545afc
--- /dev/null
+++ b/src-tauri/src/paths.rs
@@ -0,0 +1,93 @@
+use std::{
+ fs,
+ path::{Component, Path, PathBuf},
+};
+
+pub(crate) fn project_root() -> PathBuf {
+ let current_directory = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
+
+ if current_directory
+ .file_name()
+ .and_then(|segment| segment.to_str())
+ .is_some_and(|segment| segment.eq_ignore_ascii_case("src-tauri"))
+ {
+ return current_directory
+ .parent()
+ .map(Path::to_path_buf)
+ .unwrap_or(current_directory);
+ }
+
+ current_directory
+}
+
+pub(crate) fn resolve_override_path(path_value: &str) -> PathBuf {
+ let candidate = PathBuf::from(path_value.trim());
+
+ if candidate.is_absolute() {
+ return candidate;
+ }
+
+ project_root().join(candidate)
+}
+
+pub(crate) fn normalize_relative_path(path_value: &str) -> Result<String, String> {
+ let trimmed_value = path_value.trim();
+
+ if trimmed_value.is_empty() {
+ return Err(String::from("A relative path is required."));
+ }
+
+ let candidate = PathBuf::from(trimmed_value);
+ let mut normalized_path = PathBuf::new();
+
+ for component in candidate.components() {
+ match component {
+ Component::CurDir => {}
+ Component::Normal(segment) => normalized_path.push(segment),
+ Component::ParentDir => {
+ return Err(String::from(
+ "Parent directory traversal is not allowed for document or workspace reads.",
+ ));
+ }
+ Component::Prefix(_) | Component::RootDir => {
+ return Err(String::from("Absolute paths are not allowed here."));
+ }
+ }
+ }
+
+ if normalized_path.as_os_str().is_empty() {
+ return Err(String::from("A relative path is required."));
+ }
+
+ Ok(normalized_path.to_string_lossy().replace('\\', "/"))
+}
+
+pub(crate) fn canonicalize_existing_path(path: &Path) -> std::io::Result<PathBuf> {
+ fs::canonicalize(path)
+}
+
+pub(crate) fn resolve_relative_path_under_root(
+ root: &Path,
+ relative_path: &str,
+) -> Result<PathBuf, String> {
+ let normalized_path = normalize_relative_path(relative_path)?;
+ Ok(root.join(normalized_path))
+}
+
+pub(crate) fn resolve_project_document_path(path_value: &str) -> Result<PathBuf, String> {
+ let normalized_path = normalize_relative_path(path_value)?;
+ let project_root = canonicalize_existing_path(&project_root())
+ .map_err(|error| format!("Unable to resolve project root: {error}"))?;
+ let candidate = project_root.join(&normalized_path);
+ let resolved_path = canonicalize_existing_path(&candidate).map_err(|error| {
+ format!("Unable to resolve project document {normalized_path}: {error}")
+ })?;
+
+ if !resolved_path.starts_with(&project_root) {
+ return Err(String::from(
+ "Project document imports must stay inside the repository root.",
+ ));
+ }
+
+ Ok(resolved_path)
+}
diff --git a/src-tauri/src/project.rs b/src-tauri/src/project.rs
new file mode 100644
index 0000000..c8cedf8
--- /dev/null
+++ b/src-tauri/src/project.rs
@@ -0,0 +1,284 @@
+use crate::{
+ constants::{
+ DEFAULT_PRD_PROMPT, DEFAULT_PROJECT_PRD_PATH, DEFAULT_PROJECT_SPEC_PATH,
+ DEFAULT_SPEC_PROMPT, SPECFORGE_SETTINGS_RELATIVE_PATH,
+ },
+ documents::load_configured_workspace_document,
+ models::{ProjectContextPayload, ProjectSettings},
+ paths::{canonicalize_existing_path, normalize_relative_path},
+ state::{ScannedWorkspace, SharedState},
+ workspace::scan_workspace_folder,
+};
+use std::{
+ fs,
+ path::{Path, PathBuf},
+};
+use tauri::State;
+
+#[tauri::command]
+pub(crate) fn pick_project_folder(
+ state: State<'_, SharedState>,
+) -> Result<Option<ProjectContextPayload>, String> {
+ let Some(folder_path) = rfd::FileDialog::new().pick_folder() else {
+ return Ok(None);
+ };
+
+ load_project_context_from_folder(&state, &folder_path).map(Some)
+}
+
+#[tauri::command]
+pub(crate) fn load_project_context(
+ state: State<'_, SharedState>,
+ folder_path: String,
+) -> Result<ProjectContextPayload, String> {
+ let trimmed_path = folder_path.trim();
+
+ if trimmed_path.is_empty() {
+ return Err(String::from("A workspace folder path is required."));
+ }
+
+ load_project_context_from_folder(&state, &PathBuf::from(trimmed_path))
+}
+
+#[tauri::command]
+pub(crate) fn save_project_settings(
+ folder_path: String,
+ settings: ProjectSettings,
+) -> Result<ProjectSettings, String> {
+ let trimmed_path = folder_path.trim();
+
+ if trimmed_path.is_empty() {
+ return Err(String::from("A workspace folder path is required."));
+ }
+
+ let workspace_root =
+ canonicalize_existing_path(&PathBuf::from(trimmed_path)).map_err(|error| {
+ format!(
+ "Unable to resolve the selected workspace folder {}: {error}",
+ trimmed_path
+ )
+ })?;
+ let default_settings = build_default_project_settings(&workspace_root, None, None);
+ let normalized_settings =
+ normalize_project_settings(&workspace_root, default_settings, Some(settings))?;
+ let settings_path = workspace_root.join(SPECFORGE_SETTINGS_RELATIVE_PATH);
+ let settings_directory = settings_path
+ .parent()
+ .ok_or_else(|| String::from("Unable to resolve the .specforge directory."))?;
+
+ fs::create_dir_all(settings_directory).map_err(|error| {
+ format!(
+ "Unable to create the project settings directory {}: {error}",
+ settings_directory.display()
+ )
+ })?;
+ let settings_json = serde_json::to_string_pretty(&normalized_settings)
+ .map_err(|error| format!("Unable to encode project settings: {error}"))?;
+
+ fs::write(&settings_path, settings_json.as_bytes()).map_err(|error| {
+ format!(
+ "Unable to write project settings to {}: {error}",
+ settings_path.display()
+ )
+ })?;
+
+ Ok(normalized_settings)
+}
+
+pub(crate) fn load_project_context_from_folder(
+ state: &State<'_, SharedState>,
+ folder_path: &Path,
+) -> Result<ProjectContextPayload, String> {
+ let scanned_workspace = scan_workspace_folder(folder_path)?;
+ let ScannedWorkspace { result, context } = scanned_workspace;
+ let settings_path = context.root.join(SPECFORGE_SETTINGS_RELATIVE_PATH);
+ let default_settings = build_default_project_settings(
+ &context.root,
+ result.prd_document.as_ref(),
+ result.spec_document.as_ref(),
+ );
+ let (settings, has_saved_settings) =
+ read_project_settings(&settings_path, &context.root, default_settings)?;
+ let prd_document = load_configured_workspace_document(&context.root, &settings.prd_path)?;
+ let spec_document = load_configured_workspace_document(&context.root, &settings.spec_path)?;
+ let mut active_workspace = state
+ .workspace
+ .lock()
+ .map_err(|_| String::from("Workspace lock was poisoned."))?;
+ *active_workspace = Some(context);
+
+ Ok(ProjectContextPayload {
+ root_name: result.root_name,
+ root_path: active_workspace
+ .as_ref()
+ .map(|workspace| workspace.root.display().to_string())
+ .unwrap_or_default(),
+ settings_path: settings_path.display().to_string(),
+ has_saved_settings,
+ settings,
+ entries: result.entries,
+ ignored_file_count: result.ignored_file_count,
+ prd_document,
+ spec_document,
+ })
+}
+
+pub(crate) fn build_default_project_settings(
+ workspace_root: &Path,
+ prd_document: Option<&crate::models::WorkspaceDocument>,
+ spec_document: Option<&crate::models::WorkspaceDocument>,
+) -> ProjectSettings {
+ ProjectSettings {
+ selected_model: String::from("gpt-5.4"),
+ selected_reasoning: String::from("medium"),
+ prd_prompt: String::from(DEFAULT_PRD_PROMPT),
+ spec_prompt: String::from(DEFAULT_SPEC_PROMPT),
+ prd_path: derive_default_document_path(
+ workspace_root,
+ prd_document,
+ DEFAULT_PROJECT_PRD_PATH,
+ ),
+ spec_path: derive_default_document_path(
+ workspace_root,
+ spec_document,
+ DEFAULT_PROJECT_SPEC_PATH,
+ ),
+ supporting_document_paths: Vec::new(),
+ }
+}
+
+pub(crate) fn normalize_project_settings(
+ workspace_root: &Path,
+ defaults: ProjectSettings,
+ provided: Option<ProjectSettings>,
+) -> Result<ProjectSettings, String> {
+ let Some(provided) = provided else {
+ return Ok(defaults);
+ };
+
+ let selected_model =
+ normalize_project_model(&provided.selected_model, &defaults.selected_model);
+ let selected_reasoning =
+ normalize_project_reasoning(&provided.selected_reasoning, &defaults.selected_reasoning);
+ let normalized_prd_path =
+ normalize_project_path_or_default(workspace_root, &provided.prd_path, &defaults.prd_path)?;
+ let normalized_spec_path = normalize_project_path_or_default(
+ workspace_root,
+ &provided.spec_path,
+ &defaults.spec_path,
+ )?;
+ let supporting_document_paths = provided
+ .supporting_document_paths
+ .iter()
+ .filter_map(|entry| normalize_relative_path(entry).ok())
+ .collect::<Vec<_>>();
+
+ Ok(ProjectSettings {
+ selected_model,
+ selected_reasoning,
+ prd_prompt: if provided.prd_prompt.trim().is_empty() {
+ defaults.prd_prompt
+ } else {
+ provided.prd_prompt.trim().to_string()
+ },
+ spec_prompt: if provided.spec_prompt.trim().is_empty() {
+ defaults.spec_prompt
+ } else {
+ provided.spec_prompt.trim().to_string()
+ },
+ prd_path: normalized_prd_path,
+ spec_path: normalized_spec_path,
+ supporting_document_paths,
+ })
+}
+
+pub(crate) fn normalize_project_path_or_default(
+ workspace_root: &Path,
+ value: &str,
+ fallback: &str,
+) -> Result<String, String> {
+ if value.trim().is_empty() {
+ return Ok(fallback.to_string());
+ }
+
+ normalize_relative_path(value).map_err(|error| {
+ format!(
+ "Invalid project document path for workspace {}: {error}",
+ workspace_root.display()
+ )
+ })
+}
+
+pub(crate) fn derive_default_document_path(
+ workspace_root: &Path,
+ document: Option<&crate::models::WorkspaceDocument>,
+ fallback: &str,
+) -> String {
+ document
+ .and_then(|entry| {
+ PathBuf::from(&entry.source_path)
+ .strip_prefix(workspace_root)
+ .ok()
+ .map(|path| path.to_string_lossy().replace('\\', "/"))
+ })
+ .unwrap_or_else(|| fallback.to_string())
+}
+
+fn read_project_settings(
+ settings_path: &Path,
+ workspace_root: &Path,
+ defaults: ProjectSettings,
+) -> Result<(ProjectSettings, bool), String> {
+ if !settings_path.exists() {
+ return Ok((defaults, false));
+ }
+
+ let raw_settings = fs::read_to_string(settings_path).map_err(|error| {
+ format!(
+ "Unable to read project settings {}: {error}",
+ settings_path.display()
+ )
+ })?;
+ let parsed_settings =
+ serde_json::from_str::<ProjectSettings>(&raw_settings).map_err(|error| {
+ format!(
+ "Unable to parse project settings {}: {error}",
+ settings_path.display()
+ )
+ })?;
+
+ Ok((
+ normalize_project_settings(workspace_root, defaults, Some(parsed_settings))?,
+ true,
+ ))
+}
+
+fn normalize_project_model(value: &str, fallback: &str) -> String {
+ const VALID_MODELS: &[&str] = &[
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.3-codex",
+ "gpt-5.2",
+ "claude-opus-4-1-20250805",
+ "claude-opus-4-20250514",
+ "claude-sonnet-4-20250514",
+ "claude-3-7-sonnet-20250219",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-haiku-20241022",
+ "claude-3-haiku-20240307",
+ ];
+
+ if VALID_MODELS.contains(&value.trim()) {
+ return value.trim().to_string();
+ }
+
+ fallback.to_string()
+}
+
+fn normalize_project_reasoning(value: &str, fallback: &str) -> String {
+ match value.trim() {
+ "low" | "medium" | "high" | "max" => value.trim().to_string(),
+ _ => fallback.to_string(),
+ }
+}
diff --git a/src-tauri/src/state.rs b/src-tauri/src/state.rs
new file mode 100644
index 0000000..e7d7a5e
--- /dev/null
+++ b/src-tauri/src/state.rs
@@ -0,0 +1,35 @@
+use crate::models::WorkspaceScanResult;
+use std::{
+ collections::HashMap,
+ path::PathBuf,
+ sync::{Arc, Condvar, Mutex},
+};
+
+#[derive(Default)]
+pub(crate) struct SharedState {
+ pub(crate) runtime: Arc<ExecutionRuntime>,
+ pub(crate) workspace: Mutex<Option<WorkspaceContext>>,
+}
+
+#[derive(Default)]
+pub(crate) struct ExecutionRuntime {
+ pub(crate) control: Mutex<ExecutionControl>,
+ pub(crate) signal: Condvar,
+}
+
+#[derive(Default)]
+pub(crate) struct ExecutionControl {
+ pub(crate) run_id: u64,
+ pub(crate) awaiting_approval: bool,
+ pub(crate) stop_requested: bool,
+}
+
+pub(crate) struct WorkspaceContext {
+ pub(crate) root: PathBuf,
+ pub(crate) files: HashMap<String, PathBuf>,
+}
+
+pub(crate) struct ScannedWorkspace {
+ pub(crate) result: WorkspaceScanResult,
+ pub(crate) context: WorkspaceContext,
+}
diff --git a/src-tauri/src/workspace.rs b/src-tauri/src/workspace.rs
new file mode 100644
index 0000000..93c747a
--- /dev/null
+++ b/src-tauri/src/workspace.rs
@@ -0,0 +1,334 @@
+use crate::{
+ documents::parse_workspace_document,
+ models::{WorkspaceDocument, WorkspaceEntry, WorkspaceScanResult},
+ paths::{canonicalize_existing_path, normalize_relative_path, project_root},
+ state::{ScannedWorkspace, SharedState, WorkspaceContext},
+};
+use ignore::WalkBuilder;
+use std::{
+ collections::HashMap,
+ fs,
+ path::{Path, PathBuf},
+};
+use tauri::State;
+
+#[tauri::command]
+pub(crate) fn open_workspace_folder(
+ state: State<'_, SharedState>,
+) -> Result<Option<WorkspaceScanResult>, String> {
+ let Some(folder_path) = rfd::FileDialog::new().pick_folder() else {
+ return Ok(None);
+ };
+
+ let scanned_workspace = scan_workspace_folder(&folder_path)?;
+ let mut active_workspace = state
+ .workspace
+ .lock()
+ .map_err(|_| String::from("Workspace lock was poisoned."))?;
+ *active_workspace = Some(scanned_workspace.context);
+ Ok(Some(scanned_workspace.result))
+}
+
+#[tauri::command]
+pub(crate) fn read_workspace_file(
+ state: State<'_, SharedState>,
+ file_path: String,
+) -> Result<String, String> {
+ let resolved_path = resolve_workspace_file_path(&state, &file_path)?;
+
+ fs::read_to_string(&resolved_path).map_err(|error| {
+ format!(
+ "Unable to read workspace file {}: {error}",
+ resolved_path.display()
+ )
+ })
+}
+
+#[tauri::command]
+pub(crate) fn get_workspace_snapshot() -> Result<Vec<WorkspaceEntry>, String> {
+ let root = project_root();
+ let directory = fs::read_dir(&root)
+ .map_err(|error| format!("Unable to read workspace root {}: {error}", root.display()))?;
+ let mut entries = directory
+ .filter_map(Result::ok)
+ .map(|entry| {
+ let path = entry.path();
+ let metadata = entry.metadata().ok();
+ let kind = if metadata.as_ref().is_some_and(|item| item.is_dir()) {
+ "directory"
+ } else {
+ "file"
+ };
+
+ WorkspaceEntry {
+ name: entry.file_name().to_string_lossy().to_string(),
+ path: path
+ .strip_prefix(&root)
+ .unwrap_or(path.as_path())
+ .to_string_lossy()
+ .replace('\\', "/"),
+ kind: kind.to_string(),
+ depth: 0,
+ }
+ })
+ .collect::<Vec<_>>();
+
+ entries.sort_by(|left, right| left.kind.cmp(&right.kind).then(left.name.cmp(&right.name)));
+ Ok(entries)
+}
+
+pub(crate) fn scan_workspace_folder(root: &Path) -> Result<ScannedWorkspace, String> {
+ let canonical_root = canonicalize_existing_path(root).map_err(|error| {
+ format!(
+ "Unable to prepare workspace folder {}: {error}",
+ root.display()
+ )
+ })?;
+ let root_name = canonical_root
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or("Workspace")
+ .to_string();
+ let mut directory_entries = HashMap::<String, WorkspaceEntry>::new();
+ let mut file_entries = Vec::<WorkspaceEntry>::new();
+ let mut file_paths = HashMap::<String, PathBuf>::new();
+ let mut file_documents = Vec::<(String, PathBuf)>::new();
+
+ let walker = WalkBuilder::new(&canonical_root)
+ .hidden(false)
+ .git_ignore(true)
+ .git_global(true)
+ .git_exclude(true)
+ .build();
+
+ for item in walker {
+ let entry = match item {
+ Ok(entry) => entry,
+ Err(error) => {
+ return Err(format!(
+ "Unable to walk workspace folder {}: {error}",
+ canonical_root.display()
+ ));
+ }
+ };
+ let path = entry.path();
+
+ if path == canonical_root.as_path() {
+ continue;
+ }
+
+ let relative_path = path
+ .strip_prefix(&canonical_root)
+ .map_err(|error| {
+ format!(
+ "Unable to normalize workspace path {}: {error}",
+ path.display()
+ )
+ })?
+ .to_string_lossy()
+ .replace('\\', "/");
+
+ if relative_path.is_empty() {
+ continue;
+ }
+
+ let depth = relative_path.split('/').count().saturating_sub(1);
+ let file_name = path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .unwrap_or_default()
+ .to_string();
+
+ if entry
+ .file_type()
+ .map(|file_type| file_type.is_dir())
+ .unwrap_or(false)
+ {
+ directory_entries.insert(
+ relative_path.clone(),
+ WorkspaceEntry {
+ name: file_name,
+ path: relative_path,
+ kind: String::from("directory"),
+ depth,
+ },
+ );
+ continue;
+ }
+
+ for (index, segment) in relative_path
+ .split('/')
+ .collect::<Vec<_>>()
+ .iter()
+ .enumerate()
+ {
+ if index == relative_path.split('/').count() - 1 {
+ break;
+ }
+
+ let directory_path = relative_path
+ .split('/')
+ .take(index + 1)
+ .collect::<Vec<_>>()
+ .join("/");
+
+ directory_entries
+ .entry(directory_path.clone())
+ .or_insert_with(|| WorkspaceEntry {
+ name: (*segment).to_string(),
+ path: directory_path,
+ kind: String::from("directory"),
+ depth: index,
+ });
+ }
+
+ file_entries.push(WorkspaceEntry {
+ name: file_name.clone(),
+ path: relative_path.clone(),
+ kind: String::from("file"),
+ depth,
+ });
+ file_paths.insert(relative_path.clone(), path.to_path_buf());
+ file_documents.push((relative_path, path.to_path_buf()));
+ }
+
+ let mut entries = directory_entries
+ .into_values()
+ .chain(file_entries)
+ .collect::<Vec<_>>();
+ entries.sort_by(compare_workspace_entries);
+
+ let prd_document = pick_workspace_document(&file_documents, &["prd.md", "prd.pdf"])?;
+ let spec_document = pick_workspace_document(&file_documents, &["spec.md", "spec.pdf"])?;
+
+ Ok(ScannedWorkspace {
+ result: WorkspaceScanResult {
+ root_name,
+ entries,
+ ignored_file_count: 0,
+ prd_document,
+ spec_document,
+ },
+ context: WorkspaceContext {
+ root: canonical_root,
+ files: file_paths,
+ },
+ })
+}
+
+pub(crate) fn compare_workspace_entries(
+ left: &WorkspaceEntry,
+ right: &WorkspaceEntry,
+) -> std::cmp::Ordering {
+ let left_segments = left.path.split('/').collect::<Vec<_>>();
+ let right_segments = right.path.split('/').collect::<Vec<_>>();
+ let shared_length = left_segments.len().min(right_segments.len());
+
+ for index in 0..shared_length {
+ if left_segments[index] != right_segments[index] {
+ let left_is_directory = index < left_segments.len() - 1 || left.kind == "directory";
+ let right_is_directory = index < right_segments.len() - 1 || right.kind == "directory";
+
+ if left_is_directory != right_is_directory {
+ return if left_is_directory {
+ std::cmp::Ordering::Less
+ } else {
+ std::cmp::Ordering::Greater
+ };
+ }
+
+ return left_segments[index].cmp(right_segments[index]);
+ }
+ }
+
+ if left_segments.len() != right_segments.len() {
+ return left_segments.len().cmp(&right_segments.len());
+ }
+
+ if left.kind != right.kind {
+ return if left.kind == "directory" {
+ std::cmp::Ordering::Less
+ } else {
+ std::cmp::Ordering::Greater
+ };
+ }
+
+ left.path.cmp(&right.path)
+}
+
+pub(crate) fn pick_workspace_document(
+ files: &[(String, PathBuf)],
+ expected_names: &[&str],
+) -> Result<Option<WorkspaceDocument>, String> {
+ let mut ranked_files = files
+ .iter()
+ .filter_map(|(relative_path, absolute_path)| {
+ let file_name = absolute_path
+ .file_name()
+ .and_then(|value| value.to_str())
+ .map(|value| value.to_lowercase())?;
+
+ expected_names
+ .iter()
+ .position(|expected_name| *expected_name == file_name)
+ .map(|rank| (rank, relative_path, absolute_path))
+ })
+ .collect::<Vec<_>>();
+
+ ranked_files.sort_by(|left, right| {
+ left.0
+ .cmp(&right.0)
+ .then(relative_path_depth(left.1).cmp(&relative_path_depth(right.1)))
+ .then(left.1.cmp(right.1))
+ });
+
+ ranked_files
+ .into_iter()
+ .next()
+ .map(|(_, relative_path, absolute_path)| {
+ parse_workspace_document(absolute_path).map(|content| WorkspaceDocument {
+ content,
+ source_path: absolute_path.display().to_string(),
+ file_name: relative_path
+ .rsplit('/')
+ .next()
+ .unwrap_or(relative_path)
+ .to_string(),
+ })
+ })
+ .transpose()
+}
+
+pub(crate) fn resolve_workspace_file_path(
+ state: &State<'_, SharedState>,
+ file_path: &str,
+) -> Result<PathBuf, String> {
+ let normalized_path = normalize_relative_path(file_path)?;
+ let workspace = state
+ .workspace
+ .lock()
+ .map_err(|_| String::from("Workspace lock was poisoned."))?;
+ let active_workspace = workspace
+ .as_ref()
+ .ok_or_else(|| String::from("No workspace folder is currently open."))?;
+ let resolved_path = active_workspace
+ .files
+ .get(&normalized_path)
+ .ok_or_else(|| {
+ format!("The file {normalized_path} is not part of the active workspace.")
+ })?;
+ let canonical_path = canonicalize_existing_path(resolved_path)
+ .map_err(|error| format!("Unable to resolve workspace file {normalized_path}: {error}"))?;
+
+ if !canonical_path.starts_with(&active_workspace.root) {
+ return Err(format!(
+ "Workspace file {normalized_path} resolved outside the active workspace root."
+ ));
+ }
+
+ Ok(canonical_path)
+}
+
+pub(crate) fn relative_path_depth(path: &str) -> usize {
+ path.split('/').count()
+}
diff --git a/src/App.tsx b/src/App.tsx
index b5771f1..eb4056d 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -1231,7 +1231,9 @@ function App() {
return;
}
- applyProjectContext(context);
+ applyProjectContext(context, {
+ navigateToReview: context.hasSavedSettings
+ });
})
.catch(() => {
if (isDisposed) {
From cafcf5e408e089d0d1c4e51a27eae83824de0e52 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sat, 11 Apr 2026 11:18:35 -0300
Subject: [PATCH 03/32] Add tauri dev script and require Context7 for docs
---
AGENTS.md | 1 +
package.json | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/AGENTS.md b/AGENTS.md
index a9f8453..03cc9bd 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -20,6 +20,7 @@
- MUST run `cargo check --manifest-path .\src-tauri\Cargo.toml` after changing Rust commands or shared payload types.
- MUST run `bun run build` after changing routes, stores, document loading, or shared UI contracts. If Bun reports broken shims first, repair them with `bun install --force`.
- MUST extract new frontend behavior out of `src/App.tsx` when possible; it is already the main orchestration shell.
+- MUST use the Context7 MCP server for all documentation lookups.
## Ask First
- Ask before changing autonomy defaults, approval-gate behavior, or the emergency-stop semantics.
diff --git a/package.json b/package.json
index 2b05521..e7a6e70 100644
--- a/package.json
+++ b/package.json
@@ -6,6 +6,7 @@
"packageManager": "bun@1.3.6",
"scripts": {
"dev": "vite",
+ "tauri:dev": "tauri dev",
"build": "tsc && vite build",
"typecheck": "tsc --noEmit",
"preview": "vite preview",
@@ -32,4 +33,4 @@
"typescript": "^5.9.0",
"vite": "^7.0.0"
}
-}
+}
\ No newline at end of file
From 14c9c3993c3c93bc0d4012c1b43758e960fd4303 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 00:28:21 -0300
Subject: [PATCH 04/32] Polish settings layout and unify workspace path display
---
src/App.tsx | 29 ++-
src/components/AppRail.tsx | 8 +-
src/components/MainWorkspace.tsx | 36 +---
src/components/ProjectAiSettingsCard.tsx | 156 +++++++-------
src/components/ProjectDocumentsCard.tsx | 125 ++++++-----
src/components/SettingsPrimitives.tsx | 149 +++++++++++++
src/components/SettingsView.tsx | 255 +++++++++++------------
src/lib/projectConfig.ts | 21 ++
src/screens/ConfigurationScreen.tsx | 203 +++++++++---------
src/screens/PrdScreen.tsx | 6 +-
src/screens/SettingsScreen.tsx | 9 +-
11 files changed, 565 insertions(+), 432 deletions(-)
create mode 100644 src/components/SettingsPrimitives.tsx
diff --git a/src/App.tsx b/src/App.tsx
index eb4056d..11ccf3b 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -39,6 +39,7 @@ import {
DEFAULT_PROJECT_SPEC_PATH,
SPECFORGE_SETTINGS_RELATIVE_PATH,
formatSupportingDocumentPaths,
+ getWorkspaceDisplayPath,
normalizeProjectRelativePath,
normalizeProjectSettings,
parseSupportingDocumentPaths
@@ -247,15 +248,11 @@ function App() {
);
const configPathDisplay = useMemo(() => {
if (projectConfigPath.trim()) {
- return projectConfigPath;
- }
-
- if (projectRootPath.trim()) {
- return `${projectRootPath.replace(/\\/g, "/")}/${SPECFORGE_SETTINGS_RELATIVE_PATH}`;
+ return getWorkspaceDisplayPath(projectConfigPath, projectRootName);
}
return SPECFORGE_SETTINGS_RELATIVE_PATH;
- }, [projectConfigPath, projectRootPath]);
+ }, [projectConfigPath, projectRootName]);
const supportingDocumentsValue = useMemo(
() => formatSupportingDocumentPaths(supportingDocumentPaths),
[supportingDocumentPaths]
@@ -457,8 +454,8 @@ function App() {
setSpecGenerationError("");
setProjectStatusMessage(
context.hasSavedSettings
- ? `Loaded project settings from ${context.settingsPath}.`
- : `Selected ${context.rootName}. Save the setup to create ${context.settingsPath}.`
+ ? `Loaded project settings from ${context.rootName}/${getWorkspaceDisplayPath(context.settingsPath, context.rootName)}.`
+ : `Selected ${context.rootName}. Save the setup to create ${context.rootName}/${getWorkspaceDisplayPath(context.settingsPath, context.rootName)}.`
);
setProjectErrorMessage("");
setWorkspaceNotice(buildWorkspaceNotice(context));
@@ -517,7 +514,11 @@ function App() {
setProjectSettings(savedSettings);
setHasSavedProjectSettings(true);
- setProjectStatusMessage(`Saved project settings to ${configPathDisplay}.`);
+ setProjectStatusMessage(
+ projectRootName
+ ? `Saved project settings to ${projectRootName}/${configPathDisplay}.`
+ : `Saved project settings to ${configPathDisplay}.`
+ );
if (reloadProject || navigateToReview) {
const reloadedContext = await loadProjectContext(projectRootPath);
@@ -536,6 +537,7 @@ function App() {
configPathDisplay,
currentProjectSettings,
desktopRuntime,
+ projectRootName,
projectRootPath,
setProjectSettings
]
@@ -580,13 +582,8 @@ function App() {
return;
}
- applyProjectContext(nextProjectContext, {
- navigateToReview: nextProjectContext.hasSavedSettings
- });
-
- if (!nextProjectContext.hasSavedSettings) {
- navigate("/");
- }
+ applyProjectContext(nextProjectContext);
+ navigate("/");
} catch (error) {
setProjectErrorMessage(
error instanceof Error ? error.message : "Unable to open the selected project folder."
diff --git a/src/components/AppRail.tsx b/src/components/AppRail.tsx
index ea078a2..beb7d2d 100644
--- a/src/components/AppRail.tsx
+++ b/src/components/AppRail.tsx
@@ -18,9 +18,11 @@ export function AppRail({ hasProjectConfigured }: AppRailProps) {
SF
-
-
-
+ {!hasProjectConfigured ? (
+
+
+
+ ) : null}
{hasProjectConfigured ? (
diff --git a/src/components/MainWorkspace.tsx b/src/components/MainWorkspace.tsx
index f432bb5..b4933b8 100644
--- a/src/components/MainWorkspace.tsx
+++ b/src/components/MainWorkspace.tsx
@@ -4,6 +4,7 @@ import {
} from "iconoir-react";
import { memo, useEffect, useMemo, type ChangeEvent } from "react";
+import { getWorkspaceDisplayPath } from "../lib/projectConfig";
import { DocumentActionBar } from "./DocumentActionBar";
import { DocumentEmptyState } from "./DocumentEmptyState";
import { DocumentPane } from "./DocumentPane";
@@ -116,11 +117,11 @@ export const MainWorkspace = memo(function MainWorkspace({
[activeTab, openEditorTabs]
);
const displayPrdPath = useMemo(
- () => getDisplayDocumentPath(prdPath, workspaceRootName),
+ () => getWorkspaceDisplayPath(prdPath, workspaceRootName),
[prdPath, workspaceRootName]
);
const displaySpecPath = useMemo(
- () => getDisplayDocumentPath(specPath, workspaceRootName),
+ () => getWorkspaceDisplayPath(specPath, workspaceRootName),
[specPath, workspaceRootName]
);
const hasPrdContent = prdContent.trim().length > 0;
@@ -170,7 +171,7 @@ export const MainWorkspace = memo(function MainWorkspace({
}, [activeEditorTab, onEditorTabClose]);
return (
-
+
-
+
@@ -214,7 +215,7 @@ export const MainWorkspace = memo(function MainWorkspace({
)}
-
+
@@ -275,8 +276,8 @@ export const MainWorkspace = memo(function MainWorkspace({
/>
) : activeEditorTab ? (
-
-
+
+
Workspace File
@@ -287,7 +288,7 @@ export const MainWorkspace = memo(function MainWorkspace({
onEditorTabChange(activeEditorTab.path, event.target.value)}
value={activeEditorTab.content}
/>
@@ -315,25 +316,6 @@ export const MainWorkspace = memo(function MainWorkspace({
);
});
-function getDisplayDocumentPath(path: string, workspaceRootName: string) {
- const normalizedPath = path.replace(/\\/g, "/");
-
- if (!workspaceRootName) {
- return normalizedPath;
- }
-
- const segments = normalizedPath.split("/").filter(Boolean);
- const rootIndex = segments.findIndex(
- (segment) => segment.toLowerCase() === workspaceRootName.toLowerCase()
- );
-
- if (rootIndex >= 0 && rootIndex < segments.length - 1) {
- return segments.slice(rootIndex + 1).join("/");
- }
-
- return normalizedPath;
-}
-
const HEADER_ACTION_BUTTON_CLASS =
"inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
diff --git a/src/components/ProjectAiSettingsCard.tsx b/src/components/ProjectAiSettingsCard.tsx
index 718392c..839cf45 100644
--- a/src/components/ProjectAiSettingsCard.tsx
+++ b/src/components/ProjectAiSettingsCard.tsx
@@ -1,11 +1,25 @@
+import {
+ Card,
+ TextArea
+} from "@heroui/react";
import { Brain, Spark } from "iconoir-react";
import { memo } from "react";
import { getModelOptions, getReasoningOptions } from "../lib/agentConfig";
+import {
+ ScopedPathReference,
+ SETTINGS_PANEL_CLASS,
+ SETTINGS_SURFACE_CLASS,
+ SettingsSectionHeader,
+ SettingsSelectField,
+ FIELD_LABEL_CLASS,
+ TEXTAREA_CLASS
+} from "./SettingsPrimitives";
import type { ModelId, ReasoningProfileId } from "../types";
interface ProjectAiSettingsCardProps {
configPath: string;
+ workspaceRootName: string;
selectedModel: ModelId;
selectedReasoning: ReasoningProfileId;
prdPrompt: string;
@@ -18,6 +32,7 @@ interface ProjectAiSettingsCardProps {
export const ProjectAiSettingsCard = memo(function ProjectAiSettingsCard({
configPath,
+ workspaceRootName,
selectedModel,
selectedReasoning,
prdPrompt,
@@ -31,96 +46,65 @@ export const ProjectAiSettingsCard = memo(function ProjectAiSettingsCard({
const reasoningOptions = getReasoningOptions(selectedModel);
return (
-
-
-
-
- AI Defaults
-
-
-
-
-
- Default model
- onModelChange(event.target.value as ModelId)}
- value={selectedModel}
- >
- {modelOptions.map((option) => (
-
- {option.label}
-
- ))}
-
-
-
-
- Reasoning profile
- onReasoningChange(event.target.value as ReasoningProfileId)}
- value={selectedReasoning}
- >
- {reasoningOptions.map((option) => (
-
- {option.label}
-
- ))}
-
-
-
-
-
-
- Default PRD prompt
- onPrdPromptChange(event.target.value)}
- value={prdPrompt}
+
+
+ } title="AI Defaults" />
+
+
-
- Saved in {configPath || ".specforge/settings.json"}.
-
-
-
- Default spec prompt
- onSpecPromptChange(event.target.value)}
- value={specPrompt}
+
-
- Saved in {configPath || ".specforge/settings.json"}.
-
-
-
-
-
-
-
-
- The empty-state prompt fields append the user note after these saved defaults before
- the selected CLI is invoked.
-
-
-
- );
-});
-const PANEL_CLASS =
- "grid gap-4 rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
+
+
+ Default PRD prompt
+ onPrdPromptChange(event.target.value)}
+ value={prdPrompt}
+ />
+ Saved in}
+ workspaceRootName={workspaceRootName}
+ />
+
-const FIELD_LABEL_CLASS =
- "text-sm font-medium leading-6 text-[var(--text-subtle)]";
-
-const INPUT_CLASS =
- "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition focus:border-[var(--accent)]";
-
-const TEXTAREA_CLASS =
- "min-h-[12rem] w-full resize-y rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-4 font-[var(--font-mono)] text-[15px] leading-6 text-[var(--text-main)] outline-none transition focus:border-[var(--accent)]";
+
+ Default spec prompt
+ onSpecPromptChange(event.target.value)}
+ value={specPrompt}
+ />
+ Saved in}
+ workspaceRootName={workspaceRootName}
+ />
+
+
-const HELPER_CLASS =
- "m-0 text-sm leading-6 text-[var(--text-subtle)]";
+
+
+
+
+ The empty-state prompt fields append the user note after these saved defaults before
+ the selected CLI is invoked.
+
+
+
+
+
+ );
+});
diff --git a/src/components/ProjectDocumentsCard.tsx b/src/components/ProjectDocumentsCard.tsx
index d02c84e..9c286b6 100644
--- a/src/components/ProjectDocumentsCard.tsx
+++ b/src/components/ProjectDocumentsCard.tsx
@@ -1,6 +1,21 @@
+import {
+ Card,
+ Input,
+ TextArea
+} from "@heroui/react";
import { Database, Folder } from "iconoir-react";
import { memo } from "react";
+import {
+ FIELD_LABEL_CLASS,
+ INPUT_CLASS,
+ ScopedPathReference,
+ SETTINGS_PANEL_CLASS,
+ SETTINGS_SURFACE_CLASS,
+ SettingsSectionHeader,
+ TEXTAREA_CLASS
+} from "./SettingsPrimitives";
+
interface ProjectDocumentsCardProps {
configPath: string;
workspaceRootName: string;
@@ -23,76 +38,60 @@ export const ProjectDocumentsCard = memo(function ProjectDocumentsCard({
onSupportingDocumentsChange
}: ProjectDocumentsCardProps) {
return (
-
-
-
-
- Document Paths
-
-
+
+
+ } title="Document Paths" />
+
+ Paths are stored relative to the active workspace in
+
+
-
- Paths are stored relative to {workspaceRootName || "the selected workspace"}{" "}
- in {configPath || ".specforge/settings.json"}.
-
+
-
- Additional documents
- onSupportingDocumentsChange(event.target.value)}
- placeholder={"docs/notes/constraints.md\ndocs/research/api.md"}
- value={supportingDocumentsValue}
- />
-
- Add one relative path per line for any extra references you want to keep with the
- project.
-
-
-
-
-
-
-
- Generated AI documents are written to the configured PRD and SPEC paths. Configure
- Markdown targets if you want the generated output saved back into the workspace.
-
+
+
+
+
+ Generated AI documents are written to the configured PRD and SPEC paths. Configure
+ Markdown targets if you want the generated output saved back into the workspace.
+
+
-
-
+
+
);
});
-
-const PANEL_CLASS =
- "grid gap-4 rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
-
-const FIELD_LABEL_CLASS =
- "text-sm font-medium leading-6 text-[var(--text-subtle)]";
-
-const INPUT_CLASS =
- "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition focus:border-[var(--accent)]";
-
-const TEXTAREA_CLASS =
- "min-h-[8rem] w-full resize-y rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-4 font-[var(--font-mono)] text-[15px] leading-6 text-[var(--text-main)] outline-none transition focus:border-[var(--accent)]";
diff --git a/src/components/SettingsPrimitives.tsx b/src/components/SettingsPrimitives.tsx
new file mode 100644
index 0000000..ce1d8a6
--- /dev/null
+++ b/src/components/SettingsPrimitives.tsx
@@ -0,0 +1,149 @@
+import {
+ Label,
+ ListBox,
+ Select
+} from "@heroui/react";
+import { useCallback, type Key, type ReactNode } from "react";
+
+import type { SelectOption } from "../lib/agentConfig";
+
+interface SettingsSelectFieldProps
{
+ label: string;
+ selectedKey: Value;
+ options: Array>;
+ onSelectionChange: (value: Value) => void;
+}
+
+interface ScopedPathReferenceProps {
+ path: string;
+ workspaceRootName?: string;
+ prefix?: ReactNode;
+ fallbackPath?: string;
+}
+
+interface SettingsSectionHeaderProps {
+ icon: ReactNode;
+ title: string;
+ description?: ReactNode;
+}
+
+export function SettingsSelectField({
+ label,
+ selectedKey,
+ options,
+ onSelectionChange
+}: SettingsSelectFieldProps) {
+ const handleSelectionChange = useCallback(
+ (key: Key | null) => {
+ if (key !== null) {
+ onSelectionChange(String(key) as Value);
+ }
+ },
+ [onSelectionChange]
+ );
+
+ return (
+
+ {label}
+
+
+
+
+
+
+ {options.map((option) => (
+
+
+ {option.label}
+ {option.hint ? (
+
+ {option.hint}
+
+ ) : null}
+
+
+ ))}
+
+
+
+ );
+}
+
+export function ScopedPathReference({
+ path,
+ workspaceRootName,
+ prefix,
+ fallbackPath = ".specforge/settings.json"
+}: ScopedPathReferenceProps) {
+ const displayPath = path.trim() || fallbackPath;
+
+ return (
+
+ {prefix}
+ {workspaceRootName ? (
+
+ {workspaceRootName}
+
+ ) : null}
+
+ {displayPath}
+
+
+ );
+}
+
+export function SettingsSectionHeader({
+ icon,
+ title,
+ description
+}: SettingsSectionHeaderProps) {
+ return (
+
+
{icon}
+
+
+ {title}
+
+ {description ? (
+
{description}
+ ) : null}
+
+
+ );
+}
+
+export const SETTINGS_PANEL_CLASS =
+ "border border-[var(--border-strong)] bg-[var(--bg-panel)] shadow-[var(--shadow)] backdrop-blur-[30px]";
+
+export const SETTINGS_SURFACE_CLASS =
+ "rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-surface)]/85";
+
+export const FIELD_LABEL_CLASS =
+ "text-sm font-medium leading-6 text-[var(--text-subtle)]";
+
+export const INPUT_CLASS =
+ "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/15 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition placeholder:text-[var(--text-muted)] focus:border-[var(--accent)]";
+
+export const TEXTAREA_CLASS =
+ "min-h-[10rem] w-full resize-y rounded-[1rem] border border-[var(--border-soft)] bg-black/15 px-4 py-4 font-[var(--font-mono)] text-[15px] leading-6 text-[var(--text-main)] outline-none transition placeholder:text-[var(--text-muted)] focus:border-[var(--accent)]";
+
+export const SELECT_TRIGGER_CLASS =
+ "min-h-[3rem] rounded-[1rem] border border-[var(--border-soft)] bg-black/15 px-4 text-[var(--text-main)] transition focus:border-[var(--accent)]";
+
+export const LISTBOX_ITEM_CLASS =
+ "cursor-pointer rounded-[0.95rem] px-3 py-3 text-[var(--text-main)] outline-none transition data-[focused=true]:bg-white/8";
+
+export const SECONDARY_BUTTON_CLASS =
+ "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
+
+export const PRIMARY_BUTTON_CLASS =
+ "inline-flex items-center justify-center gap-2 rounded-[1rem] border-0 bg-[linear-gradient(135deg,var(--accent),#ff79c6)] px-4 py-3 font-semibold text-[#15131c] transition hover:-translate-y-0.5 hover:opacity-95";
diff --git a/src/components/SettingsView.tsx b/src/components/SettingsView.tsx
index adea090..909f8f3 100644
--- a/src/components/SettingsView.tsx
+++ b/src/components/SettingsView.tsx
@@ -1,5 +1,9 @@
import {
- CheckCircle,
+ Button,
+ Card,
+ Input
+} from "@heroui/react";
+import {
Database,
SunLight,
Terminal
@@ -9,6 +13,14 @@ import { memo } from "react";
import { CliHealthCard } from "./CliHealthCard";
import { ProjectAiSettingsCard } from "./ProjectAiSettingsCard";
import { ProjectDocumentsCard } from "./ProjectDocumentsCard";
+import {
+ FIELD_LABEL_CLASS,
+ INPUT_CLASS,
+ ScopedPathReference,
+ SETTINGS_PANEL_CLASS,
+ SETTINGS_SURFACE_CLASS,
+ SettingsSectionHeader
+} from "./SettingsPrimitives";
import type {
EnvironmentStatus,
ModelId,
@@ -76,96 +88,90 @@ export const SettingsView = memo(function SettingsView({
}: SettingsViewProps) {
return (
-
-
-
- Settings
-
-
- Machine and Project Preferences
-
-
- CLI overrides and theme stay local to this machine. Prompt templates, AI defaults, and
- document paths are saved inside the selected project at{" "}
- {configPath || ".specforge/settings.json"}.
-
- {projectStatusMessage ? (
-
- {projectStatusMessage}
+
+
+
+
+ Settings
- ) : null}
- {projectErrorMessage ? (
-
- {projectErrorMessage}
+
+ Machine and Project Preferences
+
+
+ CLI overrides and theme stay local to this machine. Prompt templates, AI defaults,
+ and document paths are saved inside the selected project.
- ) : null}
-
-
+
+ {projectStatusMessage ? (
+
+ {projectStatusMessage}
+
+ ) : null}
+ {projectErrorMessage ? (
+
+ {projectErrorMessage}
+
+ ) : null}
+
+
+
-
-
-
- Claude CLI
-
-
-
-
- Binary path override
+
+
+ } title="Claude CLI" />
+
+
+ Binary path override
+ onClaudePathChange(event.target.value)}
+ placeholder="Optional manual path"
+ value={claudePath}
+ />
- onClaudePathChange(event.target.value)}
- placeholder="Optional manual path"
- value={claudePath}
- />
-
-
-
-
-
-
- Codex CLI
-
-
-
-
- Binary path override
+
+
+
+
+
+ } title="Codex CLI" />
+
+
+ Binary path override
+ onCodexPathChange(event.target.value)}
+ placeholder="Optional manual path"
+ value={codexPath}
+ />
- onCodexPathChange(event.target.value)}
- placeholder="Optional manual path"
- value={codexPath}
- />
-
-
-
-
-
-
- Theme
-
-
- {[
- { id: "dracula", label: "Dracula", meta: "Primary dark IDE theme" },
- { id: "light", label: "Light", meta: "High-contrast daylight palette" },
- { id: "system", label: "System", meta: "Follow the OS appearance" }
- ].map((entry) => (
- onThemeChange(entry.id as ThemeMode)}
- type="button"
- >
- {entry.label}
- {entry.meta}
-
- ))}
-
-
+
+
+
+
+
+ } title="Theme" />
+
+ {[
+ { id: "dracula", label: "Dracula", meta: "Primary dark IDE theme" },
+ { id: "light", label: "Light", meta: "High-contrast daylight palette" },
+ { id: "system", label: "System", meta: "Follow the OS appearance" }
+ ].map((entry) => (
+ onThemeChange(entry.id as ThemeMode)}
+ >
+ {entry.label}
+ {entry.meta}
+
+ ))}
+
+
+
-
-
-
-
- Workspace Notes
-
-
-
-
Project-specific AI settings live inside the selected workspace.
-
Manual PRD/spec edits still remain in-memory until a generate action writes a file.
-
CLI overrides and theme remain machine-local and do not touch `.specforge/settings.json`.
-
-
-
- {annotations.length > 0 ? (
-
-
-
-
- Workspace Notes
-
-
-
- {annotations.map((annotation) => (
-
-
- {annotation.title}
-
-
- {annotation.body}
-
-
- ))}
+
+
+ }
+ title="Workspace Notes"
+ />
+
+
Project-specific AI settings live inside the selected workspace.
+
Manual PRD/spec edits still remain in-memory until a generate action writes a file.
+
CLI overrides and theme remain machine-local and do not touch `.specforge/settings.json`.
-
- ) : null}
+
+ {annotations.length > 0 ? (
+
+ {annotations.map((annotation) => (
+
+
+ {annotation.title}
+
+
+ {annotation.body}
+
+
+ ))}
+
+ ) : null}
+
+
);
@@ -244,22 +243,10 @@ function getAnnotationClassName(tone: "info" | "warning" | "success") {
return `grid gap-2 rounded-[1rem] border ${toneClass} bg-[var(--bg-surface)] p-4`;
}
-const PANEL_CLASS =
- "grid gap-4 rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
-
-const LIST_CLASS =
- "grid gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-surface)] px-4 py-4 font-[var(--font-mono)] text-sm text-[var(--text-main)]";
-
-const FIELD_LABEL_CLASS =
- "text-sm font-medium leading-6 text-[var(--text-subtle)]";
-
-const INPUT_CLASS =
- "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-3 text-[15px] text-[var(--text-main)] placeholder:text-[var(--text-muted)]";
-
const OPTION_CARD_CLASS =
- "grid gap-1 rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-surface)] px-4 py-4 text-left text-[var(--text-main)] transition hover:-translate-y-0.5 hover:border-[rgba(189,147,249,0.34)]";
+ "flex h-full w-full flex-col items-start justify-start gap-1 rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-surface)] px-4 py-4 text-left text-[var(--text-main)] transition hover:-translate-y-0.5 hover:border-[rgba(189,147,249,0.34)]";
const ACTIVE_OPTION_CARD_CLASS =
- "grid gap-1 rounded-[1rem] border border-[rgba(189,147,249,0.42)] bg-[linear-gradient(135deg,rgba(189,147,249,0.18),rgba(139,233,253,0.08)),var(--bg-surface)] px-4 py-4 text-left text-[var(--text-main)] transition";
+ "flex h-full w-full flex-col items-start justify-start gap-1 rounded-[1rem] border border-[rgba(189,147,249,0.42)] bg-[linear-gradient(135deg,rgba(189,147,249,0.18),rgba(139,233,253,0.08)),var(--bg-surface)] px-4 py-4 text-left text-[var(--text-main)] transition";
export default SettingsView;
diff --git a/src/lib/projectConfig.ts b/src/lib/projectConfig.ts
index b87ec2f..3f12a5e 100644
--- a/src/lib/projectConfig.ts
+++ b/src/lib/projectConfig.ts
@@ -105,6 +105,27 @@ export function parseSupportingDocumentPaths(value: string) {
return normalizeSupportingDocumentPaths(value.split(/\r?\n/));
}
+// Converts an absolute document path into a workspace-relative display path.
+// Normalizes Windows separators and strips the `\\?\` extended-length prefix
+// (which becomes `//?/` after backslash conversion), then trims everything up
+// to and including the workspace root folder segment when it can be located.
+// Falls back to the normalized absolute path when the root segment is absent
+// or is the final segment (nothing would remain to display).
+export function getWorkspaceDisplayPath(path: string, workspaceRootName?: string) {
+ const normalizedPath = path
+ .replace(/\\/g, "/")
+ .replace(/^\/{2}\?\//, "");
+
+ if (!workspaceRootName) {
+ return normalizedPath;
+ }
+
+ // Case-insensitive match so Windows paths resolve against the saved root name.
+ const segments = normalizedPath.split("/").filter(Boolean);
+ const rootIndex = segments.findIndex(
+ (segment) => segment.toLowerCase() === workspaceRootName.toLowerCase()
+ );
+
+ // Require at least one segment after the root so the result is non-empty.
+ if (rootIndex >= 0 && rootIndex < segments.length - 1) {
+ return segments.slice(rootIndex + 1).join("/");
+ }
+
+ return normalizedPath;
+}
+
function isModelId(value?: string | null): value is ModelId {
return Boolean(value && VALID_MODEL_IDS.has(value as ModelId));
}
diff --git a/src/screens/ConfigurationScreen.tsx b/src/screens/ConfigurationScreen.tsx
index f2c9832..9ae90a9 100644
--- a/src/screens/ConfigurationScreen.tsx
+++ b/src/screens/ConfigurationScreen.tsx
@@ -1,8 +1,19 @@
+import {
+ Button,
+ Card,
+ Input
+} from "@heroui/react";
import { Folder, Refresh, Terminal } from "iconoir-react";
import { CliHealthCard } from "../components/CliHealthCard";
import { ProjectAiSettingsCard } from "../components/ProjectAiSettingsCard";
import { ProjectDocumentsCard } from "../components/ProjectDocumentsCard";
+import {
+ PRIMARY_BUTTON_CLASS,
+ SECONDARY_BUTTON_CLASS,
+ SETTINGS_PANEL_CLASS,
+ SETTINGS_SURFACE_CLASS
+} from "../components/SettingsPrimitives";
import type { EnvironmentStatus, ModelId, ReasoningProfileId } from "../types";
interface ConfigurationScreenProps {
@@ -73,96 +84,107 @@ export function ConfigurationScreen({
onSupportingDocumentsChange
}: ConfigurationScreenProps) {
const canContinue = desktopRuntime && workspaceRootPath.length > 0 && !isSaving;
+ const folderActionLabel = isProjectLoading
+ ? "Opening..."
+ : hasSavedSettings
+ ? "Open New Folder"
+ : "Select Folder";
return (
-
-
-
-
- Project Setup
-
-
- Configure SpecForge Before Review Starts
-
-
- Choose the project folder, verify the available CLIs, set the default AI prompts
- and model behavior, and point SpecForge at the PRD/spec files you want this
- workspace to use.
-
-
+
+
+
+
+
+ Project Setup
+
+
+ Configure SpecForge Before Review Starts
+
+
+ Choose the project folder, verify the available CLIs, set the default AI prompts
+ and model behavior, and point SpecForge at the PRD/spec files you want this
+ workspace to use.
+
+
-
-
- Refresh
-
-
+
+
+ Refresh
+
+
- {statusMessage ? (
- {statusMessage}
- ) : null}
- {errorMessage ? (
- {errorMessage}
- ) : null}
-
+ {statusMessage ? (
+
{statusMessage}
+ ) : null}
+ {errorMessage ? (
+
{errorMessage}
+ ) : null}
+
+
-
-
+
+
+
-
-
-
- {isProjectLoading ? "Opening..." : "Select Folder"}
-
-
+
+
+
+ {folderActionLabel}
+
+
-
-
Workspace: {workspaceRootName || "No folder selected yet"}
-
Path: {workspaceRootPath || "Pick a folder to begin"}
-
Settings file: {settingsPath || ".specforge/settings.json"}
-
-
+
+
Workspace: {workspaceRootName || "No folder selected yet"}
+
Path: {workspaceRootPath || "Pick a folder to begin"}
+
Settings file: {settingsPath || ".specforge/settings.json"}
+
+
+
-
-
+
+
+
-
-
-
-
-
+
+
+
+
+
-
+
+
@@ -181,6 +203,7 @@ export function ConfigurationScreen({
selectedModel={selectedModel}
selectedReasoning={selectedReasoning}
specPrompt={specPrompt}
+ workspaceRootName={workspaceRootName}
/>
@@ -203,8 +226,8 @@ export function ConfigurationScreen({
-
-
+
+
@@ -219,20 +242,19 @@ export function ConfigurationScreen({
-
{isSaving
? "Saving..."
: hasSavedSettings
? "Save Changes and Continue"
: "Create .specforge and Continue"}
-
-
-
+
+
+
);
@@ -260,17 +282,8 @@ function StepHeading({
);
}
-const PANEL_CLASS =
- "grid gap-4 rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
-
const FIELD_LABEL_CLASS =
"text-sm font-medium leading-6 text-[var(--text-subtle)]";
const INPUT_CLASS =
"w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/20 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition focus:border-[var(--accent)]";
-
-const SECONDARY_BUTTON_CLASS =
- "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
-
-const PRIMARY_BUTTON_CLASS =
- "inline-flex items-center justify-center gap-2 rounded-[1rem] border-0 bg-[linear-gradient(135deg,var(--accent),#ff79c6)] px-4 py-3 font-semibold text-[#15131c] transition hover:-translate-y-0.5 hover:opacity-95";
diff --git a/src/screens/PrdScreen.tsx b/src/screens/PrdScreen.tsx
index 991c8e0..0da1603 100644
--- a/src/screens/PrdScreen.tsx
+++ b/src/screens/PrdScreen.tsx
@@ -72,15 +72,15 @@ export function PrdScreen({
-
+
-
+
-
diff --git a/src/screens/SettingsScreen.tsx b/src/screens/SettingsScreen.tsx
index a5c50f6..67045c5 100644
--- a/src/screens/SettingsScreen.tsx
+++ b/src/screens/SettingsScreen.tsx
@@ -1,6 +1,8 @@
+import { Button } from "@heroui/react";
import type { ComponentProps } from "react";
import { SettingsView } from "../components/SettingsView";
+import { SECONDARY_BUTTON_CLASS } from "../components/SettingsPrimitives";
import { StatusPill } from "../components/StatusPill";
import type { AgentStatus } from "../types";
@@ -24,9 +26,9 @@ export function SettingsScreen({
-
+
Refresh
-
+
@@ -36,6 +38,3 @@ export function SettingsScreen({
);
}
-
-const SECONDARY_BUTTON_CLASS =
- "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
From 549d87fb07575c0f1c186c1fa40b99fb6207ba28 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 01:29:33 -0300
Subject: [PATCH 05/32] Implement project-scoped multi-session chat workspace
---
docs/PRD.md | 124 +--
docs/SPEC.md | 199 ++--
src-tauri/src/chat.rs | 1399 ++++++++++++++++++++++++++
src-tauri/src/generation.rs | 111 ++-
src-tauri/src/git.rs | 19 +-
src-tauri/src/lib.rs | 17 +-
src-tauri/src/models.rs | 109 ++
src-tauri/src/project.rs | 14 +-
src-tauri/src/state.rs | 15 +
src/App.tsx | 1541 ++++++++++++++---------------
src/components/AppRail.tsx | 15 +
src/components/DocumentPane.tsx | 46 +-
src/components/ExecutionPanel.tsx | 44 +-
src/components/MainWorkspace.tsx | 3 +
src/hooks/useAppLifecycle.ts | 299 ++++++
src/hooks/useAppStoreSlices.ts | 119 +++
src/hooks/useAppView.ts | 826 ++++++++++++++++
src/lib/appState.ts | 209 ++++
src/lib/runtime.ts | 124 +++
src/screens/ChatScreen.tsx | 368 +++++++
src/screens/PrdScreen.tsx | 13 +-
src/store/useAgentStore.ts | 11 +-
src/store/useChatStore.ts | 197 ++++
src/types.ts | 84 ++
24 files changed, 4865 insertions(+), 1041 deletions(-)
create mode 100644 src-tauri/src/chat.rs
create mode 100644 src/hooks/useAppLifecycle.ts
create mode 100644 src/hooks/useAppStoreSlices.ts
create mode 100644 src/hooks/useAppView.ts
create mode 100644 src/lib/appState.ts
create mode 100644 src/screens/ChatScreen.tsx
create mode 100644 src/store/useChatStore.ts
diff --git a/docs/PRD.md b/docs/PRD.md
index 8b4747c..f7d7643 100644
--- a/docs/PRD.md
+++ b/docs/PRD.md
@@ -2,71 +2,77 @@
## 1. Product Overview
-**SpecForge** is a setup-first review workspace for desktop-first development. It helps a user choose a project folder, persist project-scoped AI/document settings in `.specforge/settings.json`, inspect CLI readiness, draft missing PRD/spec documents from AI when needed, review workspace files, and step through an execution-style dashboard before handing work off to a real IDE or CLI workflow.
+**SpecForge** is a setup-first desktop workspace for project-scoped agent chat. After a project is configured, the primary surface is a multi-topic chat workspace where each topic keeps its own transcript, context attachments, runtime state, approvals, and diff history.
-Today the product focuses on **review, import, diff inspection, and approval UX**. The execution loop shown in the app is currently a **simulated agent run**, not a real Claude CLI or Codex CLI orchestration engine.
+The product combines four responsibilities in one desktop shell:
+
+* project setup and saved workspace defaults
+* multi-session agent chat with real CLI-backed turns
+* PRD/spec review and editing
+* approval-aware diff and terminal visibility
## 2. Target Audience
-* **Solo engineers:** Wanting a structured review surface for PRDs, specs, diffs, and workspace files before implementation.
-* **Technical leads and PMs:** Wanting a lightweight way to refine a technical spec and verify execution gates without editing code directly.
-* **AI-assisted developers:** Wanting a desktop shell that keeps document loading, workspace inspection, and approval controls in one place.
+* **Solo engineers:** Wanting a desktop-native agent workspace with multiple project topics instead of a single disposable prompt thread.
+* **Technical leads and PMs:** Wanting to keep PRD/spec work visible while letting implementation happen in isolated chat topics.
+* **AI-assisted developers:** Wanting Codex CLI or Claude Code orchestration with per-topic context and explicit approval controls.
-## 3. Current User Flow
+## 3. Primary User Flow
-1. **Open the setup screen:** The app starts on a configuration flow instead of dropping directly into review.
-2. **Choose the project folder:** The user picks a workspace folder. If `.specforge/settings.json` already exists, SpecForge loads it immediately.
-3. **Review CLI status:** The user sees Claude CLI, Codex CLI, and Git health plus optional machine-local override paths.
-4. **Configure AI defaults:** The user chooses the default model/reasoning profile and edits the saved PRD/spec prompt templates for this project.
-5. **Configure document locations:** The user sets the PRD path, spec path, and optional supporting document paths relative to the selected workspace, then saves the setup to create or update `.specforge/settings.json`.
-6. **Review and adjust:** The review workspace loads the configured PRD/spec files when they exist. Missing files surface dedicated empty states instead of fallback bundled docs.
-7. **Approve and run:** Once the spec is approved, the user can launch the execution dashboard in stepped, milestone, or god mode.
-8. **Inspect the result:** The app streams simulated terminal output, shows approval gates, and renders a diff based on the current git state when available.
+1. **Open setup:** The app starts on configuration until a workspace is chosen and `.specforge/settings.json` exists.
+2. **Choose the project folder:** SpecForge scans the workspace, restores saved project settings, and restores the most recent chat topic when available.
+3. **Save configuration:** The user sets model/reasoning defaults, PRD/spec paths, and optional supporting documents, then continues into `/chat`.
+4. **Land in chat:** `/chat` is the primary post-setup workspace. If the project has no saved topics yet, SpecForge creates the first one automatically.
+5. **Work inside topics:** Each topic keeps isolated transcript, isolated selected context, isolated runtime state, and isolated approval/diff history.
+6. **Use seeded context:** New topics start with PRD, SPEC, configured supporting docs, and a workspace summary already attached.
+7. **Attach more files explicitly:** Additional workspace files can be attached per topic and never bleed into other topics.
+8. **Review output:** The `/review` screen remains available for PRD/spec/file editing, while its execute view mirrors the active chat topic instead of launching a separate run.
## 4. Functional Requirements
-### 4.1. Document Ingestion
-
-* **Desktop native picker:** Must support `.md` and `.pdf` imports for PRD and spec documents.
-* **Pane-local controls:** The PRD and spec panes must own their own load actions instead of relying on a separate sidebar ingestion panel.
-* **Project configuration file:** Saving setup must create or update `.specforge/settings.json` inside the selected workspace.
-* **Configured document paths:** The review panes should use the PRD/spec paths stored in `.specforge/settings.json`, not bundled defaults.
-* **Missing document reset:** Loading a project must clear stale PRD/spec content when the configured files do not exist yet.
-* **PRD empty state:** When the active PRD content is empty in preview mode, the PRD pane must show a dedicated empty state with a textbox, helper copy describing the saved default PRD prompt, and a generate action that appends the textbox note after that saved prompt.
-* **Empty spec generation:** When the active spec content is empty and a PRD is available, the spec pane must show a textbox and generate button that append the user's note after the saved default spec prompt and include the current PRD content.
-* **Blocked spec state:** When both the PRD and spec are empty in preview mode, the spec pane must explain that a PRD is required before generation while still allowing an existing spec to be loaded.
-* **Generated document persistence:** After PRD/spec generation succeeds in the desktop runtime, the markdown must be saved into the configured project-relative Markdown path from `.specforge/settings.json` before the pane updates.
-
-### 4.2. Workspace Review
-
-* **Split review panes:** PRD and spec must be visible side-by-side in preview or edit mode.
-* **Sidebar focus:** The left sidebar must be limited to agent configuration plus an MCP list summary.
-* **Workspace explorer:** The right rail must show files discovered from the active workspace and allow safe text/code file opening.
-* **Workspace safety:** Frontend file opens must be limited to the currently scanned workspace. Opening a new workspace should clear file tabs from the previous workspace.
-* **Search:** The file tree must support in-app filtering through the floating search UI.
-* **Spec empty state:** The spec pane must replace the normal preview view with a generation-oriented empty state whenever the spec content is blank and a PRD is available.
-* **Spec prerequisite state:** The spec pane must show a PRD-required message instead of the generation UI when the PRD is still blank.
-* **Saved-path visibility:** Once a spec is generated, the spec pane should reflect the saved file path rather than an unsaved placeholder path.
-
-### 4.3. Settings and Diagnostics
-
-* **Environment scan:** The app must surface Claude CLI, Codex CLI, and Git availability plus optional manual override paths.
-* **Manual override behavior:** A manual path is only considered healthy after the backend successfully probes it as an executable.
-* **Theme controls:** The workspace must support Dracula, Light, and System themes.
-* **Project-scoped AI settings:** Model selection, reasoning profile, PRD prompt, spec prompt, and configured document paths must be saved per project in `.specforge/settings.json`.
-* **Git diff visibility:** The review diff should include staged, unstaged, and untracked changes when a repository is available. Sample diff content is acceptable only when the repository is effectively clean or when running in browser fallback mode.
-
-### 4.4. Approval and Execution UX
-
-* **Approval modes:** The user can pick stepped, milestone, or god mode before starting a run.
-* **Execution stream:** The dashboard must stream status lines and milestone changes.
-* **Approval gates:** Stepped and milestone modes must pause and wait for explicit approval before continuing.
-* **Emergency stop:** The user must be able to halt the active run from the dashboard.
-* **Truthful execution copy:** The app must not imply that real code mutation or CLI orchestration is happening when the current implementation is simulated.
-
-## 5. Non-Goals For The Current Build
-
-* Real Claude CLI or Codex CLI execution that mutates or builds the opened workspace.
-* Automatic file mutation, test execution, or dependency repair driven by the agent.
-* Multi-user collaboration, cloud sync, or remote project state.
-* Persisted workspace editing or a save-to-disk flow for opened file tabs.
+### 4.1. Project Setup And Persistence
+
+* **Project-scoped settings:** Saving setup must create or update `.specforge/settings.json` inside the selected workspace.
+* **Project-scoped sessions:** Chat metadata must be stored in `.specforge/sessions/index.json`.
+* **Per-topic snapshots:** Each topic must be persisted in `.specforge/sessions/<topic-id>.json`.
+* **Last-active restore:** Reopening the app should restore the last active project and the last active topic when available.
+
+### 4.2. Chat Workspace
+
+* **Primary route:** `/chat` must be the default destination after setup or project restore.
+* **Three-zone desktop layout:** The chat screen must provide a topic list, transcript/composer workspace, and context/artifacts panel.
+* **Topic management:** Users must be able to create, search, select, rename, and delete topics.
+* **Per-topic isolation:** Messages, context items, runtime state, pending approvals, pending diff, and terminal output must remain scoped to a single topic.
+* **Per-topic drafts:** Composer drafts must be preserved per topic while switching between topics.
+* **Context seeding:** New topics must start with PRD, SPEC, supporting docs, and a workspace tree summary.
+* **Explicit file attachment:** Workspace files can be attached manually from the chat UI and only affect the active topic.
+* **Inline controls:** Send, approve, and stop actions must live directly in the chat composer area rather than in modal flows.
+
+### 4.3. Runtime And Approval Semantics
+
+* **Real CLI-backed turns:** Chat turns must run through the desktop backend using headless Codex CLI or Claude Code invocations.
+* **Stepped mode:** The first pass must be proposal-first or read-only, then require explicit approval before a write-capable rerun.
+* **Milestone mode:** One assistant turn may make changes, but it must pause on the resulting real git diff before the next turn.
+* **God mode:** The assistant may complete the turn without an approval pause while still surfacing output and diff history afterward.
+* **Session-scoped stop behavior:** Stop requests must only affect the active topic run and preserve existing emergency-stop semantics.
+* **Visible artifacts:** Terminal output and diff history must remain visible for each topic after the run.
+
+### 4.4. Caveman Requirement
+
+* **Always-on skill:** Chat must always run with the Caveman skill active.
+* **Auto-verify on chat entry:** On first entry into `/chat`, SpecForge must verify the skill and install it if missing.
+* **Required install command:** The default install path must use `npx skills add JuliusBrussee/caveman`.
+* **Blocked sends on failure:** If Caveman installation or verification fails, message sending must remain disabled and the UI must show a recoverable banner.
+
+### 4.5. Review And Settings
+
+* **Review remains available:** `/review` must still support PRD/spec/file editing and document generation.
+* **Read-only execution mirror:** The execute panel in review must reflect the active chat topic runtime and diff, but must not start, approve, or stop a separate execution engine.
+* **Settings remain project-scoped:** Model/reasoning defaults, prompt templates, document paths, and supporting docs remain editable from setup and settings.
+
+## 5. Non-Goals
+
+* Multi-user collaboration or cloud sync
+* Browser-only chat execution without the desktop runtime
+* Automatic saving of edited workspace file tabs back to disk
+* OpenCode runtime integration as a provider; it is only a UX reference for this release
diff --git a/docs/SPEC.md b/docs/SPEC.md
index ab0182e..da23699 100644
--- a/docs/SPEC.md
+++ b/docs/SPEC.md
@@ -4,136 +4,149 @@
SpecForge is a split desktop application:
-* **React webview:** Owns routing, document editing, pane-local load actions, workspace presentation, settings UI, empty-state spec generation UX, and simulated execution UX.
-* **Tauri/Rust backend:** Owns environment scanning, filesystem access, PDF parsing, workspace walking, git diff generation, native file dialogs, CLI-backed spec generation, and simulated agent coordination.
+* **React webview:** Owns routing, topic/session management UI, PRD/spec editing, workspace browsing, settings, and passive rendering of runtime output.
+* **Tauri/Rust backend:** Owns filesystem access, workspace scanning, session persistence, git diffing, native dialogs, PDF parsing, CLI process execution, Caveman verification, and chat event streaming.
-The webview must never execute shell commands or arbitrary file reads directly. All desktop-side operations go through `src/lib/runtime.ts` and Tauri commands in `src-tauri/src/lib.rs`.
+The webview never executes shell commands or writes workspace files directly. All desktop work continues to flow through `src/lib/runtime.ts` into Tauri commands exposed from `src-tauri/src/lib.rs`.
-## 2. Implemented Stack
+## 2. Routes
-### 2.1. Frontend
+* `/` is the project configuration flow.
+* `/chat` is the primary post-setup workspace.
+* `/review` is the document and file editing workspace.
+* `/settings` holds project-scoped and local runtime configuration.
-* React 19
-* React Router 7
-* Zustand
-* HeroUI
-* Tailwind v4
-* TypeScript
+When a saved project is restored, the app routes to `/chat` by default.
-### 2.2. Backend
+## 3. State Model
-* Tauri 2
-* Rust 2024 edition
-* `git2` for repository diffing
-* `ignore` for `.gitignore`-aware workspace walking
-* `lopdf` for PDF text extraction
-* `rfd` for native file and folder pickers
-* `which` for CLI discovery
+### 3.1. Frontend stores
-## 3. Default State And Stores
+* **`useProjectStore`:** PRD/spec content, document paths, prompt templates, selected project defaults, annotations, and open workspace file tabs.
+* **`useChatStore`:** Chat session summaries, `activeSessionId`, loaded per-topic snapshots, per-topic drafts, and Caveman readiness state.
+* **`useAgentStore`:** A lightweight runtime mirror used by review and shared execution UI. In chat-first flows it mirrors the active chat topic runtime rather than owning an independent executor.
+* **`useSettingsStore`:** Theme, CLI override paths, last opened project path, environment scan results, and workspace entries.
-### 3.1. Setup-first startup
+### 3.2. Persistence
-On startup, the app routes to a project configuration screen. The user selects a workspace folder, and the desktop runtime either loads an existing `.specforge/settings.json` or prepares default project settings that can be saved into that file.
+Project settings live in:
-The review workspace no longer boots with bundled `docs/PRD.md` / `docs/SPEC.md` content by default.
+* `.specforge/settings.json`
-### 3.2. Zustand stores
+Chat data lives in:
-* **`useProjectStore`:** PRD/spec content, approval mode, selected model/reasoning, saved prompt templates, configured document paths, annotations, and open workspace file tabs.
-* **`useAgentStore`:** Simulated run status, streamed output, current milestone, pending diff, and summary text.
-* **`useSettingsStore`:** Theme, CLI override paths, last opened project path, environment scan results, and the current workspace tree entries.
+* `.specforge/sessions/index.json`
+* `.specforge/sessions/<sessionId>.json`
-## 4. Import And Workspace Flows
+`index.json` stores topic summaries plus `lastActiveSessionId`. Each session snapshot stores:
-### 4.1. Desktop document import
+* `id`
+* `title`
+* `createdAt`
+* `updatedAt`
+* `selectedModel`
+* `selectedReasoning`
+* `autonomyMode`
+* `status`
+* `contextItems`
+* `messages`
+* `runtime`
+* `lastError`
-The desktop runtime currently exposes two import paths:
+## 4. Chat Session Behavior
-* **User-facing import:** `pick_document()` opens a native file picker for `.md` and `.pdf`, parses the chosen file in Rust, and returns a `WorkspaceDocument`. The PRD and spec panes trigger this from their own header controls.
-* **Reserved path import:** `parse_document(filePath)` still accepts only repository-relative paths that stay inside the project root, but it is not currently surfaced in the main review UI.
+### 4.1. Default context
-### 4.2. Project setup, workspace scan, and file opens
+When a new topic is created, the backend seeds the session with:
-* `pick_project_folder()` opens a native folder picker, walks the chosen directory with `.gitignore` awareness, loads `.specforge/settings.json` when it exists, and returns a project-context payload for the setup flow.
-* `load_project_context(folderPath)` reloads an already-known project folder and rehydrates the workspace plus saved project settings.
-* `save_project_settings(folderPath, settings)` writes `.specforge/settings.json` inside the selected project.
-* The backend stores the active workspace root and its relative-path-to-file map in shared state.
-* `read_workspace_file(filePath)` now treats `filePath` as a **workspace-relative path only** and resolves it through the active workspace map.
-* Files outside the active workspace must be rejected even if the frontend passes an absolute path or traversal sequence.
-* When the configured PRD/spec files do not exist yet, the frontend clears the prior document content instead of leaving stale content visible.
+* the configured PRD document
+* the configured SPEC document
+* any configured supporting documents
+* a workspace tree summary
-### 4.4. Empty document and spec generation flow
+Additional workspace files are attached explicitly per topic from the chat UI. Session context does not bleed across topics.
-* When `prdContent` is empty and the PRD pane is in preview mode, the left pane swaps to a dedicated PRD empty state while preserving preview/load/edit controls in the header.
-* The PRD empty state includes a note field, shows the saved default PRD prompt from `.specforge/settings.json`, and explains that the note is appended after that prompt before generation.
-* `generate_prd_document(...)` writes Markdown to the configured PRD path inside the workspace.
-* When `specContent` is empty, the spec pane keeps the same preview/load/edit controls in its header area.
-* If `specContent` is empty and `prdContent` is present, the spec pane swaps to a dedicated generation state with a prompt textarea and generate button.
-* If both `prdContent` and `specContent` are empty in preview mode, the spec pane shows a blocked state that asks for a PRD before generation while still allowing `Load Spec`.
-* The spec empty state shows the saved default spec prompt from `.specforge/settings.json` and explains that the note is appended after that prompt before generation.
-* The generate actions send the current prompt template, note, selected model, selected reasoning profile, and configured output path through `src/lib/runtime.ts`.
-* `generate_spec_document(...)` runs the selected Claude CLI or Codex CLI in non-interactive mode from a temporary folder and writes the returned markdown into the configured spec path inside the workspace.
-* The saved spec document metadata is returned to the frontend so the spec pane reflects the on-disk path immediately; execution remains a separate simulated flow.
+### 4.2. Runtime orchestration
+
+Chat turns are executed in Rust as headless CLI invocations:
+
+* **Codex provider:** mapped to suggest, auto-edit, or full-auto style permissions depending on the selected autonomy mode
+* **Claude provider:** mapped to default, accept-edits, or bypass-permissions style permissions
+
+Rust keeps a session-keyed runtime map so the following remain isolated by `sessionId`:
+
+* current status
+* terminal output
+* pending approval state
+* pending diff
+* stop requests
+
+### 4.3. Approval semantics
+
+* **`stepped`:** first run in proposal/read-only mode, then require explicit approval before the write-capable pass
+* **`milestone`:** run one assistant turn, capture the real git diff, and pause before the next turn
+* **`god_mode`:** allow a full-permission turn without an approval pause
+
+Review mode does not expose these controls directly; it only mirrors the active topic state.
## 5. Tauri Command Surface
-The current Tauri commands are:
+The desktop runtime currently exposes:
-* `run_environment_scan(claudePath?: string, codexPath?: string)`
-* `parse_document(filePath: string)`
-* `pick_document()`
-* `pick_project_folder()`
-* `load_project_context(folderPath: string)`
-* `save_project_settings(folderPath: string, settings: ProjectSettings)`
-* `open_workspace_folder()`
-* `read_workspace_file(filePath: string)`
-* `get_workspace_snapshot()`
-* `git_get_diff()`
-* `generate_prd_document(workspaceRoot: string, outputPath: string, promptTemplate: string, userPrompt: string, provider: string, model: string, reasoning: string, claudePath?: string, codexPath?: string)`
-* `generate_spec_document(workspaceRoot: string, outputPath: string, prdContent: string, promptTemplate: string, userPrompt: string, provider: string, model: string, reasoning: string, claudePath?: string, codexPath?: string)`
-* `spawn_cli_agent(specPayload: string, mode: string, model: string, reasoning: string)`
-* `approve_action()`
-* `kill_agent_process()`
+* `run_environment_scan`
+* `pick_document`
+* `pick_project_folder`
+* `load_project_context`
+* `save_project_settings`
+* `read_workspace_file`
+* `get_workspace_snapshot`
+* `git_get_diff`
+* `generate_prd_document`
+* `generate_spec_document`
+* `create_chat_session`
+* `load_chat_session`
+* `save_chat_session`
+* `rename_chat_session`
+* `delete_chat_session`
+* `send_chat_message`
+* `approve_chat_session`
+* `stop_chat_session`
+* `ensure_caveman_skill`
-Payloads crossing the Tauri boundary remain camelCase.
+Chat runtime updates are streamed through a typed `chat-session-event` payload carrying the session id plus the current session snapshot or summary update.
-## 6. Diff And Execution Behavior
+## 6. Caveman Integration
-### 6.1. Git diff
+Entering `/chat` triggers backend verification of the Caveman skill. If it is missing, the backend attempts installation with:
-`git_get_diff()` uses `git2` to render:
+* `npx skills add JuliusBrussee/caveman`
-* staged changes (`HEAD -> index`)
-* unstaged changes (`index -> worktree`)
-* untracked file content
+If verification or installation fails:
-If the repository renders no pending diff, the app falls back to the bundled sample patch for demo purposes.
+* the frontend stores a failed Caveman state in `useChatStore`
+* the composer send action remains disabled
+* the user sees a blocking but recoverable banner in chat
-### 6.2. Execution runtime
+Each outgoing chat turn prepends a Caveman activation preamble before the normal SpecForge system prompt so the skill is active on every turn, not merely installed on disk.
-The current execution runtime is **simulated**:
+## 7. Review Workspace
-* `spawn_cli_agent()` starts a Rust thread that emits milestone and terminal events.
-* `approve_action()` resumes a paused simulated gate.
-* `kill_agent_process()` stops the active simulated run.
-* Run IDs are tracked so stale runs do not leak output into newer runs.
+The review screen still provides:
-This is a review-and-approval shell, not a real CLI orchestration engine yet.
+* PRD/spec editing
+* workspace file browsing
+* PRD/spec generation
-The PRD/spec generation flows are separate from execution: they use the configured Claude/Codex CLI to draft markdown, save that markdown to the configured project-relative Markdown targets, and load the saved file into the review pane. They do not replace the simulated execution loop.
+Its execute panel is now a read-only mirror of the active chat topic:
-## 7. Environment And Settings
+* terminal output mirrors the active topic runtime
+* diff output mirrors the active topic pending diff
+* approval and stop controls are hidden
-* CLI health is derived from executable probing, not just path existence.
-* Manual override paths can be relative to the repo or absolute on disk.
-* Theme preference plus CLI override paths are stored in browser local storage.
-* The last opened project path is stored in browser local storage so the desktop app can restore project setup on the next launch.
-* Project-specific model/reasoning defaults, prompt templates, and document paths are stored in `.specforge/settings.json` inside the selected workspace.
-* The review sidebar now presents only agent configuration controls plus an MCP summary list derived from the current runtime/tool health.
+This prevents review from launching a second execution engine that could diverge from chat state.
## 8. Known Limits
-* Opened workspace file tabs are editable in-memory only; there is no save-to-disk flow.
-* The current project-setup flow expects the desktop runtime for real `.specforge/settings.json` persistence.
-* The app presents model and approval controls, but the current run loop is simulated rather than connected to real workspace-mutating Claude/Codex execution.
+* Opened workspace file tabs remain in-memory only; there is still no save-to-disk flow.
+* The desktop runtime is required for real project persistence, chat sessions, and CLI-backed turns.
+* The provider set remains limited to Codex CLI and Claude Code for this version.
diff --git a/src-tauri/src/chat.rs b/src-tauri/src/chat.rs
new file mode 100644
index 0000000..8a4b927
--- /dev/null
+++ b/src-tauri/src/chat.rs
@@ -0,0 +1,1399 @@
+use crate::{
+ documents::parse_workspace_document,
+ environment::{current_timestamp, resolve_cli_binary},
+ generation::{
+ create_spec_generation_temp_dir, format_process_failure, map_claude_reasoning,
+ map_codex_reasoning, run_command_with_stdin,
+ },
+ git::git_get_diff_for_root,
+ models::{
+ CavemanStatusPayload, ChatContextItem, ChatEventPayload, ChatMessage, ChatRuntimeState,
+ ChatSessionIndexPayload, ChatSessionSnapshot, ChatSessionSummary, ProjectSettings,
+ },
+ paths::{resolve_override_path, resolve_relative_path_under_root},
+ project::{build_default_project_settings, load_project_settings_from_workspace_root},
+ state::{ChatExecutionRuntime, SharedState, WorkspaceContext},
+};
+use std::{
+ collections::BTreeSet,
+ fs,
+ path::{Path, PathBuf},
+ process::{Command, Stdio},
+ sync::{
+ atomic::{AtomicU64, Ordering},
+ Arc,
+ },
+ thread,
+ time::{SystemTime, UNIX_EPOCH},
+};
+use tauri::{AppHandle, Emitter, State};
+
+const SESSION_DIRECTORY_RELATIVE_PATH: &str = ".specforge/sessions";
+const SESSION_INDEX_FILE_NAME: &str = "index.json";
+const CAVEMAN_REPO: &str = "JuliusBrussee/caveman";
+const CAVEMAN_PREAMBLE: &str =
+ "/caveman\nUse the Caveman skill. Be direct and minimal in prose while keeping code and diffs fully normal.";
+
+static SESSION_COUNTER: AtomicU64 = AtomicU64::new(0);
+
+#[tauri::command]
+pub(crate) fn create_chat_session(
+ state: State<'_, SharedState>,
+ title: Option<String>,
+) -> Result<ChatSessionSnapshot, String> {
+ let workspace = active_workspace_context(&state)?;
+ let settings = load_workspace_project_settings(&workspace.root)?;
+ let mut index = load_chat_session_index(&workspace.root)?;
+ let timestamp = current_timestamp();
+ let session_id = create_chat_entity_id("session");
+ let next_title = normalized_title(title.as_deref())
+ .unwrap_or_else(|| format!("Topic {}", index.sessions.len() + 1));
+
+ let snapshot = ChatSessionSnapshot {
+ id: session_id.clone(),
+ title: next_title,
+ created_at: timestamp.clone(),
+ updated_at: timestamp,
+ status: String::from("idle"),
+ last_message_preview: String::new(),
+ selected_model: settings.selected_model.clone(),
+ selected_reasoning: settings.selected_reasoning.clone(),
+ autonomy_mode: String::from("milestone"),
+ context_items: build_default_context_items(&settings),
+ messages: Vec::new(),
+ runtime: ChatRuntimeState::default(),
+ };
+
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
+ index.last_active_session_id = Some(session_id);
+ write_chat_session_index(&workspace.root, &index)?;
+
+ Ok(snapshot)
+}
+
+#[tauri::command]
+pub(crate) fn load_chat_session(
+ state: State<'_, SharedState>,
+ session_id: String,
+) -> Result<ChatSessionSnapshot, String> {
+ let workspace = active_workspace_context(&state)?;
+ let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ let mut index = load_chat_session_index(&workspace.root)?;
+ index.last_active_session_id = Some(session_id);
+ upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
+ write_chat_session_index(&workspace.root, &index)?;
+ Ok(snapshot)
+}
+
+#[tauri::command]
+pub(crate) fn save_chat_session(
+ state: State<'_, SharedState>,
+ session_id: String,
+ selected_model: String,
+ selected_reasoning: String,
+ autonomy_mode: String,
+ context_items: Vec<ChatContextItem>,
+) -> Result<ChatSessionSnapshot, String> {
+ let workspace = active_workspace_context(&state)?;
+ let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ snapshot.selected_model = selected_model.trim().to_string();
+ snapshot.selected_reasoning = selected_reasoning.trim().to_string();
+ snapshot.autonomy_mode = normalize_autonomy_mode(&autonomy_mode);
+ snapshot.context_items = normalize_context_items(context_items);
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+
+ let mut index = load_chat_session_index(&workspace.root)?;
+ upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
+ write_chat_session_index(&workspace.root, &index)?;
+
+ Ok(snapshot)
+}
+
+#[tauri::command]
+pub(crate) fn rename_chat_session(
+ state: State<'_, SharedState>,
+ session_id: String,
+ title: String,
+) -> Result<ChatSessionSummary, String> {
+ let workspace = active_workspace_context(&state)?;
+ let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ snapshot.title = normalized_title(Some(&title))
+ .ok_or_else(|| String::from("A non-empty session title is required."))?;
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+
+ let summary = summarize_session(&snapshot);
+ let mut index = load_chat_session_index(&workspace.root)?;
+ upsert_chat_session_summary(&mut index, summary.clone());
+ write_chat_session_index(&workspace.root, &index)?;
+
+ Ok(summary)
+}
+
+#[tauri::command]
+pub(crate) fn delete_chat_session(
+ state: State<'_, SharedState>,
+ session_id: String,
+) -> Result<ChatSessionIndexPayload, String> {
+ let workspace = active_workspace_context(&state)?;
+ let session_path = session_snapshot_path(&workspace.root, &session_id);
+
+ if session_path.exists() {
+ fs::remove_file(&session_path).map_err(|error| {
+ format!(
+ "Unable to delete chat session {}: {error}",
+ session_path.display()
+ )
+ })?;
+ }
+
+ let mut index = load_chat_session_index(&workspace.root)?;
+ index.sessions.retain(|entry| entry.id != session_id);
+
+ if index
+ .last_active_session_id
+ .as_ref()
+ .is_some_and(|active_id| active_id == &session_id)
+ {
+ index.last_active_session_id = index.sessions.first().map(|entry| entry.id.clone());
+ }
+
+ write_chat_session_index(&workspace.root, &index)?;
+ Ok(index)
+}
+
+#[tauri::command]
+pub(crate) fn approve_chat_session(
+ state: State<'_, SharedState>,
+ session_id: String,
+) -> Result<(), String> {
+ let mut controls = state
+ .chat_runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id).or_default();
+ control.awaiting_approval = false;
+ state.chat_runtime.signal.notify_all();
+ Ok(())
+}
+
+#[tauri::command]
+pub(crate) fn stop_chat_session(
+ state: State<'_, SharedState>,
+ session_id: String,
+) -> Result<(), String> {
+ let mut controls = state
+ .chat_runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id).or_default();
+ control.stop_requested = true;
+ control.awaiting_approval = false;
+ state.chat_runtime.signal.notify_all();
+ Ok(())
+}
+
+#[tauri::command]
+pub(crate) fn ensure_caveman_skill() -> Result<CavemanStatusPayload, String> {
+ if is_caveman_installed() {
+ return Ok(CavemanStatusPayload {
+ ready: true,
+ detail: String::from("Caveman is already installed for this machine."),
+ });
+ }
+
+ install_caveman_skill()?;
+
+ if !is_caveman_installed() {
+ return Err(String::from(
+ "Caveman installation completed, but the skill directory could not be verified.",
+ ));
+ }
+
+ Ok(CavemanStatusPayload {
+ ready: true,
+ detail: String::from("Caveman was installed and verified successfully."),
+ })
+}
+
+#[tauri::command]
+pub(crate) fn send_chat_message(
+ app: AppHandle,
+ state: State<'_, SharedState>,
+ session_id: String,
+ message: String,
+ claude_path: Option<String>,
+ codex_path: Option<String>,
+) -> Result<(), String> {
+ let trimmed_message = message.trim().to_string();
+
+ if trimmed_message.is_empty() {
+ return Err(String::from("A message is required before sending."));
+ }
+
+ if !is_caveman_installed() {
+ return Err(String::from(
+ "Caveman is not installed yet. Verify the skill before sending a chat turn.",
+ ));
+ }
+
+ let workspace = active_workspace_context(&state)?;
+ let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+
+ if snapshot.runtime.is_busy || snapshot.runtime.awaiting_approval {
+ return Err(String::from(
+ "This topic is still waiting on the current turn. Approve or stop it before sending another message.",
+ ));
+ }
+
+ let run_id = {
+ let mut controls = state
+ .chat_runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id.clone()).or_default();
+ control.run_id = control.run_id.wrapping_add(1);
+ control.stop_requested = false;
+ control.awaiting_approval = false;
+ control.run_id
+ };
+
+ let runtime = state.chat_runtime.clone();
+ thread::spawn(move || {
+ run_chat_turn(
+ app,
+ runtime,
+ workspace,
+ session_id,
+ run_id,
+ trimmed_message,
+ claude_path,
+ codex_path,
+ );
+ });
+
+ Ok(())
+}
+
+pub(crate) fn load_chat_session_index(
+ workspace_root: &Path,
+) -> Result<ChatSessionIndexPayload, String> {
+ let index_path = session_index_path(workspace_root);
+
+ if !index_path.exists() {
+ return Ok(ChatSessionIndexPayload {
+ sessions: Vec::new(),
+ last_active_session_id: None,
+ });
+ }
+
+ let raw_value = fs::read_to_string(&index_path).map_err(|error| {
+ format!(
+ "Unable to read the chat session index {}: {error}",
+ index_path.display()
+ )
+ })?;
+
+ serde_json::from_str::<ChatSessionIndexPayload>(&raw_value).map_err(|error| {
+ format!(
+ "Unable to parse the chat session index {}: {error}",
+ index_path.display()
+ )
+ })
+}
+
+fn run_chat_turn(
+ app: AppHandle,
+ runtime: Arc,
+ workspace: WorkspaceContext,
+ session_id: String,
+ run_id: u64,
+ user_message: String,
+ claude_path: Option<String>,
+ codex_path: Option<String>,
+) {
+ let result = (|| -> Result<(), String> {
+ let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ snapshot.messages.push(ChatMessage {
+ id: create_chat_entity_id("msg"),
+ role: String::from("user"),
+ content: user_message.clone(),
+ created_at: current_timestamp(),
+ });
+ snapshot.status = String::from("executing");
+ snapshot.last_message_preview = build_message_preview(&user_message);
+ snapshot.updated_at = current_timestamp();
+ snapshot.runtime.status = String::from("executing");
+ snapshot.runtime.is_busy = true;
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.last_error = None;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.execution_summary =
+ Some(String::from("Preparing context and launching the selected CLI."));
+ snapshot.runtime.pending_diff = None;
+ snapshot.runtime.current_milestone = Some(String::from("Queue Turn"));
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ emit_session_event(
+ &app,
+ &session_id,
+ "messageStarted",
+ Some(snapshot.clone()),
+ None,
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ append_terminal_line(
+ &app,
+ &workspace.root,
+ &session_id,
+ &mut snapshot,
+ "Queued the new user turn and resolved the session context.",
+ )?;
+
+ if matches!(stop_state(&runtime, &session_id, run_id), ChatStopState::StopRequested) {
+ halt_session(
+ &app,
+ &workspace.root,
+ &session_id,
+ &mut snapshot,
+ "Turn stopped before execution began.",
+ )?;
+ return Ok(());
+ }
+
+ if snapshot.autonomy_mode == "stepped" {
+ execute_chat_phase(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ &user_message,
+ &claude_path,
+ &codex_path,
+ ChatExecutionPhase::Proposal,
+ )?;
+ snapshot.runtime.awaiting_approval = true;
+ snapshot.runtime.is_busy = true;
+ snapshot.runtime.status = String::from("awaiting_approval");
+ snapshot.runtime.pending_request =
+ Some(String::from("Approve the proposal to rerun this turn with write access."));
+ snapshot.runtime.execution_summary = Some(String::from(
+ "Stepped mode paused after the proposal phase. Approve to rerun the turn with write access.",
+ ));
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.updated_at = current_timestamp();
+ snapshot.status = String::from("awaiting_approval");
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ emit_session_event(
+ &app,
+ &session_id,
+ "approvalRequired",
+ Some(snapshot.clone()),
+ None,
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ match wait_for_approval(&runtime, &session_id, run_id)? {
+ ApprovalOutcome::Approved => {
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.status = String::from("executing");
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.execution_summary = Some(String::from(
+ "Approval received. Replaying the turn with write access enabled.",
+ ));
+ snapshot.status = String::from("executing");
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ }
+ ApprovalOutcome::StopRequested => {
+ halt_session(
+ &app,
+ &workspace.root,
+ &session_id,
+ &mut snapshot,
+ "Turn stopped during the stepped approval gate.",
+ )?;
+ return Ok(());
+ }
+ ApprovalOutcome::Replaced => return Ok(()),
+ }
+
+ execute_chat_phase(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ &user_message,
+ &claude_path,
+ &codex_path,
+ ChatExecutionPhase::Write,
+ )?;
+ } else {
+ execute_chat_phase(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ &user_message,
+ &claude_path,
+ &codex_path,
+ ChatExecutionPhase::Write,
+ )?;
+ }
+
+ if snapshot.autonomy_mode == "milestone" {
+ snapshot.runtime.awaiting_approval = true;
+ snapshot.runtime.is_busy = true;
+ snapshot.runtime.status = String::from("awaiting_approval");
+ snapshot.runtime.execution_summary = Some(String::from(
+ "Milestone mode paused after this turn. Review the current diff before the next prompt.",
+ ));
+ snapshot.runtime.pending_request =
+ Some(String::from("Approve the current diff to unlock the next turn."));
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.updated_at = current_timestamp();
+ snapshot.status = String::from("awaiting_approval");
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ emit_session_event(
+ &app,
+ &session_id,
+ "approvalRequired",
+ Some(snapshot.clone()),
+ None,
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ match wait_for_approval(&runtime, &session_id, run_id)? {
+ ApprovalOutcome::Approved => {
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.execution_summary = Some(String::from(
+ "Diff approved. The topic is ready for the next prompt.",
+ ));
+ }
+ ApprovalOutcome::StopRequested => {
+ halt_session(
+ &app,
+ &workspace.root,
+ &session_id,
+ &mut snapshot,
+ "Turn stopped during the milestone approval gate.",
+ )?;
+ return Ok(());
+ }
+ ApprovalOutcome::Replaced => return Ok(()),
+ }
+ }
+
+ snapshot.runtime.status = String::from("completed");
+ snapshot.runtime.is_busy = false;
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.current_milestone = Some(String::from("Complete"));
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.runtime.execution_summary = Some(String::from(
+ "Turn completed. The transcript, terminal stream, and current diff are ready.",
+ ));
+ snapshot.status = String::from("completed");
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ emit_session_event(
+ &app,
+ &session_id,
+ "completed",
+ Some(snapshot),
+ None,
+ None,
+ None,
+ );
+
+ Ok(())
+ })();
+
+ if let Err(error) = result {
+ let _ = mark_session_error(&app, &workspace.root, &session_id, error);
+ }
+}
+
+fn execute_chat_phase(
+ app: &AppHandle,
+ workspace: &WorkspaceContext,
+ session_id: &str,
+ runtime: &Arc<ChatExecutionRuntime>,
+ run_id: u64,
+ snapshot: &mut ChatSessionSnapshot,
+ user_message: &str,
+ claude_path: &Option<String>,
+ codex_path: &Option<String>,
+ phase: ChatExecutionPhase,
+) -> Result<(), String> {
+ if !matches!(stop_state(runtime, session_id, run_id), ChatStopState::Continue) {
+ halt_session(
+ app,
+ &workspace.root,
+ session_id,
+ snapshot,
+ "Turn stopped before the provider phase finished.",
+ )?;
+ return Ok(());
+ }
+
+ let phase_copy = phase.copy();
+ snapshot.runtime.current_milestone = Some(String::from(phase_copy.milestone()));
+ snapshot.runtime.execution_summary = Some(String::from(phase_copy.summary()));
+ write_chat_session_snapshot(&workspace.root, snapshot)?;
+ refresh_index_summary(&workspace.root, snapshot)?;
+ append_terminal_line(app, &workspace.root, session_id, snapshot, phase_copy.line())?;
+
+ let context_blocks = build_context_blocks(workspace, snapshot)?;
+ let prompt_payload = build_chat_prompt(snapshot, &context_blocks, user_message, phase_copy);
+ let assistant_content = run_chat_provider_request(
+ &workspace.root,
+ &snapshot.selected_model,
+ &snapshot.selected_reasoning,
+ phase_copy,
+ &prompt_payload,
+ claude_path.as_deref(),
+ codex_path.as_deref(),
+ )?;
+
+ let assistant_message = ChatMessage {
+ id: create_chat_entity_id("msg"),
+ role: String::from("assistant"),
+ content: assistant_content.trim().to_string(),
+ created_at: current_timestamp(),
+ };
+ snapshot.messages.push(assistant_message.clone());
+ snapshot.last_message_preview = build_message_preview(&assistant_message.content);
+ snapshot.updated_at = current_timestamp();
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.runtime.current_milestone = Some(String::from(phase_copy.completed_milestone()));
+ snapshot.runtime.execution_summary = Some(String::from(phase_copy.completed_summary()));
+ snapshot.status = String::from("executing");
+ write_chat_session_snapshot(&workspace.root, snapshot)?;
+ refresh_index_summary(&workspace.root, snapshot)?;
+ emit_session_event(
+ app,
+ session_id,
+ "messageDelta",
+ None,
+ Some(assistant_message.content.clone()),
+ None,
+ None,
+ );
+ emit_session_event(
+ app,
+ session_id,
+ "sessionUpdated",
+ Some(snapshot.clone()),
+ Some(assistant_message.content),
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ Ok(())
+}
+
+fn build_context_blocks(
+ workspace: &WorkspaceContext,
+ snapshot: &ChatSessionSnapshot,
+) -> Result<Vec<(String, String)>, String> {
+ let mut blocks = Vec::new();
+
+ for item in &snapshot.context_items {
+ let content = match item.kind.as_str() {
+ "workspace_summary" => build_workspace_summary(workspace),
+ _ => {
+ let Some(path) = item.path.as_deref() else {
+ continue;
+ };
+ let resolved_path = resolve_relative_path_under_root(&workspace.root, path)?;
+
+ if !resolved_path.exists() {
+ format!("Missing file at {path}.")
+ } else {
+ parse_workspace_document(&resolved_path)?
+ }
+ }
+ };
+
+ if content.trim().is_empty() {
+ continue;
+ }
+
+ blocks.push((item.label.clone(), content));
+ }
+
+ Ok(blocks)
+}
+
+fn build_chat_prompt(
+ snapshot: &ChatSessionSnapshot,
+ context_blocks: &[(String, String)],
+ user_message: &str,
+ phase: ChatExecutionPhase,
+) -> String {
+ let mut prompt = String::new();
+ prompt.push_str(CAVEMAN_PREAMBLE);
+ prompt.push_str("\n\n");
+ prompt.push_str("You are SpecForge Chat, a desktop coding assistant operating on a project-scoped topic.\n");
+ prompt.push_str("Keep responses direct. Preserve technical accuracy. Use the attached project context.\n");
+ prompt.push_str("Current topic: ");
+ prompt.push_str(&snapshot.title);
+ prompt.push_str("\nAutonomy mode: ");
+ prompt.push_str(&snapshot.autonomy_mode);
+ prompt.push_str("\nExecution phase: ");
+ prompt.push_str(phase.label());
+ prompt.push_str("\n");
+ prompt.push_str(phase.instructions());
+
+ if !context_blocks.is_empty() {
+ prompt.push_str("\n\nAttached context:\n");
+
+ for (label, content) in context_blocks {
+ prompt.push_str("\n### ");
+ prompt.push_str(label);
+ prompt.push('\n');
+ prompt.push_str(content.trim());
+ prompt.push('\n');
+ }
+ }
+
+ if !snapshot.messages.is_empty() {
+ prompt.push_str("\nConversation so far:\n");
+
+ for message in &snapshot.messages {
+ prompt.push_str("\n");
+ prompt.push_str(&message.role.to_uppercase());
+ prompt.push_str(": ");
+ prompt.push_str(message.content.trim());
+ prompt.push('\n');
+ }
+ } else {
+ prompt.push_str("\nConversation so far:\n\nNo prior turns yet.\n");
+ }
+
+ prompt.push_str("\nCurrent user request:\n");
+ prompt.push_str(user_message.trim());
+ prompt.push('\n');
+ prompt
+}
+
+fn run_chat_provider_request(
+ workspace_root: &Path,
+ model: &str,
+ reasoning: &str,
+ phase: ChatExecutionPhase,
+ prompt_payload: &str,
+ claude_path: Option<&str>,
+ codex_path: Option<&str>,
+) -> Result {
+ if model.starts_with("claude") {
+ run_claude_chat_request(
+ workspace_root,
+ &resolve_cli_binary("claude", claude_path)?,
+ model,
+ reasoning,
+ phase,
+ prompt_payload,
+ )
+ } else {
+ run_codex_chat_request(
+ workspace_root,
+ &resolve_cli_binary("codex", codex_path)?,
+ model,
+ reasoning,
+ phase,
+ prompt_payload,
+ )
+ }
+}
+
+fn run_codex_chat_request(
+ workspace_root: &Path,
+ binary_path: &Path,
+ model: &str,
+ reasoning: &str,
+ phase: ChatExecutionPhase,
+ prompt_payload: &str,
+) -> Result {
+ let temp_dir = create_spec_generation_temp_dir("codex-chat")?;
+ let output_path = temp_dir.join("assistant-message.md");
+ let mut command = Command::new(binary_path);
+ command
+ .current_dir(workspace_root)
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .arg("exec")
+ .arg("--color")
+ .arg("never")
+ .arg("--skip-git-repo-check")
+ .arg("--sandbox")
+ .arg(phase.codex_sandbox())
+ .arg("--model")
+ .arg(model)
+ .arg("--config")
+ .arg(format!(
+ "model_reasoning_effort=\"{}\"",
+ map_codex_reasoning(reasoning)
+ ))
+ .arg("--output-last-message")
+ .arg(&output_path);
+
+ let output = run_command_with_stdin(&mut command, "Codex CLI", prompt_payload)?;
+
+ if !output.status.success() {
+ let _ = fs::remove_dir_all(&temp_dir);
+ return Err(format_process_failure("Codex CLI", &output));
+ }
+
+ let result = fs::read_to_string(&output_path).or_else(|_| {
+ let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
+
+ if stdout.is_empty() {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "The Codex CLI returned no assistant content.",
+ ))
+ } else {
+ Ok(stdout)
+ }
+ });
+ let _ = fs::remove_dir_all(&temp_dir);
+
+ result.map_err(|error| format!("Unable to read the Codex assistant output: {error}"))
+}
+
+fn run_claude_chat_request(
+ workspace_root: &Path,
+ binary_path: &Path,
+ model: &str,
+ reasoning: &str,
+ phase: ChatExecutionPhase,
+ prompt_payload: &str,
+) -> Result {
+ let mut command = Command::new(binary_path);
+ command
+ .current_dir(workspace_root)
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .arg("--print")
+ .arg("Respond to the request provided on stdin.")
+ .arg("--model")
+ .arg(model)
+ .arg("--output-format")
+ .arg("text")
+ .arg("--permission-mode")
+ .arg(phase.claude_permission_mode())
+ .arg("--max-turns")
+ .arg("8")
+ .arg("--effort")
+ .arg(map_claude_reasoning(reasoning));
+
+ let output = run_command_with_stdin(&mut command, "Claude CLI", prompt_payload)?;
+
+ if !output.status.success() {
+ return Err(format_process_failure("Claude CLI", &output));
+ }
+
+ Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
+}
+
+fn refresh_index_summary(workspace_root: &Path, snapshot: &ChatSessionSnapshot) -> Result<(), String> {
+ let mut index = load_chat_session_index(workspace_root)?;
+ upsert_chat_session_summary(&mut index, summarize_session(snapshot));
+ if index.last_active_session_id.is_none() {
+ index.last_active_session_id = Some(snapshot.id.clone());
+ }
+ write_chat_session_index(workspace_root, &index)
+}
+
+fn append_terminal_line(
+ app: &AppHandle,
+ workspace_root: &Path,
+ session_id: &str,
+ snapshot: &mut ChatSessionSnapshot,
+ line: &str,
+) -> Result<(), String> {
+ snapshot.runtime.terminal_output.push(line.to_string());
+
+ if snapshot.runtime.terminal_output.len() > 240 {
+ let keep_from = snapshot.runtime.terminal_output.len() - 240;
+ snapshot.runtime.terminal_output.drain(0..keep_from);
+ }
+
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(workspace_root, snapshot)?;
+ refresh_index_summary(workspace_root, snapshot)?;
+ emit_session_event(
+ app,
+ session_id,
+ "terminalLine",
+ None,
+ None,
+ Some(line.to_string()),
+ Some(snapshot.runtime.clone()),
+ );
+ Ok(())
+}
+
+fn halt_session(
+ app: &AppHandle,
+ workspace_root: &Path,
+ session_id: &str,
+ snapshot: &mut ChatSessionSnapshot,
+ message: &str,
+) -> Result<(), String> {
+ snapshot.status = String::from("halted");
+ snapshot.runtime.status = String::from("halted");
+ snapshot.runtime.is_busy = false;
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.execution_summary = Some(message.to_string());
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(workspace_root, snapshot)?;
+ refresh_index_summary(workspace_root, snapshot)?;
+ emit_session_event(
+ app,
+ session_id,
+ "halted",
+ Some(snapshot.clone()),
+ None,
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+ Ok(())
+}
+
+fn mark_session_error(
+ app: &AppHandle,
+ workspace_root: &Path,
+ session_id: &str,
+ error: String,
+) -> Result<(), String> {
+ let mut snapshot = read_chat_session_snapshot(workspace_root, session_id)?;
+ snapshot.status = String::from("error");
+ snapshot.runtime.status = String::from("error");
+ snapshot.runtime.is_busy = false;
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.last_error = Some(error.clone());
+ snapshot.runtime.execution_summary = Some(error.clone());
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(workspace_root, &snapshot)?;
+ refresh_index_summary(workspace_root, &snapshot)?;
+ emit_session_event(
+ app,
+ session_id,
+ "error",
+ Some(snapshot),
+ Some(error),
+ None,
+ None,
+ );
+ Ok(())
+}
+
+fn emit_session_event(
+ app: &AppHandle,
+ session_id: &str,
+ event_type: &str,
+ session: Option,
+ message_delta: Option,
+ terminal_line: Option,
+ runtime: Option,
+) {
+ let summary = session.as_ref().map(summarize_session);
+ let message = session
+ .as_ref()
+ .and_then(|snapshot| snapshot.messages.last().cloned());
+ let payload = ChatEventPayload {
+ session_id: session_id.to_string(),
+ event_type: event_type.to_string(),
+ message,
+ message_delta,
+ terminal_line,
+ session,
+ runtime,
+ summary,
+ };
+
+ let _ = app.emit("chat-session-event", payload);
+}
+
+fn wait_for_approval(
+ runtime: &Arc,
+ session_id: &str,
+ run_id: u64,
+) -> Result {
+ let mut controls = runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id.to_string()).or_default();
+ control.awaiting_approval = true;
+ runtime.signal.notify_all();
+
+ loop {
+ let current = controls.entry(session_id.to_string()).or_default().clone();
+
+ if current.run_id != run_id {
+ return Ok(ApprovalOutcome::Replaced);
+ }
+
+ if current.stop_requested {
+ return Ok(ApprovalOutcome::StopRequested);
+ }
+
+ if !current.awaiting_approval {
+ return Ok(ApprovalOutcome::Approved);
+ }
+
+ controls = runtime
+ .signal
+ .wait(controls)
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ }
+}
+
+fn stop_state(runtime: &Arc, session_id: &str, run_id: u64) -> ChatStopState {
+ runtime
+ .control
+ .lock()
+ .map(|controls| {
+ let Some(control) = controls.get(session_id) else {
+ return ChatStopState::Continue;
+ };
+
+ if control.run_id != run_id {
+ ChatStopState::Replaced
+ } else if control.stop_requested {
+ ChatStopState::StopRequested
+ } else {
+ ChatStopState::Continue
+ }
+ })
+ .unwrap_or(ChatStopState::StopRequested)
+}
+
+fn active_workspace_context(state: &State) -> Result {
+ state
+ .workspace
+ .lock()
+ .map_err(|_| String::from("Workspace lock was poisoned."))?
+ .clone()
+ .ok_or_else(|| String::from("No workspace folder is currently open."))
+}
+
+fn load_workspace_project_settings(workspace_root: &Path) -> Result {
+ let defaults = build_default_project_settings(workspace_root, None, None);
+ load_project_settings_from_workspace_root(workspace_root, defaults).map(|(settings, _)| settings)
+}
+
+fn build_default_context_items(settings: &ProjectSettings) -> Vec {
+ let mut items = vec![
+ build_context_item("prd", "PRD", Some(settings.prd_path.clone()), true),
+ build_context_item("spec", "SPEC", Some(settings.spec_path.clone()), true),
+ build_context_item("workspace_summary", "Workspace Tree Summary", None, true),
+ ];
+
+ for path in &settings.supporting_document_paths {
+ items.push(build_context_item(
+ "supporting_document",
+ &format!("Supporting: {path}"),
+ Some(path.clone()),
+ true,
+ ));
+ }
+
+ normalize_context_items(items)
+}
+
+fn build_context_item(
+ kind: &str,
+ label: &str,
+ path: Option,
+ is_default: bool,
+) -> ChatContextItem {
+ ChatContextItem {
+ id: create_chat_entity_id("ctx"),
+ kind: kind.to_string(),
+ label: label.to_string(),
+ path,
+ is_default,
+ }
+}
+
+fn normalize_context_items(items: Vec) -> Vec {
+ let mut seen = BTreeSet::::new();
+ let mut normalized_items = Vec::new();
+
+ for item in items {
+ let dedupe_key = format!(
+ "{}::{}",
+ item.kind,
+ item.path.as_deref().unwrap_or(item.label.as_str())
+ );
+
+ if !seen.insert(dedupe_key) {
+ continue;
+ }
+
+ normalized_items.push(ChatContextItem {
+ id: if item.id.trim().is_empty() {
+ create_chat_entity_id("ctx")
+ } else {
+ item.id
+ },
+ kind: item.kind.trim().to_string(),
+ label: item.label.trim().to_string(),
+ path: item.path.and_then(|value| {
+ let trimmed_value = value.trim().replace('\\', "/");
+ (!trimmed_value.is_empty()).then_some(trimmed_value)
+ }),
+ is_default: item.is_default,
+ });
+ }
+
+ normalized_items
+}
+
+fn summarize_session(snapshot: &ChatSessionSnapshot) -> ChatSessionSummary {
+ ChatSessionSummary {
+ id: snapshot.id.clone(),
+ title: snapshot.title.clone(),
+ created_at: snapshot.created_at.clone(),
+ updated_at: snapshot.updated_at.clone(),
+ status: snapshot.status.clone(),
+ last_message_preview: snapshot.last_message_preview.clone(),
+ selected_model: snapshot.selected_model.clone(),
+ selected_reasoning: snapshot.selected_reasoning.clone(),
+ autonomy_mode: snapshot.autonomy_mode.clone(),
+ }
+}
+
+fn upsert_chat_session_summary(
+ index: &mut ChatSessionIndexPayload,
+ summary: ChatSessionSummary,
+) {
+ if let Some(existing_summary) = index.sessions.iter_mut().find(|entry| entry.id == summary.id) {
+ *existing_summary = summary;
+ } else {
+ index.sessions.push(summary);
+ }
+
+ index.sessions.sort_by(|left, right| right.updated_at.cmp(&left.updated_at));
+}
+
+fn write_chat_session_index(
+ workspace_root: &Path,
+ index: &ChatSessionIndexPayload,
+) -> Result<(), String> {
+ ensure_session_directory(workspace_root)?;
+ let encoded = serde_json::to_string_pretty(index)
+ .map_err(|error| format!("Unable to encode the chat session index: {error}"))?;
+ fs::write(session_index_path(workspace_root), encoded.as_bytes()).map_err(|error| {
+ format!(
+ "Unable to write the chat session index {}: {error}",
+ session_index_path(workspace_root).display()
+ )
+ })
+}
+
+fn read_chat_session_snapshot(
+ workspace_root: &Path,
+ session_id: &str,
+) -> Result {
+ let session_path = session_snapshot_path(workspace_root, session_id);
+ let raw_value = fs::read_to_string(&session_path).map_err(|error| {
+ format!(
+ "Unable to read the chat session {}: {error}",
+ session_path.display()
+ )
+ })?;
+
+ serde_json::from_str::(&raw_value).map_err(|error| {
+ format!(
+ "Unable to parse the chat session {}: {error}",
+ session_path.display()
+ )
+ })
+}
+
+fn write_chat_session_snapshot(
+ workspace_root: &Path,
+ snapshot: &ChatSessionSnapshot,
+) -> Result<(), String> {
+ ensure_session_directory(workspace_root)?;
+ let encoded = serde_json::to_string_pretty(snapshot)
+ .map_err(|error| format!("Unable to encode the chat session {}: {error}", snapshot.id))?;
+ fs::write(session_snapshot_path(workspace_root, &snapshot.id), encoded.as_bytes()).map_err(
+ |error| {
+ format!(
+ "Unable to write the chat session {}: {error}",
+ session_snapshot_path(workspace_root, &snapshot.id).display()
+ )
+ },
+ )
+}
+
+fn ensure_session_directory(workspace_root: &Path) -> Result<(), String> {
+ let sessions_path = sessions_directory_path(workspace_root);
+ fs::create_dir_all(&sessions_path).map_err(|error| {
+ format!(
+ "Unable to create the chat session directory {}: {error}",
+ sessions_path.display()
+ )
+ })
+}
+
+fn sessions_directory_path(workspace_root: &Path) -> PathBuf {
+ workspace_root.join(SESSION_DIRECTORY_RELATIVE_PATH)
+}
+
+fn session_index_path(workspace_root: &Path) -> PathBuf {
+ sessions_directory_path(workspace_root).join(SESSION_INDEX_FILE_NAME)
+}
+
+fn session_snapshot_path(workspace_root: &Path, session_id: &str) -> PathBuf {
+ sessions_directory_path(workspace_root).join(format!("{session_id}.json"))
+}
+
+fn build_workspace_summary(workspace: &WorkspaceContext) -> String {
+ let mut paths = workspace.files.keys().cloned().collect::>();
+ paths.sort();
+
+ if paths.is_empty() {
+ return String::from("No workspace files were discovered for this project.");
+ }
+
+ let mut summary = String::from("Workspace files:\n");
+
+ for path in paths.iter().take(180) {
+ summary.push_str("- ");
+ summary.push_str(path);
+ summary.push('\n');
+ }
+
+ if paths.len() > 180 {
+ summary.push_str(&format!(
+ "- ... and {} more files not shown in this summary.\n",
+ paths.len() - 180
+ ));
+ }
+
+ summary
+}
+
+fn normalized_title(title: Option<&str>) -> Option {
+ title
+ .map(str::trim)
+ .filter(|value| !value.is_empty())
+ .map(|value| value.replace('\n', " "))
+}
+
+fn normalize_autonomy_mode(value: &str) -> String {
+ match value.trim() {
+ "stepped" => String::from("stepped"),
+ "god_mode" => String::from("god_mode"),
+ _ => String::from("milestone"),
+ }
+}
+
+fn build_message_preview(value: &str) -> String {
+ let collapsed = value.split_whitespace().collect::>().join(" ");
+ let mut preview = collapsed.chars().take(120).collect::();
+
+ if collapsed.chars().count() > 120 {
+ preview.push('…');
+ }
+
+ preview
+}
+
+fn create_chat_entity_id(prefix: &str) -> String {
+ let millis = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .map(|duration| duration.as_millis())
+ .unwrap_or_default();
+ let counter = SESSION_COUNTER.fetch_add(1, Ordering::Relaxed);
+ format!("{prefix}-{millis:x}-{counter:x}")
+}
+
+fn is_caveman_installed() -> bool {
+ caveman_install_paths().iter().any(|path| path.exists())
+}
+
+fn caveman_install_paths() -> Vec {
+ let Some(home_directory) = home_directory() else {
+ return Vec::new();
+ };
+
+ vec![
+ home_directory.join(".codex").join("skills").join("caveman"),
+ home_directory.join(".claude").join("skills").join("caveman"),
+ home_directory.join(".opencode").join("skill").join("caveman"),
+ home_directory.join(".opencode").join("skills").join("caveman"),
+ ]
+}
+
+fn install_caveman_skill() -> Result<(), String> {
+ let npm_binary = resolve_npx_binary()?;
+ let candidates = [
+ vec!["skills", "add", CAVEMAN_REPO, "-y"],
+ vec!["add-skill", CAVEMAN_REPO, "-g", "-a", "codex", "-a", "claude-code", "-y"],
+ ];
+
+ let mut last_error = None;
+
+ for arguments in candidates {
+ let output = Command::new(&npm_binary)
+ .args(&arguments)
+ .current_dir(resolve_override_path("."))
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output();
+
+ match output {
+ Ok(output) if output.status.success() => return Ok(()),
+ Ok(output) => {
+ last_error = Some(format_process_failure("npx", &output));
+ }
+ Err(error) => {
+ last_error = Some(format!("Unable to run npx for Caveman installation: {error}"));
+ }
+ }
+ }
+
+ Err(last_error.unwrap_or_else(|| {
+ String::from("Unable to install Caveman because the skills CLI returned no output.")
+ }))
+}
+
+fn resolve_npx_binary() -> Result {
+ which::which("npx")
+ .or_else(|_| which::which("npx.cmd"))
+ .map_err(|_| String::from("npx was not found on PATH, so Caveman cannot be installed."))
+}
+
+fn home_directory() -> Option {
+ std::env::var_os("USERPROFILE")
+ .map(PathBuf::from)
+ .or_else(|| std::env::var_os("HOME").map(PathBuf::from))
+}
+
+#[derive(Clone, Copy)]
+enum ChatExecutionPhase {
+ Proposal,
+ Write,
+}
+
+impl ChatExecutionPhase {
+ fn copy(self) -> Self {
+ self
+ }
+
+ fn label(self) -> &'static str {
+ match self {
+ Self::Proposal => "proposal",
+ Self::Write => "write",
+ }
+ }
+
+ fn milestone(self) -> &'static str {
+ match self {
+ Self::Proposal => "Proposal Pass",
+ Self::Write => "Execution Pass",
+ }
+ }
+
+ fn completed_milestone(self) -> &'static str {
+ match self {
+ Self::Proposal => "Proposal Complete",
+ Self::Write => "Execution Complete",
+ }
+ }
+
+ fn summary(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Running a read-only pass to propose the patch or command plan before approval."
+ }
+ Self::Write => "Running the selected CLI against the project workspace.",
+ }
+ }
+
+ fn completed_summary(self) -> &'static str {
+ match self {
+ Self::Proposal => "Proposal phase completed. Review the suggested plan before continuing.",
+ Self::Write => "Provider turn completed. Refresh the diff and transcript before continuing.",
+ }
+ }
+
+ fn line(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Launching the proposal pass with read-only permissions and the attached project context."
+ }
+ Self::Write => "Launching the write pass with the configured autonomy permissions.",
+ }
+ }
+
+ fn instructions(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Proposal-only pass. Do not mutate files or run write commands. Produce the clearest patch or command plan you would execute after approval."
+ }
+ Self::Write => {
+ "Write-enabled pass. You may edit files and run commands that fit the current autonomy mode. Summarize what changed and call out any blockers."
+ }
+ }
+ }
+
+ fn codex_sandbox(self) -> &'static str {
+ match self {
+ Self::Proposal => "read-only",
+ Self::Write => "workspace-write",
+ }
+ }
+
+ fn claude_permission_mode(self) -> &'static str {
+ match self {
+ Self::Proposal => "default",
+ Self::Write => "acceptEdits",
+ }
+ }
+}
+
+enum ChatStopState {
+ Continue,
+ StopRequested,
+ Replaced,
+}
+
+enum ApprovalOutcome {
+ Approved,
+ StopRequested,
+ Replaced,
+}
diff --git a/src-tauri/src/generation.rs b/src-tauri/src/generation.rs
index 2b6e7e1..28ae78c 100644
--- a/src-tauri/src/generation.rs
+++ b/src-tauri/src/generation.rs
@@ -6,9 +6,24 @@ use std::io::Write;
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::time::{SystemTime, UNIX_EPOCH};
+use tauri::async_runtime;
+
+struct DocumentGenerationRequest {
+ workspace_root: String,
+ output_path: String,
+ prompt_template: String,
+ user_prompt: String,
+ attachments: Vec<(String, String)>,
+ provider: String,
+ model: String,
+ reasoning: String,
+ claude_path: Option,
+ codex_path: Option,
+ field_name: &'static str,
+}
#[tauri::command]
-pub(crate) fn generate_prd_document(
+pub(crate) async fn generate_prd_document(
workspace_root: String,
output_path: String,
prompt_template: String,
@@ -27,26 +42,24 @@ pub(crate) fn generate_prd_document(
));
}
- let prompt_payload = build_generation_prompt(&prompt_template, trimmed_prompt, &[]);
- let generated_prd = run_generation_request(
- &provider,
- &model,
- &reasoning,
- claude_path.as_deref(),
- codex_path.as_deref(),
- &prompt_payload,
- )?;
-
- write_generated_workspace_document(
- &workspace_root,
- &output_path,
- generated_prd,
- "PRD output path",
- )
+ run_workspace_document_generation(DocumentGenerationRequest {
+ workspace_root,
+ output_path,
+ prompt_template,
+ user_prompt: trimmed_prompt.to_string(),
+ attachments: Vec::new(),
+ provider,
+ model,
+ reasoning,
+ claude_path,
+ codex_path,
+ field_name: "PRD output path",
+ })
+ .await
}
#[tauri::command]
-pub(crate) fn generate_spec_document(
+pub(crate) async fn generate_spec_document(
workspace_root: String,
output_path: String,
prd_content: String,
@@ -73,25 +86,57 @@ pub(crate) fn generate_spec_document(
));
}
- let prompt_payload = build_generation_prompt(
- &prompt_template,
- trimmed_prompt,
- &[("Attached Product Requirements Document (PRD)", trimmed_prd)],
- );
- let generated_spec = run_generation_request(
- &provider,
- &model,
- &reasoning,
- claude_path.as_deref(),
- codex_path.as_deref(),
+ run_workspace_document_generation(DocumentGenerationRequest {
+ workspace_root,
+ output_path,
+ prompt_template,
+ user_prompt: trimmed_prompt.to_string(),
+ attachments: vec![(
+ String::from("Attached Product Requirements Document (PRD)"),
+ trimmed_prd.to_string(),
+ )],
+ provider,
+ model,
+ reasoning,
+ claude_path,
+ codex_path,
+ field_name: "SPEC output path",
+ })
+ .await
+}
+
+async fn run_workspace_document_generation(
+ request: DocumentGenerationRequest,
+) -> Result {
+ async_runtime::spawn_blocking(move || run_workspace_document_generation_blocking(request))
+ .await
+ .map_err(|error| format!("Document generation task failed: {error}"))?
+}
+
+fn run_workspace_document_generation_blocking(
+ request: DocumentGenerationRequest,
+) -> Result {
+ let attachments = request
+ .attachments
+ .iter()
+ .map(|(label, content)| (label.as_str(), content.as_str()))
+ .collect::>();
+ let prompt_payload =
+ build_generation_prompt(&request.prompt_template, &request.user_prompt, &attachments);
+ let generated_document = run_generation_request(
+ &request.provider,
+ &request.model,
+ &request.reasoning,
+ request.claude_path.as_deref(),
+ request.codex_path.as_deref(),
&prompt_payload,
)?;
write_generated_workspace_document(
- &workspace_root,
- &output_path,
- generated_spec,
- "SPEC output path",
+ &request.workspace_root,
+ &request.output_path,
+ generated_document,
+ request.field_name,
)
}
diff --git a/src-tauri/src/git.rs b/src-tauri/src/git.rs
index 52ba0ae..c3d8f6a 100644
--- a/src-tauri/src/git.rs
+++ b/src-tauri/src/git.rs
@@ -1,10 +1,23 @@
use git2::{DiffFormat, DiffOptions, Repository};
-use crate::{constants::SAMPLE_DIFF, paths::project_root};
+use crate::{constants::SAMPLE_DIFF, paths::project_root, state::SharedState};
+use std::path::Path;
+use tauri::State;
#[tauri::command]
-pub(crate) fn git_get_diff() -> Result {
- let repository = Repository::discover(project_root())
+pub(crate) fn git_get_diff(state: State) -> Result {
+ let workspace_root = state
+ .workspace
+ .lock()
+ .map_err(|_| String::from("Workspace lock was poisoned."))?
+ .as_ref()
+ .map(|workspace| workspace.root.clone())
+ .unwrap_or_else(project_root);
+ git_get_diff_for_root(&workspace_root)
+}
+
+pub(crate) fn git_get_diff_for_root(root: &Path) -> Result {
+ let repository = Repository::discover(root)
.map_err(|error| format!("Unable to discover git repository: {error}"))?;
let head_tree = repository
.head()
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 9b229ca..2aa638e 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -1,4 +1,5 @@
mod agent;
+mod chat;
mod constants;
mod documents;
mod environment;
@@ -11,6 +12,11 @@ mod state;
mod workspace;
use agent::{approve_action, kill_agent_process, spawn_cli_agent};
+use chat::{
+ approve_chat_session, create_chat_session, delete_chat_session, ensure_caveman_skill,
+ load_chat_session, rename_chat_session, save_chat_session, send_chat_message,
+ stop_chat_session,
+};
use documents::{parse_document, pick_document};
use environment::run_environment_scan;
use generation::{generate_prd_document, generate_spec_document};
@@ -37,7 +43,16 @@ pub fn run() {
generate_spec_document,
spawn_cli_agent,
approve_action,
- kill_agent_process
+ kill_agent_process,
+ create_chat_session,
+ load_chat_session,
+ save_chat_session,
+ rename_chat_session,
+ delete_chat_session,
+ send_chat_message,
+ approve_chat_session,
+ stop_chat_session,
+ ensure_caveman_skill
])
.run(tauri::generate_context!())
.expect("error while running tauri application");
diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs
index 7a01f16..2c4ec03 100644
--- a/src-tauri/src/models.rs
+++ b/src-tauri/src/models.rs
@@ -59,6 +59,8 @@ pub(crate) struct ProjectContextPayload {
pub(crate) ignored_file_count: usize,
pub(crate) prd_document: Option,
pub(crate) spec_document: Option,
+ pub(crate) chat_sessions: Vec,
+ pub(crate) last_active_session_id: Option,
}
#[derive(Clone, Serialize)]
@@ -80,6 +82,113 @@ pub(crate) struct AgentStateEvent {
pub(crate) summary: Option,
}
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatContextItem {
+ pub(crate) id: String,
+ pub(crate) kind: String,
+ pub(crate) label: String,
+ pub(crate) path: Option,
+ pub(crate) is_default: bool,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatMessage {
+ pub(crate) id: String,
+ pub(crate) role: String,
+ pub(crate) content: String,
+ pub(crate) created_at: String,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatRuntimeState {
+ pub(crate) status: String,
+ pub(crate) terminal_output: Vec,
+ pub(crate) current_milestone: Option,
+ pub(crate) pending_diff: Option,
+ pub(crate) execution_summary: Option,
+ pub(crate) awaiting_approval: bool,
+ pub(crate) last_error: Option,
+ pub(crate) is_busy: bool,
+ pub(crate) pending_request: Option,
+}
+
+impl Default for ChatRuntimeState {
+ fn default() -> Self {
+ Self {
+ status: String::from("idle"),
+ terminal_output: Vec::new(),
+ current_milestone: None,
+ pending_diff: None,
+ execution_summary: None,
+ awaiting_approval: false,
+ last_error: None,
+ is_busy: false,
+ pending_request: None,
+ }
+ }
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatSessionSummary {
+ pub(crate) id: String,
+ pub(crate) title: String,
+ pub(crate) created_at: String,
+ pub(crate) updated_at: String,
+ pub(crate) status: String,
+ pub(crate) last_message_preview: String,
+ pub(crate) selected_model: String,
+ pub(crate) selected_reasoning: String,
+ pub(crate) autonomy_mode: String,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatSessionSnapshot {
+ pub(crate) id: String,
+ pub(crate) title: String,
+ pub(crate) created_at: String,
+ pub(crate) updated_at: String,
+ pub(crate) status: String,
+ pub(crate) last_message_preview: String,
+ pub(crate) selected_model: String,
+ pub(crate) selected_reasoning: String,
+ pub(crate) autonomy_mode: String,
+ pub(crate) context_items: Vec,
+ pub(crate) messages: Vec,
+ pub(crate) runtime: ChatRuntimeState,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatSessionIndexPayload {
+ pub(crate) sessions: Vec,
+ pub(crate) last_active_session_id: Option,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct CavemanStatusPayload {
+ pub(crate) ready: bool,
+ pub(crate) detail: String,
+}
+
+#[derive(Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ChatEventPayload {
+ pub(crate) session_id: String,
+ pub(crate) event_type: String,
+ pub(crate) message: Option,
+ pub(crate) message_delta: Option,
+ pub(crate) terminal_line: Option,
+ pub(crate) session: Option,
+ pub(crate) runtime: Option,
+ pub(crate) summary: Option,
+}
+
#[derive(Clone)]
pub(crate) struct SimulatedStep {
pub(crate) delay_ms: u64,
diff --git a/src-tauri/src/project.rs b/src-tauri/src/project.rs
index c8cedf8..ffe1d40 100644
--- a/src-tauri/src/project.rs
+++ b/src-tauri/src/project.rs
@@ -1,4 +1,5 @@
use crate::{
+ chat::load_chat_session_index,
constants::{
DEFAULT_PRD_PROMPT, DEFAULT_PROJECT_PRD_PATH, DEFAULT_PROJECT_SPEC_PATH,
DEFAULT_SPEC_PROMPT, SPECFORGE_SETTINGS_RELATIVE_PATH,
@@ -98,9 +99,10 @@ pub(crate) fn load_project_context_from_folder(
result.spec_document.as_ref(),
);
let (settings, has_saved_settings) =
- read_project_settings(&settings_path, &context.root, default_settings)?;
+ load_project_settings_from_workspace_root(&context.root, default_settings)?;
let prd_document = load_configured_workspace_document(&context.root, &settings.prd_path)?;
let spec_document = load_configured_workspace_document(&context.root, &settings.spec_path)?;
+ let chat_index = load_chat_session_index(&context.root)?;
let mut active_workspace = state
.workspace
.lock()
@@ -120,6 +122,8 @@ pub(crate) fn load_project_context_from_folder(
ignored_file_count: result.ignored_file_count,
prd_document,
spec_document,
+ chat_sessions: chat_index.sessions,
+ last_active_session_id: chat_index.last_active_session_id,
})
}
@@ -253,6 +257,14 @@ fn read_project_settings(
))
}
+pub(crate) fn load_project_settings_from_workspace_root(
+ workspace_root: &Path,
+ defaults: ProjectSettings,
+) -> Result<(ProjectSettings, bool), String> {
+ let settings_path = workspace_root.join(SPECFORGE_SETTINGS_RELATIVE_PATH);
+ read_project_settings(&settings_path, workspace_root, defaults)
+}
+
fn normalize_project_model(value: &str, fallback: &str) -> String {
const VALID_MODELS: &[&str] = &[
"gpt-5.4",
diff --git a/src-tauri/src/state.rs b/src-tauri/src/state.rs
index e7d7a5e..460f089 100644
--- a/src-tauri/src/state.rs
+++ b/src-tauri/src/state.rs
@@ -8,6 +8,7 @@ use std::{
#[derive(Default)]
pub(crate) struct SharedState {
pub(crate) runtime: Arc,
+ pub(crate) chat_runtime: Arc,
pub(crate) workspace: Mutex>,
}
@@ -24,6 +25,20 @@ pub(crate) struct ExecutionControl {
pub(crate) stop_requested: bool,
}
+#[derive(Default)]
+pub(crate) struct ChatExecutionRuntime {
+ pub(crate) control: Mutex>,
+ pub(crate) signal: Condvar,
+}
+
+#[derive(Default, Clone)]
+pub(crate) struct ChatExecutionControl {
+ pub(crate) run_id: u64,
+ pub(crate) awaiting_approval: bool,
+ pub(crate) stop_requested: bool,
+}
+
+#[derive(Clone)]
pub(crate) struct WorkspaceContext {
pub(crate) root: PathBuf,
pub(crate) files: HashMap,
diff --git a/src/App.tsx b/src/App.tsx
index 11ccf3b..472a767 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -1,7 +1,6 @@
import {
startTransition,
useCallback,
- useDeferredValue,
useEffect,
useMemo,
useRef,
@@ -18,49 +17,50 @@ import {
import { AppRail } from "./components/AppRail";
import {
- FallbackStep,
- WorkspaceFileSource,
buildFallbackSteps,
clearFallbackTimer,
- filterWorkspaceEntries,
isOpenableWorkspacePath,
- resolveTheme,
runFallbackStep,
stampLog,
- type DocumentTarget
+ type DocumentTarget,
+ type FallbackStep,
+ type WorkspaceFileSource
} from "./lib/appShell";
import {
getModelLabel,
- getModelProvider,
getReasoningLabel
} from "./lib/agentConfig";
import {
- DEFAULT_PROJECT_PRD_PATH,
- DEFAULT_PROJECT_SPEC_PATH,
- SPECFORGE_SETTINGS_RELATIVE_PATH,
- formatSupportingDocumentPaths,
- getWorkspaceDisplayPath,
- normalizeProjectRelativePath,
- normalizeProjectSettings,
- parseSupportingDocumentPaths
-} from "./lib/projectConfig";
+ buildConfigPathDisplay,
+ buildWorkspaceNotice,
+ waitForNextPaint
+} from "./lib/appState";
import {
DEFAULT_PENDING_DIFF,
+ approveChatSession,
approveAgentAction,
+ createChatSession,
+ deleteChatSession,
emergencyStop,
+ ensureCavemanSkill,
generatePrdDocument,
generateSpecDocument,
getGitDiff,
getWorkspaceSnapshot,
isTauriRuntime,
+ loadChatSession,
loadProjectContext,
pickDocument,
pickProjectFolder,
readWorkspaceFile,
+ renameChatSession,
runEnvironmentScan,
saveProjectSettings,
+ saveChatSession,
+ sendChatMessage,
startAgentRun,
- subscribeToAgentEvents
+ stopChatSession,
+ subscribeToChatSessionEvents
} from "./lib/runtime";
import {
isOpenableTextFile,
@@ -68,88 +68,69 @@ import {
parseWorkspaceTextFile,
type ImportableFile
} from "./lib/workspaceImport";
+import {
+ useAgentEventSubscription,
+ useDocumentTheme,
+ useInitialDiagnostics,
+ useProjectRestore,
+ useSystemThemePreference,
+ useWorkspaceSearchFocus,
+ useWorkspaceSearchRouteReset,
+ useWorkspaceSearchShortcuts
+} from "./hooks/useAppLifecycle";
+import {
+ useAgentStoreSlice,
+ useChatStoreSlice,
+ useProjectStoreSlice,
+ useSettingsStoreSlice
+} from "./hooks/useAppStoreSlices";
+import {
+ useAppDerivedState,
+ useAppScreenProps,
+ useAppUiHandlers,
+ useProjectSettingsHandlers
+} from "./hooks/useAppView";
import { ConfigurationScreen } from "./screens/ConfigurationScreen";
+import { ChatScreen } from "./screens/ChatScreen";
import { PrdScreen } from "./screens/PrdScreen";
import { SettingsScreen } from "./screens/SettingsScreen";
import { useAgentStore } from "./store/useAgentStore";
-import { useProjectStore } from "./store/useProjectStore";
-import { useSettingsStore } from "./store/useSettingsStore";
+import { useChatStore } from "./store/useChatStore";
import type {
+ ChatContextItem,
+ ChatSession,
EnvironmentStatus,
- ModelProvider,
ProjectContext
} from "./types";
function App() {
const location = useLocation();
const navigate = useNavigate();
+ const isChatRoute = location.pathname === "/chat";
const isReviewRoute = location.pathname === "/review";
const desktopRuntime = isTauriRuntime();
- const agentStatus = useAgentStore((state) => state.status);
- const terminalOutput = useAgentStore((state) => state.terminalOutput);
- const pendingDiff = useAgentStore((state) => state.pendingDiff);
- const executionSummary = useAgentStore((state) => state.executionSummary);
- const resetRun = useAgentStore((state) => state.resetRun);
- const appendTerminalOutput = useAgentStore((state) => state.appendTerminalOutput);
- const setAgentStatus = useAgentStore((state) => state.setStatus);
- const setCurrentMilestone = useAgentStore((state) => state.setCurrentMilestone);
- const setPendingDiff = useAgentStore((state) => state.setPendingDiff);
- const setExecutionSummary = useAgentStore((state) => state.setExecutionSummary);
- const applyAgentEvent = useAgentStore((state) => state.applyEvent);
-
- const annotations = useProjectStore((state) => state.annotations);
- const activeTab = useProjectStore((state) => state.activeTab);
- const autonomyMode = useProjectStore((state) => state.autonomyMode);
- const configuredPrdPath = useProjectStore((state) => state.configuredPrdPath);
- const configuredSpecPath = useProjectStore((state) => state.configuredSpecPath);
- const isSpecApproved = useProjectStore((state) => state.isSpecApproved);
- const openEditorTabs = useProjectStore((state) => state.openEditorTabs);
- const prdContent = useProjectStore((state) => state.prdContent);
- const prdPaneMode = useProjectStore((state) => state.prdPaneMode);
- const prdPath = useProjectStore((state) => state.prdPath);
- const prdPromptTemplate = useProjectStore((state) => state.prdPromptTemplate);
- const selectedModel = useProjectStore((state) => state.selectedModel);
- const selectedReasoning = useProjectStore((state) => state.selectedReasoning);
- const selectedSpecRange = useProjectStore((state) => state.selectedSpecRange);
- const specContent = useProjectStore((state) => state.specContent);
- const specPaneMode = useProjectStore((state) => state.specPaneMode);
- const specPath = useProjectStore((state) => state.specPath);
- const specPromptTemplate = useProjectStore((state) => state.specPromptTemplate);
- const supportingDocumentPaths = useProjectStore((state) => state.supportingDocumentPaths);
- const approveSpec = useProjectStore((state) => state.approveSpec);
- const closeEditorTab = useProjectStore((state) => state.closeEditorTab);
- const openEditorTab = useProjectStore((state) => state.openEditorTab);
- const resetWorkspaceContext = useProjectStore((state) => state.resetWorkspaceContext);
- const setActiveTab = useProjectStore((state) => state.setActiveTab);
- const setAutonomyMode = useProjectStore((state) => state.setAutonomyMode);
- const setConfiguredPrdPath = useProjectStore((state) => state.setConfiguredPrdPath);
- const setConfiguredSpecPath = useProjectStore((state) => state.setConfiguredSpecPath);
- const setPrdContent = useProjectStore((state) => state.setPrdContent);
- const setPrdPaneMode = useProjectStore((state) => state.setPrdPaneMode);
- const setPrdPromptTemplate = useProjectStore((state) => state.setPrdPromptTemplate);
- const setProjectSettings = useProjectStore((state) => state.setProjectSettings);
- const setReasoningProfile = useProjectStore((state) => state.setReasoningProfile);
- const setSelectedModel = useProjectStore((state) => state.setSelectedModel);
- const setSelectedSpecRange = useProjectStore((state) => state.setSelectedSpecRange);
- const setSpecContent = useProjectStore((state) => state.setSpecContent);
- const setSpecPaneMode = useProjectStore((state) => state.setSpecPaneMode);
- const setSpecPromptTemplate = useProjectStore((state) => state.setSpecPromptTemplate);
- const setSupportingDocumentPaths = useProjectStore((state) => state.setSupportingDocumentPaths);
- const updateEditorTabContent = useProjectStore((state) => state.updateEditorTabContent);
-
- const claudePath = useSettingsStore((state) => state.claudePath);
- const codexPath = useSettingsStore((state) => state.codexPath);
- const environment = useSettingsStore((state) => state.environment);
- const lastProjectPath = useSettingsStore((state) => state.lastProjectPath);
- const theme = useSettingsStore((state) => state.theme);
- const workspaceEntries = useSettingsStore((state) => state.workspaceEntries);
- const setClaudePath = useSettingsStore((state) => state.setClaudePath);
- const setCodexPath = useSettingsStore((state) => state.setCodexPath);
- const setEnvironment = useSettingsStore((state) => state.setEnvironment);
- const setLastProjectPath = useSettingsStore((state) => state.setLastProjectPath);
- const setTheme = useSettingsStore((state) => state.setTheme);
- const setWorkspaceEntries = useSettingsStore((state) => state.setWorkspaceEntries);
+ const agentState = useAgentStoreSlice();
+ const chatState = useChatStoreSlice();
+ const projectState = useProjectStoreSlice();
+ const settingsState = useSettingsStoreSlice();
+ const {
+ sessions: chatSessions,
+ activeSessionId,
+ loadedSessions,
+ drafts: chatDrafts,
+ cavemanReady,
+ cavemanMessage,
+ cavemanChecking,
+ setSessions: setChatSessions,
+ setActiveSessionId,
+ upsertSession,
+ setDraft: setChatDraft,
+ setContextItems: setChatContextItems,
+ setSessionConfig,
+ deleteSession: deleteChatSessionState,
+ setCavemanStatus
+ } = chatState;
const [commandSearch, setCommandSearch] = useState("");
const [isImporting, setIsImporting] = useState(false);
@@ -185,231 +166,66 @@ function App() {
const hasScannedEnvironmentRef = useRef(false);
const projectSaveTimerRef = useRef(null);
const pendingProjectReloadRef = useRef(false);
- const deferredSearch = useDeferredValue(commandSearch);
-
- const filteredWorkspaceEntries = useMemo(
- () => filterWorkspaceEntries(workspaceEntries, deferredSearch),
- [deferredSearch, workspaceEntries]
- );
- const selectedModelProvider = useMemo(
- () => getModelProvider(selectedModel),
- [selectedModel]
- );
- const isGeneratingPrd = agentStatus === "generating_prd";
- const isGeneratingSpec = agentStatus === "generating_spec";
- const visibleDiff = pendingDiff ?? latestDiff;
- const resolvedTheme = useMemo(
- () => resolveTheme(theme, systemPrefersDark),
- [theme, systemPrefersDark]
- );
- const configuredModelProviders = useMemo(() => {
- const providers: ModelProvider[] = [];
-
- if (environment.claude.status === "found") {
- providers.push("claude");
- }
-
- if (environment.codex.status === "found") {
- providers.push("codex");
- }
-
- return providers;
- }, [environment.claude.status, environment.codex.status]);
- const mcpItems = useMemo(
- () => [
- { name: environment.codex.name, detail: environment.codex.detail, status: environment.codex.status },
- { name: environment.claude.name, detail: environment.claude.detail, status: environment.claude.status },
- { name: environment.git.name, detail: environment.git.detail, status: environment.git.status }
- ],
- [environment]
- );
- const selectedProviderStatus =
- selectedModelProvider === "claude" ? environment.claude : environment.codex;
- const currentProjectSettings = useMemo(
- () =>
- normalizeProjectSettings({
- selectedModel,
- selectedReasoning,
- prdPrompt: prdPromptTemplate,
- specPrompt: specPromptTemplate,
- prdPath: configuredPrdPath || DEFAULT_PROJECT_PRD_PATH,
- specPath: configuredSpecPath || DEFAULT_PROJECT_SPEC_PATH,
- supportingDocumentPaths
- }),
- [
- configuredPrdPath,
- configuredSpecPath,
- prdPromptTemplate,
- selectedModel,
- selectedReasoning,
- specPromptTemplate,
- supportingDocumentPaths
- ]
- );
- const configPathDisplay = useMemo(() => {
- if (projectConfigPath.trim()) {
- return getWorkspaceDisplayPath(projectConfigPath, projectRootName);
- }
-
- return SPECFORGE_SETTINGS_RELATIVE_PATH;
- }, [projectConfigPath, projectRootName]);
- const supportingDocumentsValue = useMemo(
- () => formatSupportingDocumentPaths(supportingDocumentPaths),
- [supportingDocumentPaths]
- );
- const canGeneratePrd = useMemo(
- () =>
- desktopRuntime &&
- !isGeneratingPrd &&
- projectRootPath.trim().length > 0 &&
- configuredPrdPath.trim().length > 0 &&
- prdGenerationPrompt.trim().length > 0,
- [
- configuredPrdPath,
- desktopRuntime,
- isGeneratingPrd,
- prdGenerationPrompt,
- projectRootPath
- ]
- );
- const canGenerateSpec = useMemo(
- () =>
- desktopRuntime &&
- !isGeneratingSpec &&
- projectRootPath.trim().length > 0 &&
- prdContent.trim().length > 0 &&
- configuredSpecPath.trim().length > 0 &&
- specGenerationPrompt.trim().length > 0,
- [
- configuredSpecPath,
- desktopRuntime,
- isGeneratingSpec,
- prdContent,
- projectRootPath,
- specGenerationPrompt
- ]
+ const activeChatSession = useMemo(
+ () => (activeSessionId ? loadedSessions[activeSessionId] ?? null : null),
+ [activeSessionId, loadedSessions]
);
- const prdGenerationHelperText = useMemo(() => {
- if (!desktopRuntime) {
- return "AI PRD generation requires the desktop runtime.";
- }
-
- if (!projectRootPath.trim()) {
- return "Choose a project folder in setup before generating a PRD.";
- }
-
- if (!configuredPrdPath.trim()) {
- return "Configure a PRD path in setup or settings first.";
- }
-
- if (!configuredPrdPath.toLowerCase().endsWith(".md")) {
- return "Configure the PRD path as a Markdown file if you want generated output saved into the workspace.";
- }
-
- if (!prdGenerationPrompt.trim()) {
- return "Add the product context you want to append after the saved PRD prompt.";
- }
-
- if (selectedProviderStatus.status !== "found") {
- return `${selectedProviderStatus.name} is not currently marked ready. Update its path in Settings and refresh if generation fails.`;
- }
-
- return `This appends your note after the saved PRD prompt from ${configPathDisplay}, runs ${getModelLabel(selectedModel)}, and writes markdown to ${configuredPrdPath}.`;
- }, [
- configPathDisplay,
- configuredPrdPath,
+ const activeChatDraft = activeSessionId ? chatDrafts[activeSessionId] ?? "" : "";
+ const reviewVisibleDiff = activeChatSession
+ ? activeChatSession.runtime.pendingDiff ?? "No diff captured for the active chat topic yet."
+ : agentState.pendingDiff ?? latestDiff;
+
+ const derivedState = useAppDerivedState({
+ agentState,
+ commandSearch,
desktopRuntime,
+ latestDiff,
prdGenerationPrompt,
+ projectConfigPath,
+ projectRootName,
projectRootPath,
- selectedModel,
- selectedProviderStatus.name,
- selectedProviderStatus.status
- ]);
- const specGenerationHelperText = useMemo(() => {
- if (!desktopRuntime) {
- return "AI spec generation requires the desktop runtime.";
- }
-
- if (!projectRootPath.trim()) {
- return "Choose a project folder in setup before generating a spec.";
- }
-
- if (!prdContent.trim()) {
- return "Load or generate a PRD first. The spec generator appends your note after the saved spec prompt and includes the current PRD content.";
- }
-
- if (!configuredSpecPath.trim()) {
- return "Configure a spec path in setup or settings first.";
- }
-
- if (!configuredSpecPath.toLowerCase().endsWith(".md")) {
- return "Configure the spec path as a Markdown file if you want generated output saved into the workspace.";
- }
-
- if (!specGenerationPrompt.trim()) {
- return "Add the technical guidance you want to append after the saved spec prompt.";
- }
-
- if (selectedProviderStatus.status !== "found") {
- return `${selectedProviderStatus.name} is not currently marked ready. Update its path in Settings and refresh if generation fails.`;
- }
-
- return `This appends your note after the saved spec prompt from ${configPathDisplay}, includes the current PRD content, and writes markdown to ${configuredSpecPath}.`;
- }, [
- configPathDisplay,
- configuredSpecPath,
- desktopRuntime,
- prdContent,
- projectRootPath,
- selectedProviderStatus.name,
- selectedProviderStatus.status,
- specGenerationPrompt
- ]);
+ projectState,
+ settingsState,
+ specGenerationPrompt,
+ systemPrefersDark
+ });
const refreshDiagnostics = useCallback(
async (previousEnvironment?: EnvironmentStatus) => {
const [nextEnvironment, snapshotEntries, diff] = await Promise.all([
runEnvironmentScan({
- claudePath,
- codexPath
- }).catch(() => previousEnvironment ?? environment),
+ claudePath: settingsState.claudePath,
+ codexPath: settingsState.codexPath
+ }).catch(() => previousEnvironment ?? settingsState.environment),
hasSelectedProject
- ? Promise.resolve(workspaceEntries)
- : getWorkspaceSnapshot().catch(() => workspaceEntries),
+ ? Promise.resolve(settingsState.workspaceEntries)
+ : getWorkspaceSnapshot().catch(() => settingsState.workspaceEntries),
getGitDiff().catch(() => DEFAULT_PENDING_DIFF)
]);
- setEnvironment(nextEnvironment);
+ settingsState.setEnvironment(nextEnvironment);
if (!hasSelectedProject) {
- setWorkspaceEntries(snapshotEntries);
+ settingsState.setWorkspaceEntries(snapshotEntries);
}
setLatestDiff(diff);
},
- [
- claudePath,
- codexPath,
- environment,
- hasSelectedProject,
- setEnvironment,
- setWorkspaceEntries,
- workspaceEntries
- ]
+ [hasSelectedProject, settingsState]
);
const assignDocument = useCallback(
(target: DocumentTarget, content: string, path: string) => {
startTransition(() => {
if (target === "prd") {
- setPrdContent(content, path);
- setPrdPaneMode("preview");
+ projectState.setPrdContent(content, path);
+ projectState.setPrdPaneMode("preview");
return;
}
- setSpecContent(content, path);
- setSpecPaneMode("preview");
+ projectState.setSpecContent(content, path);
+ projectState.setSpecPaneMode("preview");
});
if (target === "prd") {
@@ -421,11 +237,15 @@ function App() {
setSpecGenerationPrompt("");
setSpecGenerationError("");
},
- [setPrdContent, setPrdPaneMode, setSpecContent, setSpecPaneMode]
+ [projectState]
);
const applyProjectContext = useCallback(
- (context: ProjectContext, options?: { navigateToReview?: boolean }) => {
+ (context: ProjectContext, options?: { navigateToChat?: boolean }) => {
+ const settingsPathDisplay = buildConfigPathDisplay(
+ context.settingsPath,
+ context.rootName
+ );
const nextWorkspaceFiles = Object.fromEntries(
context.entries
.filter((entry) => entry.kind === "file")
@@ -438,59 +258,61 @@ function App() {
])
);
- resetWorkspaceContext();
+ projectState.resetWorkspaceContext();
setProjectRootName(context.rootName);
setProjectRootPath(context.rootPath);
setProjectConfigPath(context.settingsPath);
setHasSelectedProject(true);
setHasSavedProjectSettings(context.hasSavedSettings);
- setWorkspaceEntries(context.entries);
+ settingsState.setWorkspaceEntries(context.entries);
setWorkspaceFiles(nextWorkspaceFiles);
- setLastProjectPath(context.rootPath);
- setProjectSettings(context.settings);
+ settingsState.setLastProjectPath(context.rootPath);
+ projectState.setProjectSettings(context.settings);
setPrdGenerationPrompt("");
setPrdGenerationError("");
setSpecGenerationPrompt("");
setSpecGenerationError("");
+ setChatSessions(context.chatSessions);
+ setActiveSessionId(context.lastActiveSessionId ?? context.chatSessions[0]?.id ?? null);
+ setCavemanStatus({
+ ready: false,
+ message: "Caveman has not been verified for this project yet."
+ });
setProjectStatusMessage(
context.hasSavedSettings
- ? `Loaded project settings from ${context.rootName}/${getWorkspaceDisplayPath(context.settingsPath, context.rootName)}.`
- : `Selected ${context.rootName}. Save the setup to create ${context.rootName}/${getWorkspaceDisplayPath(context.settingsPath, context.rootName)}.`
+ ? `Loaded project settings from ${context.rootName}/${settingsPathDisplay}.`
+ : `Selected ${context.rootName}. Save the setup to create ${context.rootName}/${settingsPathDisplay}.`
);
setProjectErrorMessage("");
setWorkspaceNotice(buildWorkspaceNotice(context));
startTransition(() => {
- setPrdContent(context.prdDocument?.content ?? "", context.prdDocument?.sourcePath ?? context.settings.prdPath);
- setSpecContent(context.specDocument?.content ?? "", context.specDocument?.sourcePath ?? context.settings.specPath);
- setPrdPaneMode("preview");
- setSpecPaneMode("preview");
+ projectState.setPrdContent(
+ context.prdDocument?.content ?? "",
+ context.prdDocument?.sourcePath ?? context.settings.prdPath
+ );
+ projectState.setSpecContent(
+ context.specDocument?.content ?? "",
+ context.specDocument?.sourcePath ?? context.settings.specPath
+ );
+ projectState.setPrdPaneMode("preview");
+ projectState.setSpecPaneMode("preview");
});
- if (options?.navigateToReview) {
- navigate("/review");
+ if (options?.navigateToChat) {
+ navigate("/chat");
}
},
- [
- navigate,
- resetWorkspaceContext,
- setLastProjectPath,
- setPrdContent,
- setPrdPaneMode,
- setProjectSettings,
- setSpecContent,
- setSpecPaneMode,
- setWorkspaceEntries
- ]
+ [navigate, projectState, setActiveSessionId, setCavemanStatus, setChatSessions, settingsState]
);
const saveCurrentProjectSettings = useCallback(
async ({
reloadProject = false,
- navigateToReview = false
+ navigateToChat = false
}: {
reloadProject?: boolean;
- navigateToReview?: boolean;
+ navigateToChat?: boolean;
} = {}) => {
if (!desktopRuntime) {
setProjectErrorMessage("Project configuration requires the desktop runtime.");
@@ -509,20 +331,20 @@ function App() {
try {
const savedSettings = await saveProjectSettings({
folderPath: projectRootPath,
- settings: currentProjectSettings
+ settings: derivedState.currentProjectSettings
});
- setProjectSettings(savedSettings);
+ projectState.setProjectSettings(savedSettings);
setHasSavedProjectSettings(true);
setProjectStatusMessage(
projectRootName
- ? `Saved project settings to ${projectRootName}/${configPathDisplay}.`
- : `Saved project settings to ${configPathDisplay}.`
+ ? `Saved project settings to ${projectRootName}/${derivedState.configPathDisplay}.`
+ : `Saved project settings to ${derivedState.configPathDisplay}.`
);
- if (reloadProject || navigateToReview) {
+ if (reloadProject || navigateToChat) {
const reloadedContext = await loadProjectContext(projectRootPath);
- applyProjectContext(reloadedContext, { navigateToReview });
+ applyProjectContext(reloadedContext, { navigateToChat });
}
} catch (error) {
setProjectErrorMessage(
@@ -534,12 +356,12 @@ function App() {
},
[
applyProjectContext,
- configPathDisplay,
- currentProjectSettings,
+ derivedState.configPathDisplay,
+ derivedState.currentProjectSettings,
desktopRuntime,
projectRootName,
projectRootPath,
- setProjectSettings
+ projectState
]
);
@@ -565,6 +387,18 @@ function App() {
[desktopRuntime, hasSavedProjectSettings, projectRootPath, saveCurrentProjectSettings]
);
+ const projectSettingsHandlers = useProjectSettingsHandlers({
+ saveCurrentProjectSettings,
+ scheduleProjectSettingsSave,
+ setConfiguredPrdPath: projectState.setConfiguredPrdPath,
+ setConfiguredSpecPath: projectState.setConfiguredSpecPath,
+ setPrdPromptTemplate: projectState.setPrdPromptTemplate,
+ setReasoningProfile: projectState.setReasoningProfile,
+ setSelectedModel: projectState.setSelectedModel,
+ setSpecPromptTemplate: projectState.setSpecPromptTemplate,
+ setSupportingDocumentPaths: projectState.setSupportingDocumentPaths
+ });
+
const handlePickProjectFolder = useCallback(async () => {
if (!desktopRuntime) {
setProjectErrorMessage("Project configuration requires the desktop runtime.");
@@ -620,11 +454,6 @@ function App() {
[assignDocument]
);
- const handleWorkspaceFolderSelection = useCallback(
- (_event: ChangeEvent) => undefined,
- []
- );
-
const handleWorkspaceFileOpen = useCallback(
async (path: string) => {
const file = workspaceFiles[path];
@@ -641,7 +470,7 @@ function App() {
}
const document = await parseWorkspaceTextFile(file.file);
- openEditorTab({
+ projectState.openEditorTab({
title: file.file.name,
path,
content: document.content
@@ -656,7 +485,7 @@ function App() {
try {
const content = await readWorkspaceFile(path);
- openEditorTab({
+ projectState.openEditorTab({
title: file.fileName,
path,
content
@@ -669,7 +498,7 @@ function App() {
);
}
},
- [openEditorTab, workspaceFiles]
+ [projectState, workspaceFiles]
);
const handleOpenImportFile = useCallback(
@@ -707,37 +536,50 @@ function App() {
);
const handleApproveSpec = useCallback(() => {
- if (!specContent.trim()) {
+ if (!projectState.specContent.trim()) {
return;
}
- approveSpec();
- appendTerminalOutput(stampLog("review", "Specification approved. Build controls are now armed."));
- }, [appendTerminalOutput, approveSpec, specContent]);
+ projectState.approveSpec();
+ agentState.appendTerminalOutput(
+ stampLog("review", "Specification approved. The active chat topics can now work from this spec.")
+ );
+ }, [agentState, projectState]);
const handleStartBuild = useCallback(async () => {
- if (!isSpecApproved) {
+ if (!projectState.isSpecApproved) {
return;
}
- const modelLabel = getModelLabel(selectedModel);
- const reasoningLabel = getReasoningLabel(selectedModel, selectedReasoning);
+ const modelLabel = getModelLabel(projectState.selectedModel);
+ const reasoningLabel = getReasoningLabel(
+ projectState.selectedModel,
+ projectState.selectedReasoning
+ );
clearFallbackTimer(fallbackTimerRef);
- resetRun();
- setActiveTab("execute");
- setAgentStatus("executing");
- setCurrentMilestone("Pre-flight Check");
- appendTerminalOutput(
- stampLog("build", `Starting spec-driven build run with ${modelLabel} (${reasoningLabel} reasoning).`)
+ agentState.resetRun();
+ projectState.setActiveTab("execute");
+ agentState.setStatus("executing");
+ agentState.setCurrentMilestone("Pre-flight Check");
+ agentState.appendTerminalOutput(
+ stampLog(
+ "build",
+ `Starting spec-driven build run with ${modelLabel} (${reasoningLabel} reasoning).`
+ )
);
if (desktopRuntime) {
try {
- await startAgentRun(specContent, autonomyMode, selectedModel, selectedReasoning);
+ await startAgentRun(
+ projectState.specContent,
+ projectState.autonomyMode,
+ projectState.selectedModel,
+ projectState.selectedReasoning
+ );
return;
} catch (error) {
- appendTerminalOutput(
+ agentState.appendTerminalOutput(
stampLog(
"error",
`${error instanceof Error ? error.message : "Agent startup failed."} Falling back to the local simulator.`
@@ -746,36 +588,34 @@ function App() {
}
}
- fallbackStepsRef.current = buildFallbackSteps(autonomyMode, modelLabel, reasoningLabel);
+ fallbackStepsRef.current = buildFallbackSteps(
+ projectState.autonomyMode,
+ modelLabel,
+ reasoningLabel
+ );
fallbackIndexRef.current = 0;
- runFallbackStep(useAgentStore.getState(), fallbackStepsRef, fallbackIndexRef, fallbackTimerRef, setLatestDiff);
- }, [
- appendTerminalOutput,
- autonomyMode,
- desktopRuntime,
- isSpecApproved,
- resetRun,
- selectedModel,
- selectedReasoning,
- setActiveTab,
- setAgentStatus,
- setCurrentMilestone,
- specContent
- ]);
+ runFallbackStep(
+ useAgentStore.getState(),
+ fallbackStepsRef,
+ fallbackIndexRef,
+ fallbackTimerRef,
+ setLatestDiff
+ );
+ }, [agentState, desktopRuntime, projectState]);
const handleApproveExecutionGate = useCallback(async () => {
- if (agentStatus !== "awaiting_approval") {
+ if (agentState.status !== "awaiting_approval") {
return;
}
if (desktopRuntime) {
try {
await approveAgentAction();
- appendTerminalOutput(stampLog("gate", "Approval received. Resuming execution."));
- setPendingDiff(null);
- setAgentStatus("executing");
+ agentState.appendTerminalOutput(stampLog("gate", "Approval received. Resuming execution."));
+ agentState.setPendingDiff(null);
+ agentState.setStatus("executing");
} catch (error) {
- appendTerminalOutput(
+ agentState.appendTerminalOutput(
stampLog(
"error",
error instanceof Error ? error.message : "Unable to approve the current execution gate."
@@ -785,22 +625,30 @@ function App() {
return;
}
- appendTerminalOutput(stampLog("gate", "Approval received. Resuming execution."));
- setPendingDiff(null);
- setAgentStatus("executing");
- runFallbackStep(useAgentStore.getState(), fallbackStepsRef, fallbackIndexRef, fallbackTimerRef, setLatestDiff);
- }, [agentStatus, appendTerminalOutput, desktopRuntime, setAgentStatus, setPendingDiff]);
+ agentState.appendTerminalOutput(stampLog("gate", "Approval received. Resuming execution."));
+ agentState.setPendingDiff(null);
+ agentState.setStatus("executing");
+ runFallbackStep(
+ useAgentStore.getState(),
+ fallbackStepsRef,
+ fallbackIndexRef,
+ fallbackTimerRef,
+ setLatestDiff
+ );
+ }, [agentState, desktopRuntime]);
const handleEmergencyStop = useCallback(async () => {
if (desktopRuntime) {
try {
await emergencyStop();
- setAgentStatus("halted");
- setExecutionSummary("Execution stopped by the operator.");
- setPendingDiff(null);
- appendTerminalOutput(stampLog("halt", "Emergency stop triggered. Agent loop is paused."));
+ agentState.setStatus("halted");
+ agentState.setExecutionSummary("Execution stopped by the operator.");
+ agentState.setPendingDiff(null);
+ agentState.appendTerminalOutput(
+ stampLog("halt", "Emergency stop triggered. Agent loop is paused.")
+ );
} catch (error) {
- appendTerminalOutput(
+ agentState.appendTerminalOutput(
stampLog(
"error",
error instanceof Error ? error.message : "Unable to stop the current execution run."
@@ -811,67 +659,13 @@ function App() {
}
clearFallbackTimer(fallbackTimerRef);
- setAgentStatus("halted");
- setExecutionSummary("Execution stopped by the operator.");
- setPendingDiff(null);
- appendTerminalOutput(stampLog("halt", "Emergency stop triggered. Agent loop is paused."));
- }, [appendTerminalOutput, desktopRuntime, setAgentStatus, setExecutionSummary, setPendingDiff]);
-
- const handlePrdContentChange = useCallback(
- (value: string) => setPrdContent(value, prdPath),
- [prdPath, setPrdContent]
- );
-
- const handleSpecContentChange = useCallback(
- (value: string) => {
- if (value.trim()) {
- setSpecGenerationError("");
- }
-
- setSpecContent(value, specPath);
- },
- [setSpecContent, specPath]
- );
-
- const handleSpecSelect = useCallback(
- (event: ChangeEvent) => {
- const { selectionStart, selectionEnd, value } = event.target;
- setSelectedSpecRange(
- selectionStart === selectionEnd
- ? null
- : {
- start: selectionStart,
- end: selectionEnd,
- text: value.slice(selectionStart, selectionEnd)
- }
- );
- },
- [setSelectedSpecRange]
- );
-
- const handlePrdGenerationPromptChange = useCallback((value: string) => {
- setPrdGenerationPrompt(value);
-
- if (prdGenerationError) {
- setPrdGenerationError("");
- }
-
- if (agentStatus === "error") {
- setAgentStatus("idle");
- }
- }, [agentStatus, prdGenerationError, setAgentStatus]);
-
- const handleSpecGenerationPromptChange = useCallback((value: string) => {
- setSpecGenerationPrompt(value);
-
- if (specGenerationError) {
- setSpecGenerationError("");
- }
-
- if (agentStatus === "error") {
- setAgentStatus("idle");
- }
- }, [agentStatus, setAgentStatus, specGenerationError]);
+ agentState.setStatus("halted");
+ agentState.setExecutionSummary("Execution stopped by the operator.");
+ agentState.setPendingDiff(null);
+ agentState.appendTerminalOutput(
+ stampLog("halt", "Emergency stop triggered. Agent loop is paused.")
+ );
+ }, [agentState, desktopRuntime]);
const handleGeneratePrd = useCallback(async () => {
const trimmedPrompt = prdGenerationPrompt.trim();
@@ -886,7 +680,7 @@ function App() {
return;
}
- if (!currentProjectSettings.prdPath.toLowerCase().endsWith(".md")) {
+ if (!derivedState.currentProjectSettings.prdPath.toLowerCase().endsWith(".md")) {
setPrdGenerationError("Configure the PRD path as a Markdown file before generating.");
return;
}
@@ -897,34 +691,36 @@ function App() {
}
setPrdGenerationError("");
- setAgentStatus("generating_prd");
- appendTerminalOutput(
+ agentState.setStatus("generating_prd");
+ agentState.appendTerminalOutput(
stampLog(
"prd",
- `Generating a PRD draft with ${getModelLabel(selectedModel)} (${getReasoningLabel(selectedModel, selectedReasoning)} reasoning).`
+ `Generating a PRD draft with ${getModelLabel(projectState.selectedModel)} (${getReasoningLabel(projectState.selectedModel, projectState.selectedReasoning)} reasoning).`
)
);
try {
+ await waitForNextPaint();
+
const generatedPrd = await generatePrdDocument({
workspaceRoot: projectRootPath,
- outputPath: currentProjectSettings.prdPath,
- promptTemplate: currentProjectSettings.prdPrompt,
+ outputPath: derivedState.currentProjectSettings.prdPath,
+ promptTemplate: derivedState.currentProjectSettings.prdPrompt,
userPrompt: trimmedPrompt,
- provider: selectedModelProvider,
- model: selectedModel,
- reasoning: selectedReasoning,
- claudePath,
- codexPath
+ provider: derivedState.selectedModelProvider,
+ model: projectState.selectedModel,
+ reasoning: projectState.selectedReasoning,
+ claudePath: settingsState.claudePath,
+ codexPath: settingsState.codexPath
});
startTransition(() => {
- setPrdContent(generatedPrd.content, generatedPrd.sourcePath);
- setPrdPaneMode("preview");
+ projectState.setPrdContent(generatedPrd.content, generatedPrd.sourcePath);
+ projectState.setPrdPaneMode("preview");
});
setPrdGenerationPrompt("");
- setAgentStatus("idle");
- appendTerminalOutput(
+ agentState.setStatus("idle");
+ agentState.appendTerminalOutput(
stampLog(
"prd",
`PRD draft generated, saved to ${generatedPrd.fileName}, and loaded into the review pane.`
@@ -933,24 +729,18 @@ function App() {
} catch (error) {
const message = error instanceof Error ? error.message : "Unable to generate a PRD.";
setPrdGenerationError(message);
- setAgentStatus("error");
- appendTerminalOutput(stampLog("error", message));
+ agentState.setStatus("error");
+ agentState.appendTerminalOutput(stampLog("error", message));
}
}, [
- appendTerminalOutput,
- claudePath,
- codexPath,
- currentProjectSettings.prdPath,
- currentProjectSettings.prdPrompt,
+ agentState,
+ derivedState.currentProjectSettings,
+ derivedState.selectedModelProvider,
desktopRuntime,
prdGenerationPrompt,
projectRootPath,
- selectedModel,
- selectedModelProvider,
- selectedReasoning,
- setAgentStatus,
- setPrdContent,
- setPrdPaneMode
+ projectState,
+ settingsState
]);
const handleGenerateSpec = useCallback(async () => {
@@ -966,12 +756,12 @@ function App() {
return;
}
- if (!prdContent.trim()) {
+ if (!projectState.prdContent.trim()) {
setSpecGenerationError("Load or generate a PRD before drafting a specification.");
return;
}
- if (!currentProjectSettings.specPath.toLowerCase().endsWith(".md")) {
+ if (!derivedState.currentProjectSettings.specPath.toLowerCase().endsWith(".md")) {
setSpecGenerationError("Configure the spec path as a Markdown file before generating.");
return;
}
@@ -982,35 +772,37 @@ function App() {
}
setSpecGenerationError("");
- setAgentStatus("generating_spec");
- appendTerminalOutput(
+ agentState.setStatus("generating_spec");
+ agentState.appendTerminalOutput(
stampLog(
"spec",
- `Generating a technical specification with ${getModelLabel(selectedModel)} (${getReasoningLabel(selectedModel, selectedReasoning)} reasoning).`
+ `Generating a technical specification with ${getModelLabel(projectState.selectedModel)} (${getReasoningLabel(projectState.selectedModel, projectState.selectedReasoning)} reasoning).`
)
);
try {
+ await waitForNextPaint();
+
const generatedSpec = await generateSpecDocument({
workspaceRoot: projectRootPath,
- outputPath: currentProjectSettings.specPath,
- prdContent,
- promptTemplate: currentProjectSettings.specPrompt,
+ outputPath: derivedState.currentProjectSettings.specPath,
+ prdContent: projectState.prdContent,
+ promptTemplate: derivedState.currentProjectSettings.specPrompt,
userPrompt: trimmedPrompt,
- provider: selectedModelProvider,
- model: selectedModel,
- reasoning: selectedReasoning,
- claudePath,
- codexPath
+ provider: derivedState.selectedModelProvider,
+ model: projectState.selectedModel,
+ reasoning: projectState.selectedReasoning,
+ claudePath: settingsState.claudePath,
+ codexPath: settingsState.codexPath
});
startTransition(() => {
- setSpecContent(generatedSpec.content, generatedSpec.sourcePath);
- setSpecPaneMode("preview");
+ projectState.setSpecContent(generatedSpec.content, generatedSpec.sourcePath);
+ projectState.setSpecPaneMode("preview");
});
setSpecGenerationPrompt("");
- setAgentStatus("idle");
- appendTerminalOutput(
+ agentState.setStatus("idle");
+ agentState.appendTerminalOutput(
stampLog(
"spec",
`Specification draft generated, saved to ${generatedSpec.fileName}, and loaded into the review pane.`
@@ -1020,256 +812,437 @@ function App() {
const message =
error instanceof Error ? error.message : "Unable to generate a specification.";
setSpecGenerationError(message);
- setAgentStatus("error");
- appendTerminalOutput(stampLog("error", message));
+ agentState.setStatus("error");
+ agentState.appendTerminalOutput(stampLog("error", message));
}
}, [
- appendTerminalOutput,
- claudePath,
- codexPath,
- currentProjectSettings.specPath,
- currentProjectSettings.specPrompt,
+ agentState,
+ derivedState.currentProjectSettings,
+ derivedState.selectedModelProvider,
desktopRuntime,
- prdContent,
projectRootPath,
- selectedModel,
- selectedModelProvider,
- selectedReasoning,
- setAgentStatus,
- setSpecContent,
- setSpecPaneMode,
+ projectState,
+ settingsState,
specGenerationPrompt
]);
- const handleProjectModelChange = useCallback((model: typeof selectedModel) => {
- setSelectedModel(model);
- scheduleProjectSettingsSave(false);
- }, [scheduleProjectSettingsSave, setSelectedModel]);
-
- const handleProjectReasoningChange = useCallback((reasoning: typeof selectedReasoning) => {
- setReasoningProfile(reasoning);
- scheduleProjectSettingsSave(false);
- }, [scheduleProjectSettingsSave, setReasoningProfile]);
-
- const handlePrdPromptTemplateChange = useCallback((value: string) => {
- setPrdPromptTemplate(value);
- scheduleProjectSettingsSave(false);
- }, [scheduleProjectSettingsSave, setPrdPromptTemplate]);
-
- const handleSpecPromptTemplateChange = useCallback((value: string) => {
- setSpecPromptTemplate(value);
- scheduleProjectSettingsSave(false);
- }, [scheduleProjectSettingsSave, setSpecPromptTemplate]);
-
- const handleConfiguredPrdPathChange = useCallback((value: string) => {
- setConfiguredPrdPath(normalizeProjectRelativePath(value));
- scheduleProjectSettingsSave(true);
- }, [scheduleProjectSettingsSave, setConfiguredPrdPath]);
-
- const handleConfiguredSpecPathChange = useCallback((value: string) => {
- setConfiguredSpecPath(normalizeProjectRelativePath(value));
- scheduleProjectSettingsSave(true);
- }, [scheduleProjectSettingsSave, setConfiguredSpecPath]);
-
- const handleSupportingDocumentsChange = useCallback((value: string) => {
- setSupportingDocumentPaths(parseSupportingDocumentPaths(value));
- scheduleProjectSettingsSave(false);
- }, [scheduleProjectSettingsSave, setSupportingDocumentPaths]);
-
- const handleCommandSearchChange = useCallback(
- (event: ChangeEvent) => setCommandSearch(event.target.value),
- []
+ const uiHandlers = useAppUiHandlers({
+ agentState,
+ handleApproveExecutionGate,
+ handleEmergencyStop,
+ handleGeneratePrd,
+ handleGenerateSpec,
+ handleOpenImportFile,
+ handleStartBuild,
+ handleWorkspaceFileOpen,
+ prdGenerationError,
+ projectState,
+ refreshDiagnostics,
+ setCommandSearch,
+ setIsSearchOpen,
+ setPrdGenerationError,
+ setPrdGenerationPrompt,
+ setSpecGenerationError,
+ setSpecGenerationPrompt,
+ specGenerationError
+ });
+
+ const persistChatSession = useCallback(
+ async (payload: {
+ sessionId: string;
+ selectedModel: ChatSession["selectedModel"];
+ selectedReasoning: ChatSession["selectedReasoning"];
+ autonomyMode: ChatSession["autonomyMode"];
+ contextItems: ChatContextItem[];
+ }) => {
+ const nextSession = await saveChatSession(payload);
+ upsertSession(nextSession);
+ return nextSession;
+ },
+ [upsertSession]
);
- const closeWorkspaceSearch = useCallback(() => {
- setIsSearchOpen(false);
- setCommandSearch("");
- }, []);
+ const handleCreateChatSessionClick = useCallback(async () => {
+ try {
+ const nextSession = await createChatSession();
+ upsertSession(nextSession);
+ setActiveSessionId(nextSession.id);
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to create a new chat topic."
+ );
+ }
+ }, [setActiveSessionId, upsertSession]);
- const handleRefresh = useCallback(() => {
- void refreshDiagnostics();
- }, [refreshDiagnostics]);
+ const handleSelectChatSession = useCallback(
+ (sessionId: string) => {
+ setActiveSessionId(sessionId);
+ },
+ [setActiveSessionId]
+ );
- const handleOpenPrdImportClick = useCallback(() => {
- void handleOpenImportFile("prd");
- }, [handleOpenImportFile]);
+ const handleRenameChatSession = useCallback(
+ async (sessionId: string, title: string) => {
+ try {
+ await renameChatSession({ sessionId, title });
+ const nextSession = await loadChatSession(sessionId);
+ upsertSession(nextSession);
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to rename the selected chat topic."
+ );
+ }
+ },
+ [upsertSession]
+ );
- const handleOpenSpecImportClick = useCallback(() => {
- void handleOpenImportFile("spec");
- }, [handleOpenImportFile]);
+ const handleDeleteChatSession = useCallback(
+ async (sessionId: string) => {
+ const confirmed = window.confirm("Delete this topic and its saved context?");
- const handleStartBuildClick = useCallback(() => {
- void handleStartBuild();
- }, [handleStartBuild]);
+ if (!confirmed) {
+ return;
+ }
+
+ try {
+ const nextIndex = await deleteChatSession(sessionId);
+ deleteChatSessionState(sessionId, nextIndex.lastActiveSessionId);
+ setChatSessions(nextIndex.sessions);
+
+ if (nextIndex.lastActiveSessionId) {
+ setActiveSessionId(nextIndex.lastActiveSessionId);
+ }
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to delete the selected chat topic."
+ );
+ }
+ },
+ [deleteChatSessionState, setActiveSessionId, setChatSessions]
+ );
+
+ const handleChatDraftChange = useCallback(
+ (value: string) => {
+ if (!activeSessionId) {
+ return;
+ }
- const handleApproveExecutionGateClick = useCallback(() => {
- void handleApproveExecutionGate();
- }, [handleApproveExecutionGate]);
+ setChatDraft(activeSessionId, value);
+ },
+ [activeSessionId, setChatDraft]
+ );
- const handleEmergencyStopClick = useCallback(() => {
- void handleEmergencyStop();
- }, [handleEmergencyStop]);
+ const handleSendChatMessage = useCallback(async () => {
+ if (!activeChatSession || !activeChatDraft.trim() || !cavemanReady) {
+ return;
+ }
- const handleWorkspaceFileOpenClick = useCallback((path: string) => {
- void handleWorkspaceFileOpen(path);
- }, [handleWorkspaceFileOpen]);
+ try {
+ await sendChatMessage({
+ sessionId: activeChatSession.id,
+ message: activeChatDraft,
+ claudePath: settingsState.claudePath,
+ codexPath: settingsState.codexPath
+ });
+ setChatDraft(activeChatSession.id, "");
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to send the current chat message."
+ );
+ }
+ }, [activeChatDraft, activeChatSession, cavemanReady, setChatDraft, settingsState]);
- const handleGeneratePrdClick = useCallback(() => {
- void handleGeneratePrd();
- }, [handleGeneratePrd]);
+ const handleApproveChatSession = useCallback(async () => {
+ if (!activeChatSession) {
+ return;
+ }
- const handleGenerateSpecClick = useCallback(() => {
- void handleGenerateSpec();
- }, [handleGenerateSpec]);
+ try {
+ await approveChatSession(activeChatSession.id);
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to approve the active chat topic."
+ );
+ }
+ }, [activeChatSession]);
- const handleSaveConfigurationAndContinue = useCallback(() => {
- void saveCurrentProjectSettings({ reloadProject: true, navigateToReview: true });
- }, [saveCurrentProjectSettings]);
- useEffect(() => {
- if (typeof window === "undefined" || !window.matchMedia) {
+ const handleStopChatSession = useCallback(async () => {
+ if (!activeChatSession) {
return;
}
- const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)");
- setSystemPrefersDark(mediaQuery.matches);
- const handleThemeChange = (event: MediaQueryListEvent) => setSystemPrefersDark(event.matches);
- mediaQuery.addEventListener("change", handleThemeChange);
- return () => mediaQuery.removeEventListener("change", handleThemeChange);
- }, []);
+ try {
+ await stopChatSession(activeChatSession.id);
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to stop the active chat topic."
+ );
+ }
+ }, [activeChatSession]);
+
+ const handleSaveChatSessionConfig = useCallback(
+ async (payload: {
+ sessionId: string;
+ selectedModel: ChatSession["selectedModel"];
+ selectedReasoning: ChatSession["selectedReasoning"];
+ autonomyMode: ChatSession["autonomyMode"];
+ contextItems: ChatContextItem[];
+ }) => {
+ setSessionConfig(payload);
+ setChatContextItems(payload.sessionId, payload.contextItems);
- useEffect(() => {
- document.documentElement.dataset.theme = resolvedTheme;
- document.documentElement.classList.toggle("dark", resolvedTheme === "dracula");
- }, [resolvedTheme]);
+ try {
+ await persistChatSession(payload);
+ } catch (error) {
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to save the current chat topic."
+ );
+ }
+ },
+ [persistChatSession, setChatContextItems, setSessionConfig]
+ );
- useEffect(() => {
- const handleKeyDown = (event: globalThis.KeyboardEvent) => {
- if (event.defaultPrevented || event.isComposing) {
+ const handleAttachChatFile = useCallback(
+ (path: string) => {
+ if (!activeChatSession) {
return;
}
- const isFindShortcut =
- (event.ctrlKey || event.metaKey) &&
- !event.altKey &&
- !event.shiftKey &&
- event.key.toLowerCase() === "f";
+ if (activeChatSession.contextItems.some((item) => item.path === path)) {
+ return;
+ }
- if (isFindShortcut) {
- if (!isReviewRoute) {
- return;
+ const nextContextItems = [
+ ...activeChatSession.contextItems,
+ {
+ id: `file-${Date.now().toString(36)}`,
+ kind: "file" as const,
+ label: path.split("/").pop() ?? path,
+ path,
+ isDefault: false
}
+ ];
+
+ void handleSaveChatSessionConfig({
+ sessionId: activeChatSession.id,
+ selectedModel: activeChatSession.selectedModel,
+ selectedReasoning: activeChatSession.selectedReasoning,
+ autonomyMode: activeChatSession.autonomyMode,
+ contextItems: nextContextItems
+ });
+ },
+ [activeChatSession, handleSaveChatSessionConfig]
+ );
- event.preventDefault();
- setIsSearchOpen((currentValue) => {
- if (currentValue) {
- setCommandSearch("");
- return false;
- }
-
- return true;
- });
+ const handleRemoveChatContextItem = useCallback(
+ (itemId: string) => {
+ if (!activeChatSession) {
return;
}
- if (event.key === "Escape" && isSearchOpen) {
- event.preventDefault();
- closeWorkspaceSearch();
- }
- };
-
- window.addEventListener("keydown", handleKeyDown);
- return () => window.removeEventListener("keydown", handleKeyDown);
- }, [closeWorkspaceSearch, isReviewRoute, isSearchOpen]);
+ const nextContextItems = activeChatSession.contextItems.filter(
+ (item) => item.id !== itemId
+ );
- useEffect(() => {
- if (!isReviewRoute && isSearchOpen) {
- closeWorkspaceSearch();
- }
- }, [closeWorkspaceSearch, isReviewRoute, isSearchOpen]);
+ void handleSaveChatSessionConfig({
+ sessionId: activeChatSession.id,
+ selectedModel: activeChatSession.selectedModel,
+ selectedReasoning: activeChatSession.selectedReasoning,
+ autonomyMode: activeChatSession.autonomyMode,
+ contextItems: nextContextItems
+ });
+ },
+ [activeChatSession, handleSaveChatSessionConfig]
+ );
+ const handleOpenChat = useCallback(() => {
+ navigate("/chat");
+ }, [navigate]);
+
+ const handleOpenReview = useCallback(() => {
+ navigate("/review");
+ }, [navigate]);
+
+ useSystemThemePreference(setSystemPrefersDark);
+ useDocumentTheme(derivedState.resolvedTheme);
+ useWorkspaceSearchShortcuts({
+ closeWorkspaceSearch: uiHandlers.closeWorkspaceSearch,
+ isReviewRoute,
+ isSearchOpen,
+ setCommandSearch,
+ setIsSearchOpen
+ });
+ useWorkspaceSearchRouteReset({
+ closeWorkspaceSearch: uiHandlers.closeWorkspaceSearch,
+ isReviewRoute,
+ isSearchOpen
+ });
+ useWorkspaceSearchFocus({
+ isReviewRoute,
+ isSearchOpen,
+ searchInputRef
+ });
+ useInitialDiagnostics({
+ environment: settingsState.environment,
+ hasScannedEnvironmentRef,
+ refreshDiagnostics
+ });
+ useProjectRestore({
+ applyProjectContext,
+ desktopRuntime,
+ hasAttemptedProjectRestore,
+ lastProjectPath: settingsState.lastProjectPath,
+ setHasAttemptedProjectRestore,
+ setIsProjectLoading,
+ setLastProjectPath: settingsState.setLastProjectPath
+ });
useEffect(() => {
- if (!isSearchOpen || !isReviewRoute) {
+ if (
+ !desktopRuntime ||
+ !activeSessionId ||
+ loadedSessions[activeSessionId]
+ ) {
return;
}
- const focusFrame = window.requestAnimationFrame(() => {
- searchInputRef.current?.focus();
- searchInputRef.current?.select();
- });
+ let isDisposed = false;
- return () => window.cancelAnimationFrame(focusFrame);
- }, [isReviewRoute, isSearchOpen]);
+ void loadChatSession(activeSessionId)
+ .then((session) => {
+ if (isDisposed) {
+ return;
+ }
- useEffect(() => {
- if (hasScannedEnvironmentRef.current) {
- return;
- }
+ upsertSession(session);
+ })
+ .catch((error) => {
+ if (isDisposed) {
+ return;
+ }
+
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to load the selected chat topic."
+ );
+ });
- hasScannedEnvironmentRef.current = true;
- void refreshDiagnostics(environment);
- }, [environment, refreshDiagnostics]);
+ return () => {
+ isDisposed = true;
+ };
+ }, [activeSessionId, desktopRuntime, loadedSessions, upsertSession]);
useEffect(() => {
- if (hasAttemptedProjectRestore || !desktopRuntime) {
- return;
- }
-
- if (!lastProjectPath.trim()) {
- setHasAttemptedProjectRestore(true);
+ if (
+ !desktopRuntime ||
+ !hasSavedProjectSettings ||
+ !isChatRoute ||
+ chatSessions.length > 0
+ ) {
return;
}
let isDisposed = false;
- setIsProjectLoading(true);
- void loadProjectContext(lastProjectPath)
- .then((context) => {
+ void createChatSession()
+ .then((session) => {
if (isDisposed) {
return;
}
- applyProjectContext(context, {
- navigateToReview: context.hasSavedSettings
- });
+ upsertSession(session);
+ setActiveSessionId(session.id);
})
- .catch(() => {
+ .catch((error) => {
+ if (isDisposed) {
+ return;
+ }
+
+ setProjectErrorMessage(
+ error instanceof Error ? error.message : "Unable to create the first chat topic."
+ );
+ });
+
+ return () => {
+ isDisposed = true;
+ };
+ }, [
+ chatSessions.length,
+ desktopRuntime,
+ hasSavedProjectSettings,
+ isChatRoute,
+ setActiveSessionId,
+ upsertSession
+ ]);
+
+ useEffect(() => {
+ if (
+ !desktopRuntime ||
+ !hasSavedProjectSettings ||
+ !isChatRoute ||
+ cavemanReady ||
+ cavemanChecking
+ ) {
+ return;
+ }
+
+ let isDisposed = false;
+ setCavemanStatus({
+ ready: false,
+ message: "Verifying Caveman skill...",
+ checking: true
+ });
+
+ void ensureCavemanSkill()
+ .then((status) => {
if (isDisposed) {
return;
}
- setLastProjectPath("");
+ setCavemanStatus({
+ ready: status.ready,
+ message: status.detail
+ });
})
- .finally(() => {
+ .catch((error) => {
if (isDisposed) {
return;
}
- setIsProjectLoading(false);
- setHasAttemptedProjectRestore(true);
+ setCavemanStatus({
+ ready: false,
+ message:
+ error instanceof Error
+ ? error.message
+ : "Unable to verify the Caveman skill for this workspace."
+ });
});
return () => {
isDisposed = true;
};
}, [
- applyProjectContext,
+ cavemanChecking,
+ cavemanReady,
desktopRuntime,
- hasAttemptedProjectRestore,
- lastProjectPath,
- setLastProjectPath
+ hasSavedProjectSettings,
+ isChatRoute,
+ setCavemanStatus
]);
useEffect(() => {
let unlisten: (() => void) | undefined;
let isDisposed = false;
- void subscribeToAgentEvents({
- onLine: appendTerminalOutput,
- onState: (payload) => {
- applyAgentEvent(payload);
- if (payload.pendingDiff) {
- setLatestDiff(payload.pendingDiff);
- }
+ void subscribeToChatSessionEvents((payload) => {
+ const nextSession = payload.session;
+
+ if (nextSession) {
+ upsertSession(nextSession);
+ } else if (payload.summary) {
+ const currentSessions = useChatStore.getState().sessions;
+ setChatSessions([
+ ...currentSessions.filter((entry) => entry.id !== payload.summary!.id),
+ payload.summary
+ ]);
}
}).then((dispose) => {
if (isDisposed) {
@@ -1283,143 +1256,112 @@ function App() {
return () => {
isDisposed = true;
unlisten?.();
- clearFallbackTimer(fallbackTimerRef);
-
- if (projectSaveTimerRef.current !== null) {
- window.clearTimeout(projectSaveTimerRef.current);
- projectSaveTimerRef.current = null;
- }
};
- }, [appendTerminalOutput, applyAgentEvent]);
+ }, [setChatSessions, upsertSession]);
+
+ useEffect(() => {
+ if (activeChatSession) {
+ agentState.syncFromChatRuntime(activeChatSession.runtime);
+ return;
+ }
+
+ agentState.resetRun();
+ }, [activeChatSession, agentState]);
+
+ useAgentEventSubscription({
+ appendTerminalOutput: agentState.appendTerminalOutput,
+ applyAgentEvent: agentState.applyEvent,
+ fallbackTimerRef,
+ projectSaveTimerRef,
+ setLatestDiff
+ });
+
+ const {
+ configurationScreenProps,
+ reviewScreenProps,
+ settingsScreenProps
+ } = useAppScreenProps({
+ agentState,
+ commandSearch,
+ derivedState,
+ desktopRuntime,
+ folderInputRef,
+ handleApproveSpec,
+ handleOpenChat,
+ handlePickProjectFolder,
+ hasSavedProjectSettings,
+ isImporting,
+ isProjectLoading,
+ isProjectSaving,
+ isSearchOpen,
+ reviewVisibleDiff,
+ prdGenerationError,
+ prdGenerationPrompt,
+ projectErrorMessage,
+ projectRootName,
+ projectRootPath,
+ projectSettingsHandlers,
+ projectState,
+ projectStatusMessage,
+ searchInputRef,
+ settingsState,
+ specGenerationError,
+ specGenerationPrompt,
+ uiHandlers,
+ workspaceNotice
+ });
+
+ const loadingState = (
+
+ Loading project configuration...
+
+ );
const reviewScreen = hasSavedProjectSettings ? (
- 0,
- onFileOpen: handleWorkspaceFileOpenClick,
- onFolderChange: handleWorkspaceFolderSelection,
- onOpenFolder: handlePickProjectFolder,
- workspaceEntries: filteredWorkspaceEntries,
- workspaceNotice,
- workspaceRootName: projectRootName
- }}
- isSearchOpen={isSearchOpen}
- isSpecApproved={isSpecApproved}
- mainWorkspaceProps={{
- activeTab,
- agentStatus,
- canGeneratePrd,
- canGenerateSpec,
- configPath: configPathDisplay,
- executionSummary,
- isGeneratingPrd,
- isGeneratingSpec,
- isSpecApproved,
- onActiveTabChange: setActiveTab,
- onApproveExecutionGate: handleApproveExecutionGateClick,
- onApproveSpec: handleApproveSpec,
- onEditorTabChange: updateEditorTabContent,
- onEditorTabClose: closeEditorTab,
- onEmergencyStop: handleEmergencyStopClick,
- onGeneratePrd: handleGeneratePrdClick,
- onGenerateSpec: handleGenerateSpecClick,
- onLoadPrd: handleOpenPrdImportClick,
- onLoadSpec: handleOpenSpecImportClick,
- onPrdContentChange: handlePrdContentChange,
- onPrdGenerationPromptChange: handlePrdGenerationPromptChange,
- onPrdPaneModeChange: setPrdPaneMode,
- onSpecContentChange: handleSpecContentChange,
- onSpecGenerationPromptChange: handleSpecGenerationPromptChange,
- onSpecPaneModeChange: setSpecPaneMode,
- onSpecSelect: handleSpecSelect,
- openEditorTabs,
- prdContent,
- prdGenerationError,
- prdGenerationHelperText,
- prdGenerationPrompt,
- prdPaneMode,
- prdPath,
- prdPromptTemplate,
- specContent,
- specGenerationError,
- specGenerationHelperText,
- specGenerationPrompt,
- specPaneMode,
- specPath,
- specPromptTemplate,
- terminalOutput,
- visibleDiff,
- workspaceRootName: projectRootName
- }}
- onCommandSearchChange={handleCommandSearchChange}
- onRefresh={handleRefresh}
- onStartBuild={handleStartBuildClick}
- searchInputRef={searchInputRef}
+
+ ) : hasAttemptedProjectRestore ? (
+
+ ) : (
+ loadingState
+ );
+
+ const chatScreen = hasSavedProjectSettings ? (
+
) : hasAttemptedProjectRestore ? (
) : (
-
- Loading project configuration...
-
+ loadingState
);
const settingsScreen = hasSavedProjectSettings ? (
-
+
) : hasAttemptedProjectRestore ? (
) : (
-
- Loading project configuration...
-
+ loadingState
);
return (
@@ -1437,46 +1379,16 @@ function App() {
- }
+ element={ }
path="/"
/>
+
- } path="*" />
+ }
+ path="*"
+ />
@@ -1484,16 +1396,3 @@ function App() {
}
export default App;
-
-function buildWorkspaceNotice(context: ProjectContext) {
- const loadedDocuments = [
- context.prdDocument?.fileName ? `PRD: ${context.prdDocument.fileName}` : null,
- context.specDocument?.fileName ? `SPEC: ${context.specDocument.fileName}` : null
- ].filter((value): value is string => value !== null);
-
- if (loadedDocuments.length === 0) {
- return `${context.rootName} is configured. No document exists yet at ${context.settings.prdPath} or ${context.settings.specPath}.`;
- }
-
- return `${context.rootName} is configured. Loaded ${loadedDocuments.join(" and ")} from the saved project paths.`;
-}
diff --git a/src/components/AppRail.tsx b/src/components/AppRail.tsx
index beb7d2d..7c8e8ec 100644
--- a/src/components/AppRail.tsx
+++ b/src/components/AppRail.tsx
@@ -1,4 +1,5 @@
import {
+ ChatBubble,
CodeBracketsSquare,
Flask,
Folder,
@@ -24,6 +25,20 @@ export function AppRail({ hasProjectConfigured }: AppRailProps) {
) : null}
+ {hasProjectConfigured ? (
+
+
+
+ ) : (
+
+
+
+ )}
+
{hasProjectConfigured ? (
diff --git a/src/components/DocumentPane.tsx b/src/components/DocumentPane.tsx
index 401b046..2fb6a22 100644
--- a/src/components/DocumentPane.tsx
+++ b/src/components/DocumentPane.tsx
@@ -1,4 +1,4 @@
-import { memo, type ChangeEvent, type ReactNode } from "react";
+import { memo, useEffect, useState, type ChangeEvent, type ReactNode } from "react";
import { MarkdownDocument } from "./MarkdownDocument";
import type { PaneMode } from "../types";
@@ -20,6 +20,21 @@ export const DocumentPane = memo(function DocumentPane({
onChange,
onSelect
}: DocumentPaneProps) {
+ const [renderedPreviewContent, setRenderedPreviewContent] = useState(() =>
+ mode === "preview" ? content : ""
+ );
+ const showPreviewPlaceholder = mode === "preview" && renderedPreviewContent !== content;
+
+ useEffect(() => {
+ if (mode !== "preview" || renderedPreviewContent === content) {
+ return;
+ }
+
+ return schedulePreviewRender(() => {
+ setRenderedPreviewContent(content);
+ });
+ }, [content, mode, renderedPreviewContent]);
+
return (
-
+ {showPreviewPlaceholder ? (
+
+ ) : (
+
+ )}
) : (
);
});
+
+function schedulePreviewRender(callback: () => void) {
+ let timeoutId: number | null = null;
+ const frameId = window.requestAnimationFrame(() => {
+ timeoutId = window.setTimeout(callback, 0);
+ });
+
+ return () => {
+ window.cancelAnimationFrame(frameId);
+
+ if (timeoutId !== null) {
+ window.clearTimeout(timeoutId);
+ }
+ };
+}
+
+function PreviewPlaceholder() {
+ return (
+
+ );
+}
diff --git a/src/components/ExecutionPanel.tsx b/src/components/ExecutionPanel.tsx
index ffad119..f8ff278 100644
--- a/src/components/ExecutionPanel.tsx
+++ b/src/components/ExecutionPanel.tsx
@@ -14,6 +14,7 @@ interface ExecutionPanelProps {
terminalOutput: string[];
executionSummary: string | null;
visibleDiff: string;
+ showControls?: boolean;
onApproveExecutionGate: () => void;
onEmergencyStop: () => void;
}
@@ -23,6 +24,7 @@ export const ExecutionPanel = memo(function ExecutionPanel({
terminalOutput,
executionSummary,
visibleDiff,
+ showControls = true,
onApproveExecutionGate,
onEmergencyStop
}: ExecutionPanelProps) {
@@ -39,7 +41,9 @@ export const ExecutionPanel = memo(function ExecutionPanel({
{terminalOutput.length === 0 ? (
- Approve the spec, then start a build to stream the agent loop here.
+ {showControls
+ ? "Approve the spec, then start a build to stream the agent loop here."
+ : "The active chat topic streams terminal output here. Review mode is read-only."}
) : (
terminalOutput.map((line, index) => (
@@ -50,23 +54,25 @@ export const ExecutionPanel = memo(function ExecutionPanel({
)}
-
- {agentStatus === "awaiting_approval" ? (
-
-
- Approve Gate
-
- ) : null}
+ {showControls ? (
+
+ {agentStatus === "awaiting_approval" ? (
+
+
+ Approve Gate
+
+ ) : null}
-
-
- Emergency Stop
-
-
+
+
+ Emergency Stop
+
+
+ ) : null}
@@ -81,7 +87,9 @@ export const ExecutionPanel = memo(function ExecutionPanel({
{executionSummary ||
- "Diff output stays visible across approval gates so the next mutation can be reviewed in context."}
+ (showControls
+ ? "Diff output stays visible across approval gates so the next mutation can be reviewed in context."
+ : "This panel mirrors the active chat topic diff so review stays aligned with the current session state.")}
diff --git a/src/components/MainWorkspace.tsx b/src/components/MainWorkspace.tsx
index b4933b8..678b471 100644
--- a/src/components/MainWorkspace.tsx
+++ b/src/components/MainWorkspace.tsx
@@ -47,6 +47,7 @@ interface MainWorkspaceProps {
executionSummary: string | null;
visibleDiff: string;
agentStatus: AgentStatus;
+ executionControlsEnabled?: boolean;
onActiveTabChange: (tab: WorkspaceTab) => void;
onPrdPaneModeChange: (mode: PaneMode) => void;
onSpecPaneModeChange: (mode: PaneMode) => void;
@@ -94,6 +95,7 @@ export const MainWorkspace = memo(function MainWorkspace({
executionSummary,
visibleDiff,
agentStatus,
+ executionControlsEnabled = true,
onActiveTabChange,
onPrdPaneModeChange,
onSpecPaneModeChange,
@@ -271,6 +273,7 @@ export const MainWorkspace = memo(function MainWorkspace({
executionSummary={executionSummary}
onApproveExecutionGate={onApproveExecutionGate}
onEmergencyStop={onEmergencyStop}
+ showControls={executionControlsEnabled}
terminalOutput={terminalOutput}
visibleDiff={visibleDiff}
/>
diff --git a/src/hooks/useAppLifecycle.ts b/src/hooks/useAppLifecycle.ts
new file mode 100644
index 0000000..9dd5044
--- /dev/null
+++ b/src/hooks/useAppLifecycle.ts
@@ -0,0 +1,299 @@
+import {
+ useEffect,
+ type Dispatch,
+ type MutableRefObject,
+ type RefObject,
+ type SetStateAction
+} from "react";
+
+import { clearFallbackTimer } from "../lib/appShell";
+import {
+ loadProjectContext,
+ subscribeToAgentEvents
+} from "../lib/runtime";
+import type {
+ AgentEventPayload,
+ EnvironmentStatus,
+ ProjectContext
+} from "../types";
+
+export function useSystemThemePreference(
+ setSystemPrefersDark: (nextValue: boolean) => void
+) {
+ useEffect(() => {
+ if (typeof window === "undefined" || !window.matchMedia) {
+ return;
+ }
+
+ const mediaQuery = window.matchMedia("(prefers-color-scheme: dark)");
+ setSystemPrefersDark(mediaQuery.matches);
+ const handleThemeChange = (event: MediaQueryListEvent) => {
+ setSystemPrefersDark(event.matches);
+ };
+
+ mediaQuery.addEventListener("change", handleThemeChange);
+ return () => mediaQuery.removeEventListener("change", handleThemeChange);
+ }, [setSystemPrefersDark]);
+}
+
+export function useDocumentTheme(resolvedTheme: string) {
+ useEffect(() => {
+ document.documentElement.dataset.theme = resolvedTheme;
+ document.documentElement.classList.toggle("dark", resolvedTheme === "dracula");
+ }, [resolvedTheme]);
+}
+
+interface WorkspaceSearchShortcutsOptions {
+ closeWorkspaceSearch: () => void;
+ isReviewRoute: boolean;
+ isSearchOpen: boolean;
+ setCommandSearch: Dispatch<SetStateAction<string>>;
+ setIsSearchOpen: Dispatch<SetStateAction<boolean>>;
+}
+
+export function useWorkspaceSearchShortcuts({
+ closeWorkspaceSearch,
+ isReviewRoute,
+ isSearchOpen,
+ setCommandSearch,
+ setIsSearchOpen
+}: WorkspaceSearchShortcutsOptions) {
+ useEffect(() => {
+ const handleKeyDown = (event: globalThis.KeyboardEvent) => {
+ if (event.defaultPrevented || event.isComposing) {
+ return;
+ }
+
+ const isFindShortcut =
+ (event.ctrlKey || event.metaKey) &&
+ !event.altKey &&
+ !event.shiftKey &&
+ event.key.toLowerCase() === "f";
+
+ if (isFindShortcut) {
+ if (!isReviewRoute) {
+ return;
+ }
+
+ event.preventDefault();
+ setIsSearchOpen((currentValue) => {
+ if (currentValue) {
+ setCommandSearch("");
+ return false;
+ }
+
+ return true;
+ });
+ return;
+ }
+
+ if (event.key === "Escape" && isSearchOpen) {
+ event.preventDefault();
+ closeWorkspaceSearch();
+ }
+ };
+
+ window.addEventListener("keydown", handleKeyDown);
+ return () => window.removeEventListener("keydown", handleKeyDown);
+ }, [
+ closeWorkspaceSearch,
+ isReviewRoute,
+ isSearchOpen,
+ setCommandSearch,
+ setIsSearchOpen
+ ]);
+}
+
+interface WorkspaceSearchRouteResetOptions {
+ closeWorkspaceSearch: () => void;
+ isReviewRoute: boolean;
+ isSearchOpen: boolean;
+}
+
+export function useWorkspaceSearchRouteReset({
+ closeWorkspaceSearch,
+ isReviewRoute,
+ isSearchOpen
+}: WorkspaceSearchRouteResetOptions) {
+ useEffect(() => {
+ if (!isReviewRoute && isSearchOpen) {
+ closeWorkspaceSearch();
+ }
+ }, [closeWorkspaceSearch, isReviewRoute, isSearchOpen]);
+}
+
+interface WorkspaceSearchFocusOptions {
+ isReviewRoute: boolean;
+ isSearchOpen: boolean;
+ searchInputRef: RefObject<HTMLInputElement>;
+}
+
+export function useWorkspaceSearchFocus({
+ isReviewRoute,
+ isSearchOpen,
+ searchInputRef
+}: WorkspaceSearchFocusOptions) {
+ useEffect(() => {
+ if (!isSearchOpen || !isReviewRoute) {
+ return;
+ }
+
+ const focusFrame = window.requestAnimationFrame(() => {
+ searchInputRef.current?.focus();
+ searchInputRef.current?.select();
+ });
+
+ return () => window.cancelAnimationFrame(focusFrame);
+ }, [isReviewRoute, isSearchOpen, searchInputRef]);
+}
+
+interface InitialDiagnosticsOptions {
+ environment: EnvironmentStatus;
+ hasScannedEnvironmentRef: MutableRefObject<boolean>;
+ refreshDiagnostics: (previousEnvironment?: EnvironmentStatus) => Promise<void>;
+}
+
+export function useInitialDiagnostics({
+ environment,
+ hasScannedEnvironmentRef,
+ refreshDiagnostics
+}: InitialDiagnosticsOptions) {
+ useEffect(() => {
+ if (hasScannedEnvironmentRef.current) {
+ return;
+ }
+
+ hasScannedEnvironmentRef.current = true;
+ void refreshDiagnostics(environment);
+ }, [environment, hasScannedEnvironmentRef, refreshDiagnostics]);
+}
+
+interface ProjectRestoreOptions {
+ applyProjectContext: (
+ context: ProjectContext,
+ options?: { navigateToChat?: boolean }
+ ) => void;
+ desktopRuntime: boolean;
+ hasAttemptedProjectRestore: boolean;
+ lastProjectPath: string;
+ setHasAttemptedProjectRestore: (nextValue: boolean) => void;
+ setIsProjectLoading: (nextValue: boolean) => void;
+ setLastProjectPath: (path: string) => void;
+}
+
+export function useProjectRestore({
+ applyProjectContext,
+ desktopRuntime,
+ hasAttemptedProjectRestore,
+ lastProjectPath,
+ setHasAttemptedProjectRestore,
+ setIsProjectLoading,
+ setLastProjectPath
+}: ProjectRestoreOptions) {
+ useEffect(() => {
+ if (hasAttemptedProjectRestore || !desktopRuntime) {
+ return;
+ }
+
+ if (!lastProjectPath.trim()) {
+ setHasAttemptedProjectRestore(true);
+ return;
+ }
+
+ let isDisposed = false;
+ setIsProjectLoading(true);
+
+ void loadProjectContext(lastProjectPath)
+ .then((context) => {
+ if (isDisposed) {
+ return;
+ }
+
+ applyProjectContext(context, {
+ navigateToChat: context.hasSavedSettings
+ });
+ })
+ .catch(() => {
+ if (isDisposed) {
+ return;
+ }
+
+ setLastProjectPath("");
+ })
+ .finally(() => {
+ if (isDisposed) {
+ return;
+ }
+
+ setIsProjectLoading(false);
+ setHasAttemptedProjectRestore(true);
+ });
+
+ return () => {
+ isDisposed = true;
+ };
+ }, [
+ applyProjectContext,
+ desktopRuntime,
+ hasAttemptedProjectRestore,
+ lastProjectPath,
+ setHasAttemptedProjectRestore,
+ setIsProjectLoading,
+ setLastProjectPath
+ ]);
+}
+
+interface AgentEventSubscriptionOptions {
+ appendTerminalOutput: (line: string) => void;
+ applyAgentEvent: (payload: AgentEventPayload) => void;
+ fallbackTimerRef: MutableRefObject<number | null>;
+ projectSaveTimerRef: MutableRefObject<number | null>;
+ setLatestDiff: Dispatch<SetStateAction<string>>;
+}
+
+export function useAgentEventSubscription({
+ appendTerminalOutput,
+ applyAgentEvent,
+ fallbackTimerRef,
+ projectSaveTimerRef,
+ setLatestDiff
+}: AgentEventSubscriptionOptions) {
+ useEffect(() => {
+ let unlisten: (() => void) | undefined;
+ let isDisposed = false;
+
+ void subscribeToAgentEvents({
+ onLine: appendTerminalOutput,
+ onState: (payload) => {
+ applyAgentEvent(payload);
+ if (payload.pendingDiff) {
+ setLatestDiff(payload.pendingDiff);
+ }
+ }
+ }).then((dispose) => {
+ if (isDisposed) {
+ dispose();
+ return;
+ }
+
+ unlisten = dispose;
+ });
+
+ return () => {
+ isDisposed = true;
+ unlisten?.();
+ clearFallbackTimer(fallbackTimerRef);
+
+ if (projectSaveTimerRef.current !== null) {
+ window.clearTimeout(projectSaveTimerRef.current);
+ projectSaveTimerRef.current = null;
+ }
+ };
+ }, [
+ appendTerminalOutput,
+ applyAgentEvent,
+ fallbackTimerRef,
+ projectSaveTimerRef,
+ setLatestDiff
+ ]);
+}
diff --git a/src/hooks/useAppStoreSlices.ts b/src/hooks/useAppStoreSlices.ts
new file mode 100644
index 0000000..d67145d
--- /dev/null
+++ b/src/hooks/useAppStoreSlices.ts
@@ -0,0 +1,119 @@
+import { useShallow } from "zustand/react/shallow";
+
+import { useAgentStore } from "../store/useAgentStore";
+import { useChatStore } from "../store/useChatStore";
+import { useProjectStore } from "../store/useProjectStore";
+import { useSettingsStore } from "../store/useSettingsStore";
+
+/**
+ * Selects the agent-run state and actions from useAgentStore.
+ * Wrapped in useShallow so consumers only re-render when one of the
+ * selected fields actually changes, not on every store update.
+ */
+export function useAgentStoreSlice() {
+  return useAgentStore(
+    useShallow((state) => ({
+      status: state.status,
+      terminalOutput: state.terminalOutput,
+      pendingDiff: state.pendingDiff,
+      executionSummary: state.executionSummary,
+      resetRun: state.resetRun,
+      appendTerminalOutput: state.appendTerminalOutput,
+      setStatus: state.setStatus,
+      setCurrentMilestone: state.setCurrentMilestone,
+      setPendingDiff: state.setPendingDiff,
+      setExecutionSummary: state.setExecutionSummary,
+      syncFromChatRuntime: state.syncFromChatRuntime,
+      applyEvent: state.applyEvent
+    }))
+  );
+}
+
+// NOTE(review): type argument lost in patch extraction — presumably
+// `ReturnType<typeof useAgentStoreSlice>`; confirm against the original file.
+export type AgentStoreSlice = ReturnType;
+
+/**
+ * Selects project-scoped state (documents, paths, prompts, model choices,
+ * editor tabs) and the matching actions from useProjectStore, with shallow
+ * equality to limit re-renders.
+ */
+export function useProjectStoreSlice() {
+  return useProjectStore(
+    useShallow((state) => ({
+      annotations: state.annotations,
+      activeTab: state.activeTab,
+      autonomyMode: state.autonomyMode,
+      configuredPrdPath: state.configuredPrdPath,
+      configuredSpecPath: state.configuredSpecPath,
+      isSpecApproved: state.isSpecApproved,
+      openEditorTabs: state.openEditorTabs,
+      prdContent: state.prdContent,
+      prdPaneMode: state.prdPaneMode,
+      prdPath: state.prdPath,
+      prdPromptTemplate: state.prdPromptTemplate,
+      selectedModel: state.selectedModel,
+      selectedReasoning: state.selectedReasoning,
+      specContent: state.specContent,
+      specPaneMode: state.specPaneMode,
+      specPath: state.specPath,
+      specPromptTemplate: state.specPromptTemplate,
+      supportingDocumentPaths: state.supportingDocumentPaths,
+      approveSpec: state.approveSpec,
+      closeEditorTab: state.closeEditorTab,
+      openEditorTab: state.openEditorTab,
+      resetWorkspaceContext: state.resetWorkspaceContext,
+      setActiveTab: state.setActiveTab,
+      setAutonomyMode: state.setAutonomyMode,
+      setConfiguredPrdPath: state.setConfiguredPrdPath,
+      setConfiguredSpecPath: state.setConfiguredSpecPath,
+      setPrdContent: state.setPrdContent,
+      setPrdPaneMode: state.setPrdPaneMode,
+      setPrdPromptTemplate: state.setPrdPromptTemplate,
+      setProjectSettings: state.setProjectSettings,
+      setReasoningProfile: state.setReasoningProfile,
+      setSelectedModel: state.setSelectedModel,
+      setSelectedSpecRange: state.setSelectedSpecRange,
+      setSpecContent: state.setSpecContent,
+      setSpecPaneMode: state.setSpecPaneMode,
+      setSpecPromptTemplate: state.setSpecPromptTemplate,
+      setSupportingDocumentPaths: state.setSupportingDocumentPaths,
+      updateEditorTabContent: state.updateEditorTabContent
+    }))
+  );
+}
+
+// NOTE(review): presumably `ReturnType<typeof useProjectStoreSlice>` — generic
+// argument stripped in this patch; confirm.
+export type ProjectStoreSlice = ReturnType;
+
+/**
+ * Selects chat-session state (sessions, drafts, caveman status) and actions
+ * from useChatStore, with shallow equality to limit re-renders.
+ */
+export function useChatStoreSlice() {
+  return useChatStore(
+    useShallow((state) => ({
+      sessions: state.sessions,
+      activeSessionId: state.activeSessionId,
+      loadedSessions: state.loadedSessions,
+      drafts: state.drafts,
+      cavemanReady: state.cavemanReady,
+      cavemanMessage: state.cavemanMessage,
+      cavemanChecking: state.cavemanChecking,
+      setSessions: state.setSessions,
+      setActiveSessionId: state.setActiveSessionId,
+      upsertSession: state.upsertSession,
+      setDraft: state.setDraft,
+      setContextItems: state.setContextItems,
+      setSessionConfig: state.setSessionConfig,
+      deleteSession: state.deleteSession,
+      setCavemanStatus: state.setCavemanStatus
+    }))
+  );
+}
+
+// NOTE(review): presumably `ReturnType<typeof useChatStoreSlice>` — generic
+// argument stripped in this patch; confirm.
+export type ChatStoreSlice = ReturnType;
+
+/**
+ * Selects app-level settings (CLI paths, environment status, theme,
+ * workspace entries, last project path) and setters from useSettingsStore,
+ * with shallow equality to limit re-renders.
+ */
+export function useSettingsStoreSlice() {
+  return useSettingsStore(
+    useShallow((state) => ({
+      claudePath: state.claudePath,
+      codexPath: state.codexPath,
+      environment: state.environment,
+      lastProjectPath: state.lastProjectPath,
+      theme: state.theme,
+      workspaceEntries: state.workspaceEntries,
+      setClaudePath: state.setClaudePath,
+      setCodexPath: state.setCodexPath,
+      setEnvironment: state.setEnvironment,
+      setLastProjectPath: state.setLastProjectPath,
+      setTheme: state.setTheme,
+      setWorkspaceEntries: state.setWorkspaceEntries
+    }))
+  );
+}
+
+// NOTE(review): presumably `ReturnType<typeof useSettingsStoreSlice>` — generic
+// argument stripped in this patch; confirm.
+export type SettingsStoreSlice = ReturnType;
diff --git a/src/hooks/useAppView.ts b/src/hooks/useAppView.ts
new file mode 100644
index 0000000..2881d1e
--- /dev/null
+++ b/src/hooks/useAppView.ts
@@ -0,0 +1,826 @@
+import {
+ useCallback,
+ useDeferredValue,
+ useMemo,
+ type ChangeEvent,
+ type ComponentProps,
+ type Dispatch,
+ type RefObject,
+ type SetStateAction
+} from "react";
+
+import {
+ filterWorkspaceEntries,
+ resolveTheme,
+ type DocumentTarget
+} from "../lib/appShell";
+import { getModelProvider } from "../lib/agentConfig";
+import {
+ formatSupportingDocumentPaths,
+ normalizeProjectRelativePath,
+ parseSupportingDocumentPaths
+} from "../lib/projectConfig";
+import {
+ buildConfigPathDisplay,
+ buildConfiguredModelProviders,
+ buildCurrentProjectSettings,
+ buildMcpItems,
+ getPrdGenerationHelperText,
+ getSpecGenerationHelperText
+} from "../lib/appState";
+import { ConfigurationScreen } from "../screens/ConfigurationScreen";
+import { PrdScreen } from "../screens/PrdScreen";
+import { SettingsScreen } from "../screens/SettingsScreen";
+import type { EnvironmentStatus } from "../types";
+import type {
+ AgentStoreSlice,
+ ProjectStoreSlice,
+ SettingsStoreSlice
+} from "./useAppStoreSlices";
+
+// Inputs for useAppDerivedState: the three store slices plus the ad-hoc UI
+// state (search text, generation prompts, project paths, runtime flags) that
+// the derived values are computed from.
+interface UseAppDerivedStateOptions {
+  agentState: AgentStoreSlice;
+  commandSearch: string;
+  desktopRuntime: boolean;
+  latestDiff: string;
+  prdGenerationPrompt: string;
+  projectConfigPath: string;
+  projectRootName: string;
+  projectRootPath: string;
+  projectState: ProjectStoreSlice;
+  settingsState: SettingsStoreSlice;
+  specGenerationPrompt: string;
+  systemPrefersDark: boolean;
+}
+
+/**
+ * Computes all memoized derived values the App shell needs: filtered
+ * workspace entries, provider/readiness info, normalized project settings,
+ * display paths, generation preconditions, and helper texts.
+ * Pure derivation — no state is written here.
+ */
+export function useAppDerivedState({
+  agentState,
+  commandSearch,
+  desktopRuntime,
+  latestDiff,
+  prdGenerationPrompt,
+  projectConfigPath,
+  projectRootName,
+  projectRootPath,
+  projectState,
+  settingsState,
+  specGenerationPrompt,
+  systemPrefersDark
+}: UseAppDerivedStateOptions) {
+  // Deferred copy of the search text keeps typing responsive while the
+  // (potentially large) workspace list is re-filtered.
+  const deferredSearch = useDeferredValue(commandSearch);
+
+  const filteredWorkspaceEntries = useMemo(
+    () => filterWorkspaceEntries(settingsState.workspaceEntries, deferredSearch),
+    [deferredSearch, settingsState.workspaceEntries]
+  );
+  const selectedModelProvider = useMemo(
+    () => getModelProvider(projectState.selectedModel),
+    [projectState.selectedModel]
+  );
+  const isGeneratingPrd = agentState.status === "generating_prd";
+  const isGeneratingSpec = agentState.status === "generating_spec";
+  // Prefer the live pending diff from the agent; fall back to the last seen one.
+  const visibleDiff = agentState.pendingDiff ?? latestDiff;
+  const resolvedTheme = useMemo(
+    () => resolveTheme(settingsState.theme, systemPrefersDark),
+    [settingsState.theme, systemPrefersDark]
+  );
+  const configuredModelProviders = useMemo(
+    () => buildConfiguredModelProviders(settingsState.environment),
+    [settingsState.environment]
+  );
+  const mcpItems = useMemo(
+    () => buildMcpItems(settingsState.environment),
+    [settingsState.environment]
+  );
+  // CLI readiness entry for whichever provider backs the selected model.
+  const selectedProviderStatus =
+    selectedModelProvider === "claude"
+      ? settingsState.environment.claude
+      : settingsState.environment.codex;
+  const currentProjectSettings = useMemo(
+    () =>
+      buildCurrentProjectSettings({
+        configuredPrdPath: projectState.configuredPrdPath,
+        configuredSpecPath: projectState.configuredSpecPath,
+        prdPromptTemplate: projectState.prdPromptTemplate,
+        selectedModel: projectState.selectedModel,
+        selectedReasoning: projectState.selectedReasoning,
+        specPromptTemplate: projectState.specPromptTemplate,
+        supportingDocumentPaths: projectState.supportingDocumentPaths
+      }),
+    [
+      projectState.configuredPrdPath,
+      projectState.configuredSpecPath,
+      projectState.prdPromptTemplate,
+      projectState.selectedModel,
+      projectState.selectedReasoning,
+      projectState.specPromptTemplate,
+      projectState.supportingDocumentPaths
+    ]
+  );
+  const configPathDisplay = useMemo(
+    () => buildConfigPathDisplay(projectConfigPath, projectRootName),
+    [projectConfigPath, projectRootName]
+  );
+  const supportingDocumentsValue = useMemo(
+    () => formatSupportingDocumentPaths(projectState.supportingDocumentPaths),
+    [projectState.supportingDocumentPaths]
+  );
+  // PRD generation requires: desktop runtime, not already generating, a chosen
+  // project root, a configured PRD path, and a non-empty prompt.
+  const canGeneratePrd = useMemo(
+    () =>
+      desktopRuntime &&
+      !isGeneratingPrd &&
+      projectRootPath.trim().length > 0 &&
+      projectState.configuredPrdPath.trim().length > 0 &&
+      prdGenerationPrompt.trim().length > 0,
+    [
+      desktopRuntime,
+      isGeneratingPrd,
+      prdGenerationPrompt,
+      projectRootPath,
+      projectState.configuredPrdPath
+    ]
+  );
+  // Spec generation additionally requires existing PRD content.
+  const canGenerateSpec = useMemo(
+    () =>
+      desktopRuntime &&
+      !isGeneratingSpec &&
+      projectRootPath.trim().length > 0 &&
+      projectState.prdContent.trim().length > 0 &&
+      projectState.configuredSpecPath.trim().length > 0 &&
+      specGenerationPrompt.trim().length > 0,
+    [
+      desktopRuntime,
+      isGeneratingSpec,
+      projectRootPath,
+      projectState.configuredSpecPath,
+      projectState.prdContent,
+      specGenerationPrompt
+    ]
+  );
+  const prdGenerationHelperText = useMemo(
+    () =>
+      getPrdGenerationHelperText({
+        configPathDisplay,
+        configuredDocumentPath: projectState.configuredPrdPath,
+        desktopRuntime,
+        generationPrompt: prdGenerationPrompt,
+        projectRootPath,
+        selectedModel: projectState.selectedModel,
+        selectedProviderStatus
+      }),
+    [
+      configPathDisplay,
+      desktopRuntime,
+      prdGenerationPrompt,
+      projectRootPath,
+      projectState.configuredPrdPath,
+      projectState.selectedModel,
+      selectedProviderStatus
+    ]
+  );
+  const specGenerationHelperText = useMemo(
+    () =>
+      getSpecGenerationHelperText({
+        configPathDisplay,
+        configuredDocumentPath: projectState.configuredSpecPath,
+        desktopRuntime,
+        generationPrompt: specGenerationPrompt,
+        prdContent: projectState.prdContent,
+        projectRootPath,
+        selectedModel: projectState.selectedModel,
+        selectedProviderStatus
+      }),
+    [
+      configPathDisplay,
+      desktopRuntime,
+      projectRootPath,
+      projectState.configuredSpecPath,
+      projectState.prdContent,
+      projectState.selectedModel,
+      selectedProviderStatus,
+      specGenerationPrompt
+    ]
+  );
+
+  return {
+    deferredSearch,
+    filteredWorkspaceEntries,
+    selectedModelProvider,
+    isGeneratingPrd,
+    isGeneratingSpec,
+    visibleDiff,
+    resolvedTheme,
+    configuredModelProviders,
+    mcpItems,
+    selectedProviderStatus,
+    currentProjectSettings,
+    configPathDisplay,
+    supportingDocumentsValue,
+    canGeneratePrd,
+    canGenerateSpec,
+    prdGenerationHelperText,
+    specGenerationHelperText
+  };
+}
+
+// NOTE(review): presumably `ReturnType<typeof useAppDerivedState>` — generic
+// argument stripped in this patch; confirm.
+export type AppDerivedState = ReturnType;
+
+// Flags for a project-settings save: optionally reload documents from disk
+// afterwards and/or navigate to the chat view on success.
+interface SaveCurrentProjectSettingsOptions {
+  reloadProject?: boolean;
+  navigateToChat?: boolean;
+}
+
+// NOTE(review): return-type generic appears stripped in this patch —
+// presumably `Promise<void>` (or similar); confirm against the original file.
+type SaveCurrentProjectSettings = (
+  options?: SaveCurrentProjectSettingsOptions
+) => Promise;
+
+// Inputs for useProjectSettingsHandlers: the save/schedule functions plus the
+// project-store setters the handlers wrap.
+interface UseProjectSettingsHandlersOptions {
+  saveCurrentProjectSettings: SaveCurrentProjectSettings;
+  scheduleProjectSettingsSave: (reloadProject?: boolean) => void;
+  setConfiguredPrdPath: ProjectStoreSlice["setConfiguredPrdPath"];
+  setConfiguredSpecPath: ProjectStoreSlice["setConfiguredSpecPath"];
+  setPrdPromptTemplate: ProjectStoreSlice["setPrdPromptTemplate"];
+  setReasoningProfile: ProjectStoreSlice["setReasoningProfile"];
+  setSelectedModel: ProjectStoreSlice["setSelectedModel"];
+  setSpecPromptTemplate: ProjectStoreSlice["setSpecPromptTemplate"];
+  setSupportingDocumentPaths: ProjectStoreSlice["setSupportingDocumentPaths"];
+}
+
+/**
+ * Builds stable change-handlers for project-scoped settings. Every handler
+ * updates the store and then schedules a debounced settings save; only the
+ * PRD/spec *path* handlers pass `true` to also reload documents from disk
+ * (the document location changed), and paths are normalized to
+ * project-relative form first.
+ */
+export function useProjectSettingsHandlers({
+  saveCurrentProjectSettings,
+  scheduleProjectSettingsSave,
+  setConfiguredPrdPath,
+  setConfiguredSpecPath,
+  setPrdPromptTemplate,
+  setReasoningProfile,
+  setSelectedModel,
+  setSpecPromptTemplate,
+  setSupportingDocumentPaths
+}: UseProjectSettingsHandlersOptions) {
+  // NOTE(review): `Parameters[0]` below is presumably
+  // `Parameters<typeof setSelectedModel>[0]` etc. — generics stripped in patch.
+  const handleProjectModelChange = useCallback(
+    (model: Parameters[0]) => {
+      setSelectedModel(model);
+      scheduleProjectSettingsSave(false);
+    },
+    [scheduleProjectSettingsSave, setSelectedModel]
+  );
+
+  const handleProjectReasoningChange = useCallback(
+    (reasoning: Parameters[0]) => {
+      setReasoningProfile(reasoning);
+      scheduleProjectSettingsSave(false);
+    },
+    [scheduleProjectSettingsSave, setReasoningProfile]
+  );
+
+  const handlePrdPromptTemplateChange = useCallback(
+    (value: string) => {
+      setPrdPromptTemplate(value);
+      scheduleProjectSettingsSave(false);
+    },
+    [scheduleProjectSettingsSave, setPrdPromptTemplate]
+  );
+
+  const handleSpecPromptTemplateChange = useCallback(
+    (value: string) => {
+      setSpecPromptTemplate(value);
+      scheduleProjectSettingsSave(false);
+    },
+    [scheduleProjectSettingsSave, setSpecPromptTemplate]
+  );
+
+  const handleConfiguredPrdPathChange = useCallback(
+    (value: string) => {
+      setConfiguredPrdPath(normalizeProjectRelativePath(value));
+      // `true`: the document location changed, so reload project documents.
+      scheduleProjectSettingsSave(true);
+    },
+    [scheduleProjectSettingsSave, setConfiguredPrdPath]
+  );
+
+  const handleConfiguredSpecPathChange = useCallback(
+    (value: string) => {
+      setConfiguredSpecPath(normalizeProjectRelativePath(value));
+      scheduleProjectSettingsSave(true);
+    },
+    [scheduleProjectSettingsSave, setConfiguredSpecPath]
+  );
+
+  const handleSupportingDocumentsChange = useCallback(
+    (value: string) => {
+      setSupportingDocumentPaths(parseSupportingDocumentPaths(value));
+      scheduleProjectSettingsSave(false);
+    },
+    [scheduleProjectSettingsSave, setSupportingDocumentPaths]
+  );
+
+  // Immediate (non-debounced) save used by the setup screen's Continue button.
+  const handleSaveConfigurationAndContinue = useCallback(() => {
+    void saveCurrentProjectSettings({ reloadProject: true, navigateToChat: true });
+  }, [saveCurrentProjectSettings]);
+
+  return {
+    handleProjectModelChange,
+    handleProjectReasoningChange,
+    handlePrdPromptTemplateChange,
+    handleSpecPromptTemplateChange,
+    handleConfiguredPrdPathChange,
+    handleConfiguredSpecPathChange,
+    handleSupportingDocumentsChange,
+    handleSaveConfigurationAndContinue
+  };
+}
+
+// NOTE(review): presumably `ReturnType<typeof useProjectSettingsHandlers>` —
+// generic argument stripped in this patch; confirm.
+export type ProjectSettingsHandlers = ReturnType;
+
+// Inputs for useAppUiHandlers: async action callbacks from the App shell plus
+// the error/prompt state setters the handlers coordinate.
+// NOTE(review): `Promise` / `Dispatch>` generics appear stripped by patch
+// extraction (presumably `Promise<void>` and `Dispatch<SetStateAction<string>>`
+// / `<boolean>`); confirm against the original file.
+interface UseAppUiHandlersOptions {
+  agentState: AgentStoreSlice;
+  handleApproveExecutionGate: () => Promise;
+  handleEmergencyStop: () => Promise;
+  handleGeneratePrd: () => Promise;
+  handleGenerateSpec: () => Promise;
+  handleOpenImportFile: (target: DocumentTarget) => Promise;
+  handleStartBuild: () => Promise;
+  handleWorkspaceFileOpen: (path: string) => Promise;
+  prdGenerationError: string;
+  projectState: ProjectStoreSlice;
+  refreshDiagnostics: (previousEnvironment?: EnvironmentStatus) => Promise;
+  setCommandSearch: Dispatch>;
+  setIsSearchOpen: Dispatch>;
+  setPrdGenerationError: Dispatch>;
+  setPrdGenerationPrompt: Dispatch>;
+  setSpecGenerationError: Dispatch>;
+  setSpecGenerationPrompt: Dispatch>;
+  specGenerationError: string;
+}
+
+/**
+ * Builds the UI-facing event handlers for the App shell: content/prompt
+ * change handlers (which also clear stale error state), and fire-and-forget
+ * click wrappers around the async actions (`void promise` so React event
+ * handlers stay synchronous).
+ *
+ * NOTE(review): several handlers list the whole `projectState` /
+ * `agentState` slice objects in their dependency arrays, so they are
+ * recreated whenever any selected field of that slice changes — confirm this
+ * is acceptable for memoization downstream.
+ */
+export function useAppUiHandlers({
+  agentState,
+  handleApproveExecutionGate,
+  handleEmergencyStop,
+  handleGeneratePrd,
+  handleGenerateSpec,
+  handleOpenImportFile,
+  handleStartBuild,
+  handleWorkspaceFileOpen,
+  prdGenerationError,
+  projectState,
+  refreshDiagnostics,
+  setCommandSearch,
+  setIsSearchOpen,
+  setPrdGenerationError,
+  setPrdGenerationPrompt,
+  setSpecGenerationError,
+  setSpecGenerationPrompt,
+  specGenerationError
+}: UseAppUiHandlersOptions) {
+  const handlePrdContentChange = useCallback(
+    (value: string) => {
+      projectState.setPrdContent(value, projectState.prdPath);
+    },
+    [projectState]
+  );
+
+  const handleSpecContentChange = useCallback(
+    (value: string) => {
+      // Any non-empty spec content clears a previous generation error.
+      if (value.trim()) {
+        setSpecGenerationError("");
+      }
+
+      projectState.setSpecContent(value, projectState.specPath);
+    },
+    [projectState, setSpecGenerationError]
+  );
+
+  // Records the current textarea selection as a spec range (null when the
+  // selection is collapsed to a caret).
+  const handleSpecSelect = useCallback(
+    (event: ChangeEvent) => {
+      const { selectionStart, selectionEnd, value } = event.target;
+
+      projectState.setSelectedSpecRange(
+        selectionStart === selectionEnd
+          ? null
+          : {
+              start: selectionStart,
+              end: selectionEnd,
+              text: value.slice(selectionStart, selectionEnd)
+            }
+      );
+    },
+    [projectState]
+  );
+
+  const handlePrdGenerationPromptChange = useCallback(
+    (value: string) => {
+      setPrdGenerationPrompt(value);
+
+      if (prdGenerationError) {
+        setPrdGenerationError("");
+      }
+
+      // Editing the prompt after a failed run resets the agent to idle.
+      if (agentState.status === "error") {
+        agentState.setStatus("idle");
+      }
+    },
+    [
+      agentState,
+      prdGenerationError,
+      setPrdGenerationError,
+      setPrdGenerationPrompt
+    ]
+  );
+
+  const handleSpecGenerationPromptChange = useCallback(
+    (value: string) => {
+      setSpecGenerationPrompt(value);
+
+      if (specGenerationError) {
+        setSpecGenerationError("");
+      }
+
+      if (agentState.status === "error") {
+        agentState.setStatus("idle");
+      }
+    },
+    [
+      agentState,
+      setSpecGenerationError,
+      setSpecGenerationPrompt,
+      specGenerationError
+    ]
+  );
+
+  const handleCommandSearchChange = useCallback(
+    (event: ChangeEvent) => {
+      setCommandSearch(event.target.value);
+    },
+    [setCommandSearch]
+  );
+
+  // Closes the search overlay and clears the query in one step.
+  const closeWorkspaceSearch = useCallback(() => {
+    setIsSearchOpen(false);
+    setCommandSearch("");
+  }, [setCommandSearch, setIsSearchOpen]);
+
+  const handleRefresh = useCallback(() => {
+    void refreshDiagnostics();
+  }, [refreshDiagnostics]);
+
+  const handleOpenPrdImportClick = useCallback(() => {
+    void handleOpenImportFile("prd");
+  }, [handleOpenImportFile]);
+
+  const handleOpenSpecImportClick = useCallback(() => {
+    void handleOpenImportFile("spec");
+  }, [handleOpenImportFile]);
+
+  const handleStartBuildClick = useCallback(() => {
+    void handleStartBuild();
+  }, [handleStartBuild]);
+
+  const handleApproveExecutionGateClick = useCallback(() => {
+    void handleApproveExecutionGate();
+  }, [handleApproveExecutionGate]);
+
+  const handleEmergencyStopClick = useCallback(() => {
+    void handleEmergencyStop();
+  }, [handleEmergencyStop]);
+
+  // Intentional no-op: folder selection is driven by the native picker
+  // (handlePickProjectFolder), not the hidden <input type="file"> element.
+  const handleWorkspaceFolderSelection = useCallback(
+    (_event: ChangeEvent) => undefined,
+    []
+  );
+
+  const handleWorkspaceFileOpenClick = useCallback(
+    (path: string) => {
+      void handleWorkspaceFileOpen(path);
+    },
+    [handleWorkspaceFileOpen]
+  );
+
+  const handleGeneratePrdClick = useCallback(() => {
+    void handleGeneratePrd();
+  }, [handleGeneratePrd]);
+
+  const handleGenerateSpecClick = useCallback(() => {
+    void handleGenerateSpec();
+  }, [handleGenerateSpec]);
+
+  return {
+    handlePrdContentChange,
+    handleSpecContentChange,
+    handleSpecSelect,
+    handlePrdGenerationPromptChange,
+    handleSpecGenerationPromptChange,
+    handleCommandSearchChange,
+    closeWorkspaceSearch,
+    handleRefresh,
+    handleOpenPrdImportClick,
+    handleOpenSpecImportClick,
+    handleStartBuildClick,
+    handleApproveExecutionGateClick,
+    handleEmergencyStopClick,
+    handleWorkspaceFolderSelection,
+    handleWorkspaceFileOpenClick,
+    handleGeneratePrdClick,
+    handleGenerateSpecClick
+  };
+}
+
+// NOTE(review): presumably `ReturnType<typeof useAppUiHandlers>` — generic
+// argument stripped in this patch; confirm.
+export type AppUiHandlers = ReturnType;
+
+// Inputs for useAppScreenProps: everything needed to assemble the prop
+// bundles for the configuration, review, and settings screens.
+// NOTE(review): `RefObject` generics appear stripped by patch extraction
+// (presumably `RefObject<HTMLInputElement>`); confirm.
+interface UseAppScreenPropsOptions {
+  agentState: AgentStoreSlice;
+  commandSearch: string;
+  derivedState: AppDerivedState;
+  desktopRuntime: boolean;
+  folderInputRef: RefObject;
+  handleApproveSpec: () => void;
+  handleOpenChat: () => void;
+  handlePickProjectFolder: () => Promise;
+  hasSavedProjectSettings: boolean;
+  isImporting: boolean;
+  isProjectLoading: boolean;
+  isProjectSaving: boolean;
+  isSearchOpen: boolean;
+  reviewVisibleDiff: string;
+  prdGenerationError: string;
+  prdGenerationPrompt: string;
+  projectErrorMessage: string;
+  projectRootName: string;
+  projectRootPath: string;
+  projectSettingsHandlers: ProjectSettingsHandlers;
+  projectState: ProjectStoreSlice;
+  projectStatusMessage: string;
+  searchInputRef: RefObject;
+  settingsState: SettingsStoreSlice;
+  specGenerationError: string;
+  specGenerationPrompt: string;
+  uiHandlers: AppUiHandlers;
+  workspaceNotice: string;
+}
+
+/**
+ * Assembles memoized prop bundles for the three top-level screens
+ * (configuration, review, settings) from store slices, derived state, and
+ * handlers. Intermediate bundles (control column, main workspace, inspector
+ * column) are memoized separately so each screen's props only change when
+ * its own inputs do.
+ *
+ * NOTE(review): `useMemo>(...)` calls below have lost their type arguments
+ * to patch extraction — presumably `useMemo<ComponentProps<typeof Screen>>`;
+ * confirm against the original file.
+ */
+export function useAppScreenProps({
+  agentState,
+  commandSearch,
+  derivedState,
+  desktopRuntime,
+  folderInputRef,
+  handleApproveSpec,
+  handleOpenChat,
+  handlePickProjectFolder,
+  hasSavedProjectSettings,
+  isImporting,
+  isProjectLoading,
+  isProjectSaving,
+  isSearchOpen,
+  reviewVisibleDiff,
+  prdGenerationError,
+  prdGenerationPrompt,
+  projectErrorMessage,
+  projectRootName,
+  projectRootPath,
+  projectSettingsHandlers,
+  projectState,
+  projectStatusMessage,
+  searchInputRef,
+  settingsState,
+  specGenerationError,
+  specGenerationPrompt,
+  uiHandlers,
+  workspaceNotice
+}: UseAppScreenPropsOptions) {
+  // Model/reasoning/autonomy controls shown alongside the main workspace.
+  const controlColumnProps = useMemo(
+    () => ({
+      configuredModelProviders: derivedState.configuredModelProviders,
+      autonomyMode: projectState.autonomyMode,
+      mcpItems: derivedState.mcpItems,
+      onModeChange: projectState.setAutonomyMode,
+      onModelChange: projectSettingsHandlers.handleProjectModelChange,
+      onReasoningChange: projectSettingsHandlers.handleProjectReasoningChange,
+      selectedModel: projectState.selectedModel,
+      selectedReasoning: projectState.selectedReasoning
+    }),
+    [derivedState, projectSettingsHandlers, projectState]
+  );
+
+  // PRD/spec editors, generation controls, terminal output and diff view.
+  const mainWorkspaceProps = useMemo(
+    () => ({
+      activeTab: projectState.activeTab,
+      agentStatus: agentState.status,
+      canGeneratePrd: derivedState.canGeneratePrd,
+      canGenerateSpec: derivedState.canGenerateSpec,
+      configPath: derivedState.configPathDisplay,
+      executionSummary: agentState.executionSummary,
+      isGeneratingPrd: derivedState.isGeneratingPrd,
+      isGeneratingSpec: derivedState.isGeneratingSpec,
+      isSpecApproved: projectState.isSpecApproved,
+      executionControlsEnabled: false,
+      onActiveTabChange: projectState.setActiveTab,
+      onApproveExecutionGate: uiHandlers.handleApproveExecutionGateClick,
+      onApproveSpec: handleApproveSpec,
+      onEditorTabChange: projectState.updateEditorTabContent,
+      onEditorTabClose: projectState.closeEditorTab,
+      onEmergencyStop: uiHandlers.handleEmergencyStopClick,
+      onGeneratePrd: uiHandlers.handleGeneratePrdClick,
+      onGenerateSpec: uiHandlers.handleGenerateSpecClick,
+      onLoadPrd: uiHandlers.handleOpenPrdImportClick,
+      onLoadSpec: uiHandlers.handleOpenSpecImportClick,
+      onPrdContentChange: uiHandlers.handlePrdContentChange,
+      onPrdGenerationPromptChange: uiHandlers.handlePrdGenerationPromptChange,
+      onPrdPaneModeChange: projectState.setPrdPaneMode,
+      onSpecContentChange: uiHandlers.handleSpecContentChange,
+      onSpecGenerationPromptChange: uiHandlers.handleSpecGenerationPromptChange,
+      onSpecPaneModeChange: projectState.setSpecPaneMode,
+      onSpecSelect: uiHandlers.handleSpecSelect,
+      openEditorTabs: projectState.openEditorTabs,
+      prdContent: projectState.prdContent,
+      prdGenerationError,
+      prdGenerationHelperText: derivedState.prdGenerationHelperText,
+      prdGenerationPrompt,
+      prdPaneMode: projectState.prdPaneMode,
+      prdPath: projectState.prdPath,
+      prdPromptTemplate: projectState.prdPromptTemplate,
+      specContent: projectState.specContent,
+      specGenerationError,
+      specGenerationHelperText: derivedState.specGenerationHelperText,
+      specGenerationPrompt,
+      specPaneMode: projectState.specPaneMode,
+      specPath: projectState.specPath,
+      specPromptTemplate: projectState.specPromptTemplate,
+      terminalOutput: agentState.terminalOutput,
+      visibleDiff: reviewVisibleDiff,
+      workspaceRootName: projectRootName
+    }),
+    [
+      agentState,
+      derivedState,
+      handleApproveSpec,
+      prdGenerationError,
+      prdGenerationPrompt,
+      projectRootName,
+      projectState,
+      reviewVisibleDiff,
+      specGenerationError,
+      specGenerationPrompt,
+      uiHandlers
+    ]
+  );
+
+  // Workspace file browser column; empty-state message depends on whether a
+  // search query is active.
+  const inspectorColumnProps = useMemo(
+    () => ({
+      emptyStateMessage: derivedState.deferredSearch.trim()
+        ? `No files match "${derivedState.deferredSearch.trim()}".`
+        : "Choose another project folder from setup if you want to switch workspaces.",
+      folderInputRef,
+      hasWorkspaceEntries: settingsState.workspaceEntries.length > 0,
+      onFileOpen: uiHandlers.handleWorkspaceFileOpenClick,
+      onFolderChange: uiHandlers.handleWorkspaceFolderSelection,
+      onOpenFolder: handlePickProjectFolder,
+      workspaceEntries: derivedState.filteredWorkspaceEntries,
+      workspaceNotice,
+      workspaceRootName: projectRootName
+    }),
+    [
+      derivedState,
+      folderInputRef,
+      handlePickProjectFolder,
+      projectRootName,
+      settingsState.workspaceEntries.length,
+      uiHandlers,
+      workspaceNotice
+    ]
+  );
+
+  const reviewScreenProps = useMemo>(
+    () => ({
+      agentStatus: agentState.status,
+      commandSearch,
+      controlColumnProps,
+      inspectorColumnProps,
+      isSearchOpen,
+      isSpecApproved: projectState.isSpecApproved,
+      mainWorkspaceProps,
+      onCommandSearchChange: uiHandlers.handleCommandSearchChange,
+      onOpenChat: handleOpenChat,
+      onRefresh: uiHandlers.handleRefresh,
+      searchInputRef,
+      workspaceRootName: projectRootName
+    }),
+    [
+      agentState.status,
+      commandSearch,
+      controlColumnProps,
+      handleOpenChat,
+      inspectorColumnProps,
+      isSearchOpen,
+      mainWorkspaceProps,
+      projectRootName,
+      projectState.isSpecApproved,
+      searchInputRef,
+      uiHandlers
+    ]
+  );
+
+  const settingsScreenProps = useMemo>(
+    () => ({
+      agentStatus: agentState.status,
+      onRefresh: uiHandlers.handleRefresh,
+      settingsViewProps: {
+        annotations: projectState.annotations,
+        claudePath: settingsState.claudePath,
+        codexPath: settingsState.codexPath,
+        configPath: derivedState.configPathDisplay,
+        environment: settingsState.environment,
+        onClaudePathChange: settingsState.setClaudePath,
+        onCodexPathChange: settingsState.setCodexPath,
+        onModelChange: projectSettingsHandlers.handleProjectModelChange,
+        onPrdPathChange: projectSettingsHandlers.handleConfiguredPrdPathChange,
+        onPrdPromptChange: projectSettingsHandlers.handlePrdPromptTemplateChange,
+        onReasoningChange: projectSettingsHandlers.handleProjectReasoningChange,
+        onSpecPathChange: projectSettingsHandlers.handleConfiguredSpecPathChange,
+        onSpecPromptChange: projectSettingsHandlers.handleSpecPromptTemplateChange,
+        onSupportingDocumentsChange: projectSettingsHandlers.handleSupportingDocumentsChange,
+        onThemeChange: settingsState.setTheme,
+        prdPath: projectState.configuredPrdPath,
+        prdPrompt: projectState.prdPromptTemplate,
+        projectErrorMessage,
+        projectStatusMessage,
+        selectedModel: projectState.selectedModel,
+        selectedReasoning: projectState.selectedReasoning,
+        specPath: projectState.configuredSpecPath,
+        specPrompt: projectState.specPromptTemplate,
+        supportingDocumentsValue: derivedState.supportingDocumentsValue,
+        theme: settingsState.theme,
+        workspaceRootName: projectRootName
+      }
+    }),
+    [
+      agentState.status,
+      derivedState,
+      projectErrorMessage,
+      projectRootName,
+      projectSettingsHandlers,
+      projectState,
+      projectStatusMessage,
+      settingsState,
+      uiHandlers
+    ]
+  );
+
+  const configurationScreenProps = useMemo>(
+    () => ({
+      claudePath: settingsState.claudePath,
+      codexPath: settingsState.codexPath,
+      desktopRuntime,
+      environment: settingsState.environment,
+      errorMessage: projectErrorMessage,
+      hasSavedSettings: hasSavedProjectSettings,
+      // Setup shows a single loading state for both restore and import work.
+      isProjectLoading: isProjectLoading || isImporting,
+      isSaving: isProjectSaving,
+      onClaudePathChange: settingsState.setClaudePath,
+      onCodexPathChange: settingsState.setCodexPath,
+      onContinue: projectSettingsHandlers.handleSaveConfigurationAndContinue,
+      onModelChange: projectSettingsHandlers.handleProjectModelChange,
+      onPickFolder: handlePickProjectFolder,
+      onPrdPathChange: projectSettingsHandlers.handleConfiguredPrdPathChange,
+      onPrdPromptChange: projectSettingsHandlers.handlePrdPromptTemplateChange,
+      onReasoningChange: projectSettingsHandlers.handleProjectReasoningChange,
+      onRefresh: uiHandlers.handleRefresh,
+      onSpecPathChange: projectSettingsHandlers.handleConfiguredSpecPathChange,
+      onSpecPromptChange: projectSettingsHandlers.handleSpecPromptTemplateChange,
+      onSupportingDocumentsChange: projectSettingsHandlers.handleSupportingDocumentsChange,
+      prdPath: projectState.configuredPrdPath,
+      prdPrompt: projectState.prdPromptTemplate,
+      selectedModel: projectState.selectedModel,
+      selectedReasoning: projectState.selectedReasoning,
+      settingsPath: derivedState.configPathDisplay,
+      specPath: projectState.configuredSpecPath,
+      specPrompt: projectState.specPromptTemplate,
+      statusMessage: projectStatusMessage,
+      supportingDocumentsValue: derivedState.supportingDocumentsValue,
+      workspaceRootName: projectRootName,
+      workspaceRootPath: projectRootPath
+    }),
+    [
+      derivedState,
+      desktopRuntime,
+      handlePickProjectFolder,
+      hasSavedProjectSettings,
+      isImporting,
+      isProjectLoading,
+      isProjectSaving,
+      projectErrorMessage,
+      projectRootName,
+      projectRootPath,
+      projectSettingsHandlers,
+      projectState,
+      projectStatusMessage,
+      settingsState,
+      uiHandlers
+    ]
+  );
+
+  return {
+    configurationScreenProps,
+    reviewScreenProps,
+    settingsScreenProps
+  };
+}
diff --git a/src/lib/appState.ts b/src/lib/appState.ts
new file mode 100644
index 0000000..5ddcffe
--- /dev/null
+++ b/src/lib/appState.ts
@@ -0,0 +1,209 @@
+import { getModelLabel } from "./agentConfig";
+import {
+ DEFAULT_PROJECT_PRD_PATH,
+ DEFAULT_PROJECT_SPEC_PATH,
+ SPECFORGE_SETTINGS_RELATIVE_PATH,
+ getWorkspaceDisplayPath,
+ normalizeProjectSettings
+} from "./projectConfig";
+import type {
+ EnvironmentStatus,
+ ModelId,
+ ModelProvider,
+ ProjectContext,
+ ReasoningProfileId
+} from "../types";
+
+// Project-scoped settings fields used to build a normalized settings object.
+interface BuildCurrentProjectSettingsOptions {
+  configuredPrdPath: string;
+  configuredSpecPath: string;
+  prdPromptTemplate: string;
+  selectedModel: ModelId;
+  selectedReasoning: ReasoningProfileId;
+  specPromptTemplate: string;
+  supportingDocumentPaths: string[];
+}
+
+// Shared inputs for the PRD/spec generation helper-text builders.
+// `selectedProviderStatus` reuses the shape of one environment entry.
+interface GenerationHelperTextOptions {
+  configPathDisplay: string;
+  configuredDocumentPath: string;
+  desktopRuntime: boolean;
+  generationPrompt: string;
+  projectRootPath: string;
+  selectedModel: ModelId;
+  selectedProviderStatus: EnvironmentStatus["claude"];
+}
+
+// Spec helper text additionally depends on the current PRD content.
+interface SpecGenerationHelperTextOptions extends GenerationHelperTextOptions {
+  prdContent: string;
+}
+
+// A single row in the MCP/CLI readiness list shown in the control column.
+export interface McpListItem {
+  name: string;
+  detail: string;
+  status?: string;
+}
+
+/**
+ * Returns the providers whose CLI is detected ("found") in the current
+ * environment, in fixed order: claude first, then codex.
+ */
+export function buildConfiguredModelProviders(
+  environment: EnvironmentStatus
+): ModelProvider[] {
+  const providers: ModelProvider[] = [];
+
+  if (environment.claude.status === "found") {
+    providers.push("claude");
+  }
+
+  if (environment.codex.status === "found") {
+    providers.push("codex");
+  }
+
+  return providers;
+}
+
+/**
+ * Flattens the environment status into display rows for the readiness list:
+ * codex, claude, then git.
+ */
+export function buildMcpItems(environment: EnvironmentStatus): McpListItem[] {
+  return [
+    {
+      name: environment.codex.name,
+      detail: environment.codex.detail,
+      status: environment.codex.status
+    },
+    {
+      name: environment.claude.name,
+      detail: environment.claude.detail,
+      status: environment.claude.status
+    },
+    {
+      name: environment.git.name,
+      detail: environment.git.detail,
+      status: environment.git.status
+    }
+  ];
+}
+
+/**
+ * Builds a normalized project-settings object from the current store fields.
+ * Empty PRD/spec paths fall back to the package defaults so a saved settings
+ * file always has concrete document paths.
+ */
+export function buildCurrentProjectSettings({
+  configuredPrdPath,
+  configuredSpecPath,
+  prdPromptTemplate,
+  selectedModel,
+  selectedReasoning,
+  specPromptTemplate,
+  supportingDocumentPaths
+}: BuildCurrentProjectSettingsOptions) {
+  return normalizeProjectSettings({
+    selectedModel,
+    selectedReasoning,
+    prdPrompt: prdPromptTemplate,
+    specPrompt: specPromptTemplate,
+    prdPath: configuredPrdPath || DEFAULT_PROJECT_PRD_PATH,
+    specPath: configuredSpecPath || DEFAULT_PROJECT_SPEC_PATH,
+    supportingDocumentPaths
+  });
+}
+
+/**
+ * Display path for the project settings file: the workspace-relative form of
+ * the known config path, or the default `.specforge` relative path when no
+ * config path has been resolved yet.
+ */
+export function buildConfigPathDisplay(
+  projectConfigPath: string,
+  projectRootName: string
+) {
+  if (projectConfigPath.trim()) {
+    return getWorkspaceDisplayPath(projectConfigPath, projectRootName);
+  }
+
+  return SPECFORGE_SETTINGS_RELATIVE_PATH;
+}
+
+/**
+ * Helper text under the PRD generation control. Checks preconditions in
+ * order (runtime, project root, configured path, .md extension, prompt,
+ * provider readiness) and returns the first blocking message; otherwise a
+ * summary of what generation will do.
+ */
+export function getPrdGenerationHelperText({
+  configPathDisplay,
+  configuredDocumentPath,
+  desktopRuntime,
+  generationPrompt,
+  projectRootPath,
+  selectedModel,
+  selectedProviderStatus
+}: GenerationHelperTextOptions) {
+  if (!desktopRuntime) {
+    return "AI PRD generation requires the desktop runtime.";
+  }
+
+  if (!projectRootPath.trim()) {
+    return "Choose a project folder in setup before generating a PRD.";
+  }
+
+  if (!configuredDocumentPath.trim()) {
+    return "Configure a PRD path in setup or settings first.";
+  }
+
+  if (!configuredDocumentPath.toLowerCase().endsWith(".md")) {
+    return "Configure the PRD path as a Markdown file if you want generated output saved into the workspace.";
+  }
+
+  if (!generationPrompt.trim()) {
+    return "Add the product context you want to append after the saved PRD prompt.";
+  }
+
+  if (selectedProviderStatus.status !== "found") {
+    return `${selectedProviderStatus.name} is not currently marked ready. Update its path in Settings and refresh if generation fails.`;
+  }
+
+  return `This appends your note after the saved PRD prompt from ${configPathDisplay}, runs ${getModelLabel(selectedModel)}, and writes markdown to ${configuredDocumentPath}.`;
+}
+
+/**
+ * Helper text under the spec generation control. Same precondition chain as
+ * the PRD variant, plus a PRD-content check before the path checks.
+ * NOTE(review): `selectedModel` is part of the options interface but is not
+ * destructured or used here, and the success message (unlike the PRD one)
+ * does not name the model — confirm the omission is intentional.
+ */
+export function getSpecGenerationHelperText({
+  configPathDisplay,
+  configuredDocumentPath,
+  desktopRuntime,
+  generationPrompt,
+  prdContent,
+  projectRootPath,
+  selectedProviderStatus
+}: SpecGenerationHelperTextOptions) {
+  if (!desktopRuntime) {
+    return "AI spec generation requires the desktop runtime.";
+  }
+
+  if (!projectRootPath.trim()) {
+    return "Choose a project folder in setup before generating a spec.";
+  }
+
+  if (!prdContent.trim()) {
+    return "Load or generate a PRD first. The spec generator appends your note after the saved spec prompt and includes the current PRD content.";
+  }
+
+  if (!configuredDocumentPath.trim()) {
+    return "Configure a spec path in setup or settings first.";
+  }
+
+  if (!configuredDocumentPath.toLowerCase().endsWith(".md")) {
+    return "Configure the spec path as a Markdown file if you want generated output saved into the workspace.";
+  }
+
+  if (!generationPrompt.trim()) {
+    return "Add the technical guidance you want to append after the saved spec prompt.";
+  }
+
+  if (selectedProviderStatus.status !== "found") {
+    return `${selectedProviderStatus.name} is not currently marked ready. Update its path in Settings and refresh if generation fails.`;
+  }
+
+  return `This appends your note after the saved spec prompt from ${configPathDisplay}, includes the current PRD content, and writes markdown to ${configuredDocumentPath}.`;
+}
+
+/**
+ * Status line after loading a project: lists which of the PRD/spec documents
+ * were found at the saved paths, or states that neither exists yet.
+ */
+export function buildWorkspaceNotice(context: ProjectContext) {
+  const loadedDocuments = [
+    context.prdDocument?.fileName ? `PRD: ${context.prdDocument.fileName}` : null,
+    context.specDocument?.fileName ? `SPEC: ${context.specDocument.fileName}` : null
+  ].filter((value): value is string => value !== null);
+
+  if (loadedDocuments.length === 0) {
+    return `${context.rootName} is configured. No document exists yet at ${context.settings.prdPath} or ${context.settings.specPath}.`;
+  }
+
+  return `${context.rootName} is configured. Loaded ${loadedDocuments.join(" and ")} from the saved project paths.`;
+}
+
+/**
+ * Resolves after the browser has had a chance to paint: requestAnimationFrame
+ * schedules past the next frame, and the nested setTimeout(…, 0) yields once
+ * more so the paint actually happens before the promise settles.
+ * NOTE(review): return-type generic stripped by patch extraction — presumably
+ * `Promise<void>`; confirm.
+ */
+export function waitForNextPaint(): Promise {
+  return new Promise((resolve) => {
+    window.requestAnimationFrame(() => {
+      window.setTimeout(resolve, 0);
+    });
+  });
+}
diff --git a/src/lib/runtime.ts b/src/lib/runtime.ts
index 7a938ed..9f02a84 100644
--- a/src/lib/runtime.ts
+++ b/src/lib/runtime.ts
@@ -3,6 +3,11 @@ import { listen, type UnlistenFn } from "@tauri-apps/api/event";
import type {
AgentEventPayload,
+ CavemanStatus,
+ ChatContextItem,
+ ChatEventPayload,
+ ChatSession,
+ ChatSessionSummary,
AutonomyMode,
EnvironmentStatus,
ModelId,
@@ -232,6 +237,109 @@ export async function emergencyStop(): Promise {
await invoke("kill_agent_process");
}
+export async function createChatSession(title?: string): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ return invoke("create_chat_session", { title: emptyToNull(title) });
+}
+
+export async function loadChatSession(sessionId: string): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ return invoke("load_chat_session", { sessionId });
+}
+
+export async function saveChatSession(payload: {
+ sessionId: string;
+ selectedModel: ModelId;
+ selectedReasoning: ReasoningProfileId;
+ autonomyMode: AutonomyMode;
+ contextItems: ChatContextItem[];
+}): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ return invoke("save_chat_session", {
+ sessionId: payload.sessionId,
+ selectedModel: payload.selectedModel,
+ selectedReasoning: payload.selectedReasoning,
+ autonomyMode: payload.autonomyMode,
+ contextItems: payload.contextItems
+ });
+}
+
+export async function renameChatSession(payload: {
+ sessionId: string;
+ title: string;
+}): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ return invoke("rename_chat_session", {
+ sessionId: payload.sessionId,
+ title: payload.title
+ });
+}
+
+export async function deleteChatSession(sessionId: string) {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ return invoke<{
+ sessions: ChatSessionSummary[];
+ lastActiveSessionId: string | null;
+ }>("delete_chat_session", { sessionId });
+}
+
+export async function sendChatMessage(payload: {
+ sessionId: string;
+ message: string;
+ claudePath?: string;
+ codexPath?: string;
+}): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ await invoke("send_chat_message", {
+ sessionId: payload.sessionId,
+ message: payload.message,
+ claudePath: emptyToNull(payload.claudePath),
+ codexPath: emptyToNull(payload.codexPath)
+ });
+}
+
+export async function approveChatSession(sessionId: string): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ await invoke("approve_chat_session", { sessionId });
+}
+
+export async function stopChatSession(sessionId: string): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ await invoke("stop_chat_session", { sessionId });
+}
+
+export async function ensureCavemanSkill(): Promise {
+ if (!isTauriRuntime()) {
+ throw new Error("Chat sessions require the desktop runtime.");
+ }
+
+ return invoke("ensure_caveman_skill");
+}
+
export async function subscribeToAgentEvents(handlers: {
onLine: (line: string) => void;
onState: (payload: AgentEventPayload) => void;
@@ -251,6 +359,22 @@ export async function subscribeToAgentEvents(handlers: {
};
}
+export async function subscribeToChatSessionEvents(
+ onEvent: (payload: ChatEventPayload) => void
+): Promise<() => void> {
+ if (!isTauriRuntime()) {
+ return () => undefined;
+ }
+
+ const unlisten = await listen("chat-session-event", (event) =>
+ onEvent(event.payload)
+ );
+
+ return () => {
+ callUnlisten(unlisten);
+ };
+}
+
function callUnlisten(unlisten: UnlistenFn) {
unlisten();
}
diff --git a/src/screens/ChatScreen.tsx b/src/screens/ChatScreen.tsx
new file mode 100644
index 0000000..4e55a3f
--- /dev/null
+++ b/src/screens/ChatScreen.tsx
@@ -0,0 +1,368 @@
+import {
+ ArrowRight,
+ Attachment,
+ ChatBubble,
+ ChatLines,
+ CheckCircle,
+ EditPencil,
+ Refresh,
+ SendSolid,
+ Settings,
+ Trash,
+ WarningCircle,
+ XmarkCircle
+} from "iconoir-react";
+import { useMemo, useState } from "react";
+
+import { getModelOptions, getReasoningOptions } from "../lib/agentConfig";
+import { isOpenableWorkspacePath } from "../lib/appShell";
+import { DiffPreview } from "../components/DiffPreview";
+import type {
+ AutonomyMode,
+ ChatContextItem,
+ ChatSession,
+ ChatSessionSummary,
+ ModelProvider,
+ WorkspaceEntry
+} from "../types";
+
+interface ChatScreenProps {
+ workspaceRootName: string;
+ sessions: ChatSessionSummary[];
+ activeSession: ChatSession | null;
+ activeDraft: string;
+ workspaceEntries: WorkspaceEntry[];
+ configuredModelProviders: ModelProvider[];
+ cavemanReady: boolean;
+ cavemanMessage: string;
+ cavemanChecking: boolean;
+ onCreateSession: () => void;
+ onDeleteSession: (sessionId: string) => void;
+ onDraftChange: (value: string) => void;
+ onRefresh: () => void;
+ onRemoveContextItem: (itemId: string) => void;
+ onRenameSession: (sessionId: string, title: string) => void;
+ onSaveSessionConfig: (payload: {
+ sessionId: string;
+ selectedModel: ChatSession["selectedModel"];
+ selectedReasoning: ChatSession["selectedReasoning"];
+ autonomyMode: AutonomyMode;
+ contextItems: ChatContextItem[];
+ }) => void;
+ onSelectSession: (sessionId: string) => void;
+ onSend: () => void;
+ onStop: () => void;
+ onApprove: () => void;
+ onAttachFile: (path: string) => void;
+ onOpenReview: () => void;
+}
+
+const AUTONOMY_OPTIONS: AutonomyMode[] = ["stepped", "milestone", "god_mode"];
+
+export function ChatScreen({
+ workspaceRootName,
+ sessions,
+ activeSession,
+ activeDraft,
+ workspaceEntries,
+ configuredModelProviders,
+ cavemanReady,
+ cavemanMessage,
+ cavemanChecking,
+ onCreateSession,
+ onDeleteSession,
+ onDraftChange,
+ onRefresh,
+ onRemoveContextItem,
+ onRenameSession,
+ onSaveSessionConfig,
+ onSelectSession,
+ onSend,
+ onStop,
+ onApprove,
+ onAttachFile,
+ onOpenReview
+}: ChatScreenProps) {
+ const [topicSearch, setTopicSearch] = useState("");
+ const [contextSearch, setContextSearch] = useState("");
+ const mentionQuery = useMemo(() => {
+ const match = activeDraft.match(/(?:^|\s)@([^\s@]*)$/);
+ return match?.[1]?.toLowerCase() ?? "";
+ }, [activeDraft]);
+ const visibleSessions = useMemo(
+ () =>
+ sessions.filter((session) =>
+ `${session.title} ${session.lastMessagePreview}`
+ .toLowerCase()
+ .includes(topicSearch.trim().toLowerCase())
+ ),
+ [sessions, topicSearch]
+ );
+ const attachableFiles = useMemo(
+ () =>
+ workspaceEntries.filter(
+ (entry) =>
+ entry.kind === "file" &&
+ isOpenableWorkspacePath(entry.path) &&
+ `${entry.name} ${entry.path}`
+ .toLowerCase()
+ .includes((mentionQuery || contextSearch).trim().toLowerCase())
+ ),
+ [contextSearch, mentionQuery, workspaceEntries]
+ );
+ const messageCount = activeSession?.messages.length ?? 0;
+ const canSend = Boolean(activeSession && activeDraft.trim() && cavemanReady && !activeSession.runtime.isBusy);
+
+ return (
+
+
+
+
+
+
Topics
+
{workspaceRootName}
+
+
+
+ New
+
+
+
+ setTopicSearch(event.target.value)}
+ placeholder="Search topics"
+ value={topicSearch}
+ />
+
+
+ {visibleSessions.map((session) => (
+
onSelectSession(session.id)}
+ type="button"
+ >
+
+
+
{session.title}
+
{session.lastMessagePreview || "No messages yet."}
+
+
+ {session.status.replace("_", " ")}
+
+
+
+ ))}
+
+
+ {activeSession ? (
+
+ {
+ const nextTitle = window.prompt("Rename topic", activeSession.title)?.trim();
+ if (nextTitle) {
+ onRenameSession(activeSession.id, nextTitle);
+ }
+ }}
+ type="button"
+ >
+
+ Rename
+
+ onDeleteSession(activeSession.id)} type="button">
+
+ Delete
+
+
+ ) : null}
+
+
+
+
+
+
Agent Chat
+
{activeSession?.title ?? "No topic selected"}
+
+
+
+
+ {!cavemanReady ? (
+
+
+
{cavemanChecking ? "Verifying Caveman skill..." : cavemanMessage}
+
+ ) : null}
+
+
+ {activeSession ? (
+
+ {activeSession.messages.map((message) => (
+
+ {message.role}
+ {message.content}
+
+ ))}
+ {messageCount === 0 ? (
+
+ Start a topic. PRD, SPEC, supporting docs, and the workspace tree are already attached to the first turn.
+
+ ) : null}
+
+ ) : (
+
+ Create a topic to start the agent chat workspace.
+
+ )}
+
+
+
+ onDraftChange(event.target.value)}
+ placeholder="Ask the agent anything about this topic. Type @ to attach a workspace file."
+ value={activeDraft}
+ />
+ {mentionQuery.length > 0 ? (
+
+ {attachableFiles.slice(0, 8).map((entry) => (
+
onAttachFile(entry.path)} type="button">
+
+ {entry.path}
+
+ ))}
+
+ ) : null}
+
+
+
+ {activeSession?.runtime.executionSummary ?? "Ready for the next prompt."}
+
+
+ {activeSession?.runtime.awaitingApproval ? (
+
+
+ Approve
+
+ ) : null}
+ {activeSession?.runtime.isBusy ? (
+
+
+ Stop
+
+ ) : null}
+
+
+ Send
+
+
+
+
+
+
+
+
+
+
Context & Artifacts
+
+ {activeSession ? (
+ <>
+
+ onSaveSessionConfig({ sessionId: activeSession.id, selectedModel, selectedReasoning: activeSession.selectedReasoning, autonomyMode: activeSession.autonomyMode, contextItems: activeSession.contextItems })} />
+ onSaveSessionConfig({ sessionId: activeSession.id, selectedModel: activeSession.selectedModel, selectedReasoning, autonomyMode: activeSession.autonomyMode, contextItems: activeSession.contextItems })} />
+ ({ value, label: value.replace("_", " ") }))} value={activeSession.autonomyMode} onChange={(autonomyMode) => onSaveSessionConfig({ sessionId: activeSession.id, selectedModel: activeSession.selectedModel, selectedReasoning: activeSession.selectedReasoning, autonomyMode, contextItems: activeSession.contextItems })} />
+
+
+
+
Attached Context
+
+ {activeSession.contextItems.map((item) => (
+ onRemoveContextItem(item.id)} type="button">
+ {item.label}
+ x
+
+ ))}
+
+
+
+
+
setContextSearch(event.target.value)} placeholder="Attach workspace files" value={contextSearch} />
+
+ {attachableFiles.slice(0, 18).map((entry) => (
+
onAttachFile(entry.path)} type="button">
+
+ {entry.path}
+
+ ))}
+
+
+
+
+
+
+ {activeSession.runtime.terminalOutput.length === 0 ? (
+
Terminal output will appear here for the active topic.
+ ) : (
+ activeSession.runtime.terminalOutput.map((line, index) =>
{line}
)
+ )}
+
+ >
+ ) : null}
+
+
+
+ );
+}
+
+function SelectField({
+ label,
+ options,
+ value,
+ onChange
+}: {
+ label: string;
+ options: Array<{ value: Value; label: string }>;
+ value: Value;
+ onChange: (value: Value) => void;
+}) {
+ return (
+
+ {label}
+ onChange(event.target.value as Value)} value={value}>
+ {options.map((option) => (
+
+ {option.label}
+
+ ))}
+
+
+ );
+}
+
+const PANEL_CLASS = "flex min-h-0 flex-col gap-4 overflow-hidden rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
+const INPUT_CLASS = "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/15 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition placeholder:text-[var(--text-muted)] focus:border-[var(--accent)]";
+const PRIMARY_BUTTON_CLASS = "inline-flex items-center justify-center gap-2 rounded-[1rem] border-0 bg-[linear-gradient(135deg,var(--accent),#ff79c6)] px-4 py-3 font-semibold text-[#15131c] transition hover:-translate-y-0.5 hover:opacity-95";
+const SECONDARY_BUTTON_CLASS = "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
+const DANGER_BUTTON_CLASS = "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[rgba(255,85,85,0.32)] bg-[rgba(255,85,85,0.16)] px-4 py-3 font-medium text-[var(--danger)] transition hover:-translate-y-0.5";
+const TOPIC_CARD_CLASS = "w-full rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-surface)]/70 px-4 py-4 transition hover:-translate-y-0.5 hover:bg-[var(--bg-surface)]/90";
+const CONTEXT_CHIP_CLASS = "inline-flex items-center gap-2 rounded-full border border-[var(--border-soft)] bg-white/6 px-3 py-2 text-xs font-medium text-[var(--text-main)]";
+const LIST_ITEM_CLASS = "flex w-full items-center gap-2 rounded-[0.9rem] px-3 py-2 text-left text-sm text-[var(--text-main)] transition hover:bg-white/8";
diff --git a/src/screens/PrdScreen.tsx b/src/screens/PrdScreen.tsx
index 0da1603..fbbb925 100644
--- a/src/screens/PrdScreen.tsx
+++ b/src/screens/PrdScreen.tsx
@@ -15,8 +15,8 @@ interface PrdScreenProps {
isSpecApproved: boolean;
workspaceRootName: string;
onCommandSearchChange: (event: ChangeEvent) => void;
+ onOpenChat: () => void;
onRefresh: () => void;
- onStartBuild: () => void;
searchInputRef: RefObject;
controlColumnProps: ComponentProps;
mainWorkspaceProps: ComponentProps;
@@ -30,8 +30,8 @@ export function PrdScreen({
isSpecApproved,
workspaceRootName,
onCommandSearchChange,
+ onOpenChat,
onRefresh,
- onStartBuild,
searchInputRef,
controlColumnProps,
mainWorkspaceProps,
@@ -60,14 +60,9 @@ export function PrdScreen({
Refresh
-
+
- Start Build
+ Open Chat
diff --git a/src/store/useAgentStore.ts b/src/store/useAgentStore.ts
index 7f37c25..f110413 100644
--- a/src/store/useAgentStore.ts
+++ b/src/store/useAgentStore.ts
@@ -1,6 +1,6 @@
import { create } from "zustand";
-import type { AgentEventPayload, AgentStatus } from "../types";
+import type { AgentEventPayload, AgentStatus, ChatRuntimeState } from "../types";
interface AgentState {
status: AgentStatus;
@@ -14,6 +14,7 @@ interface AgentState {
setCurrentMilestone: (milestone: string | null) => void;
setPendingDiff: (diff: string | null) => void;
setExecutionSummary: (summary: string | null) => void;
+ syncFromChatRuntime: (runtime: ChatRuntimeState) => void;
applyEvent: (payload: AgentEventPayload) => void;
}
@@ -39,6 +40,14 @@ export const useAgentStore = create
((set) => ({
setCurrentMilestone: (currentMilestone) => set({ currentMilestone }),
setPendingDiff: (pendingDiff) => set({ pendingDiff }),
setExecutionSummary: (executionSummary) => set({ executionSummary }),
+ syncFromChatRuntime: (runtime) =>
+ set({
+ status: runtime.status,
+ terminalOutput: runtime.terminalOutput,
+ currentMilestone: runtime.currentMilestone,
+ pendingDiff: runtime.pendingDiff,
+ executionSummary: runtime.executionSummary
+ }),
applyEvent: (payload) =>
set({
status: payload.status,
diff --git a/src/store/useChatStore.ts b/src/store/useChatStore.ts
new file mode 100644
index 0000000..2873ee6
--- /dev/null
+++ b/src/store/useChatStore.ts
@@ -0,0 +1,197 @@
+import { create } from "zustand";
+
+import type {
+ AgentStatus,
+ ChatContextItem,
+ ChatRuntimeState,
+ ChatSession,
+ ChatSessionSummary
+} from "../types";
+
+interface ChatStoreState {
+ sessions: ChatSessionSummary[];
+ activeSessionId: string | null;
+ loadedSessions: Record;
+ drafts: Record;
+ cavemanReady: boolean;
+ cavemanMessage: string;
+ cavemanChecking: boolean;
+ setSessions: (sessions: ChatSessionSummary[]) => void;
+ setActiveSessionId: (sessionId: string | null) => void;
+ upsertSession: (session: ChatSession) => void;
+ setDraft: (sessionId: string, draft: string) => void;
+ setContextItems: (sessionId: string, items: ChatContextItem[]) => void;
+ setSessionConfig: (payload: {
+ sessionId: string;
+ selectedModel: ChatSession["selectedModel"];
+ selectedReasoning: ChatSession["selectedReasoning"];
+ autonomyMode: ChatSession["autonomyMode"];
+ }) => void;
+ deleteSession: (sessionId: string, nextActiveSessionId: string | null) => void;
+ setCavemanStatus: (payload: {
+ ready: boolean;
+ message: string;
+ checking?: boolean;
+ }) => void;
+}
+
+function buildIdleRuntime(): ChatRuntimeState {
+ return {
+ status: "idle",
+ terminalOutput: [],
+ currentMilestone: null,
+ pendingDiff: null,
+ executionSummary: null,
+ awaitingApproval: false,
+ lastError: null,
+ isBusy: false,
+ pendingRequest: null
+ };
+}
+
+function toSummary(session: ChatSession): ChatSessionSummary {
+ return {
+ id: session.id,
+ title: session.title,
+ createdAt: session.createdAt,
+ updatedAt: session.updatedAt,
+ status: session.status,
+ lastMessagePreview: session.lastMessagePreview,
+ selectedModel: session.selectedModel,
+ selectedReasoning: session.selectedReasoning,
+ autonomyMode: session.autonomyMode
+ };
+}
+
+function sortSessions(sessions: ChatSessionSummary[]) {
+ return [...sessions].sort((left, right) => right.updatedAt.localeCompare(left.updatedAt));
+}
+
+export function getActiveChatRuntime(
+ sessions: Record,
+ activeSessionId: string | null
+): ChatRuntimeState {
+ if (!activeSessionId) {
+ return buildIdleRuntime();
+ }
+
+ return sessions[activeSessionId]?.runtime ?? buildIdleRuntime();
+}
+
+export function getActiveChatStatus(
+ sessions: Record,
+ activeSessionId: string | null
+): AgentStatus {
+ return getActiveChatRuntime(sessions, activeSessionId).status;
+}
+
+export const useChatStore = create((set) => ({
+ sessions: [],
+ activeSessionId: null,
+ loadedSessions: {},
+ drafts: {},
+ cavemanReady: false,
+ cavemanMessage: "Caveman has not been verified yet.",
+ cavemanChecking: false,
+ setSessions: (sessions) =>
+ set((state) => {
+ const visibleSessionIds = new Set(sessions.map((session) => session.id));
+ const nextLoadedSessions = Object.fromEntries(
+ Object.entries(state.loadedSessions).filter(([sessionId]) =>
+ visibleSessionIds.has(sessionId)
+ )
+ );
+ const nextDrafts = Object.fromEntries(
+ Object.entries(state.drafts).filter(([sessionId]) => visibleSessionIds.has(sessionId))
+ );
+
+ return {
+ sessions: sortSessions(sessions),
+ loadedSessions: nextLoadedSessions,
+ drafts: nextDrafts
+ };
+ }),
+ setActiveSessionId: (activeSessionId) => set({ activeSessionId }),
+ upsertSession: (session) =>
+ set((state) => ({
+ loadedSessions: {
+ ...state.loadedSessions,
+ [session.id]: session
+ },
+ sessions: sortSessions([
+ ...state.sessions.filter((entry) => entry.id !== session.id),
+ toSummary(session)
+ ])
+ })),
+ setDraft: (sessionId, draft) =>
+ set((state) => ({
+ drafts: {
+ ...state.drafts,
+ [sessionId]: draft
+ }
+ })),
+ setContextItems: (sessionId, items) =>
+ set((state) => {
+ const session = state.loadedSessions[sessionId];
+
+ if (!session) {
+ return state;
+ }
+
+ return {
+ loadedSessions: {
+ ...state.loadedSessions,
+ [sessionId]: {
+ ...session,
+ contextItems: items
+ }
+ }
+ };
+ }),
+ setSessionConfig: (payload) =>
+ set((state) => {
+ const session = state.loadedSessions[payload.sessionId];
+
+ if (!session) {
+ return state;
+ }
+
+ const nextSession: ChatSession = {
+ ...session,
+ selectedModel: payload.selectedModel,
+ selectedReasoning: payload.selectedReasoning,
+ autonomyMode: payload.autonomyMode
+ };
+
+ return {
+ loadedSessions: {
+ ...state.loadedSessions,
+ [payload.sessionId]: nextSession
+ },
+ sessions: sortSessions([
+ ...state.sessions.filter((entry) => entry.id !== payload.sessionId),
+ toSummary(nextSession)
+ ])
+ };
+ }),
+ deleteSession: (sessionId, nextActiveSessionId) =>
+ set((state) => {
+ const nextLoadedSessions = { ...state.loadedSessions };
+ const nextDrafts = { ...state.drafts };
+ delete nextLoadedSessions[sessionId];
+ delete nextDrafts[sessionId];
+
+ return {
+ sessions: state.sessions.filter((entry) => entry.id !== sessionId),
+ loadedSessions: nextLoadedSessions,
+ drafts: nextDrafts,
+ activeSessionId: nextActiveSessionId
+ };
+ }),
+ setCavemanStatus: ({ ready, message, checking = false }) =>
+ set({
+ cavemanReady: ready,
+ cavemanMessage: message,
+ cavemanChecking: checking
+ })
+}));
diff --git a/src/types.ts b/src/types.ts
index fc154d2..94b138b 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -29,6 +29,22 @@ export type AgentStatus =
| "completed";
export type CliHealth = "found" | "missing" | "unauthorized";
export type AnnotationTone = "info" | "warning" | "success";
+export type ChatContextKind =
+ | "prd"
+ | "spec"
+ | "supporting_document"
+ | "workspace_summary"
+ | "file";
+export type ChatMessageRole = "user" | "assistant";
+export type ChatEventType =
+ | "messageStarted"
+ | "messageDelta"
+ | "terminalLine"
+ | "approvalRequired"
+ | "completed"
+ | "halted"
+ | "error"
+ | "sessionUpdated";
export interface SelectionRange {
start: number;
@@ -90,6 +106,8 @@ export interface ProjectContext {
ignoredFileCount: number;
prdDocument: WorkspaceDocument | null;
specDocument: WorkspaceDocument | null;
+ chatSessions: ChatSessionSummary[];
+ lastActiveSessionId: string | null;
}
export interface WorkspaceScanResult {
@@ -113,3 +131,69 @@ export interface AgentEventPayload {
pendingDiff: string | null;
summary: string | null;
}
+
+export interface ChatContextItem {
+ id: string;
+ kind: ChatContextKind;
+ label: string;
+ path: string | null;
+ isDefault: boolean;
+}
+
+export interface ChatMessage {
+ id: string;
+ role: ChatMessageRole;
+ content: string;
+ createdAt: string;
+}
+
+export interface ChatRuntimeState {
+ status: AgentStatus;
+ terminalOutput: string[];
+ currentMilestone: string | null;
+ pendingDiff: string | null;
+ executionSummary: string | null;
+ awaitingApproval: boolean;
+ lastError: string | null;
+ isBusy: boolean;
+ pendingRequest: string | null;
+}
+
+export interface ChatSessionSummary {
+ id: string;
+ title: string;
+ createdAt: string;
+ updatedAt: string;
+ status: AgentStatus;
+ lastMessagePreview: string;
+ selectedModel: ModelId;
+ selectedReasoning: ReasoningProfileId;
+ autonomyMode: AutonomyMode;
+}
+
+export interface ChatSession extends ChatSessionSummary {
+ contextItems: ChatContextItem[];
+ messages: ChatMessage[];
+ runtime: ChatRuntimeState;
+}
+
+export interface ChatSessionIndex {
+ sessions: ChatSessionSummary[];
+ lastActiveSessionId: string | null;
+}
+
+export interface ChatEventPayload {
+ sessionId: string;
+ eventType: ChatEventType;
+ message: ChatMessage | null;
+ messageDelta: string | null;
+ terminalLine: string | null;
+ session: ChatSession | null;
+ runtime: ChatRuntimeState | null;
+ summary: ChatSessionSummary | null;
+}
+
+export interface CavemanStatus {
+ ready: boolean;
+ detail: string;
+}
From 3a316981eb6e36a6e4ee45a4acbe6e76c3842ee1 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 01:37:26 -0300
Subject: [PATCH 06/32] perf(app): skip redundant agent resets on session
change
- Guard resetRun with idle check to avoid unnecessary state transitions
- Update useEffect dependencies for ChatRuntime sync
- Set package to public in package.json
---
package.json | 2 +-
src/App.tsx | 14 ++++++++++++--
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/package.json b/package.json
index e7a6e70..fc39bc8 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "specforge",
- "private": true,
+ "private": false,
"version": "0.1.0",
"type": "module",
"packageManager": "bun@1.3.6",
diff --git a/src/App.tsx b/src/App.tsx
index 472a767..54921a0 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -1265,8 +1265,18 @@ function App() {
return;
}
- agentState.resetRun();
- }, [activeChatSession, agentState]);
+ const nextAgentState = useAgentStore.getState();
+ const isAlreadyReset =
+ nextAgentState.status === "idle" &&
+ nextAgentState.terminalOutput.length === 0 &&
+ nextAgentState.currentMilestone === null &&
+ nextAgentState.pendingDiff === null &&
+ nextAgentState.executionSummary === null;
+
+ if (!isAlreadyReset) {
+ agentState.resetRun();
+ }
+ }, [activeChatSession, agentState.resetRun, agentState.syncFromChatRuntime]);
useAgentEventSubscription({
appendTerminalOutput: agentState.appendTerminalOutput,
From c5b1cce7330842e0de16ed6b7db2e9c482a910e7 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 01:38:09 -0300
Subject: [PATCH 07/32] chore(workspace): add AI assistant configuration files
Commit workspace settings and skills configuration for AI agents (claude, kiro, pi, trae, etc.)
---
.agents/skills/caveman-commit/SKILL.md | 65 ++++++
.agents/skills/caveman-compress/README.md | 163 +++++++++++++++
.agents/skills/caveman-compress/SECURITY.md | 31 +++
.agents/skills/caveman-compress/SKILL.md | 111 ++++++++++
.../caveman-compress/scripts/__init__.py | 9 +
.../caveman-compress/scripts/__main__.py | 3 +
.../caveman-compress/scripts/benchmark.py | 78 ++++++++
.../skills/caveman-compress/scripts/cli.py | 73 +++++++
.../caveman-compress/scripts/compress.py | 176 ++++++++++++++++
.../skills/caveman-compress/scripts/detect.py | 121 +++++++++++
.../caveman-compress/scripts/validate.py | 189 ++++++++++++++++++
.agents/skills/caveman-help/SKILL.md | 59 ++++++
.agents/skills/caveman-review/SKILL.md | 55 +++++
.agents/skills/caveman/SKILL.md | 67 +++++++
.claude/skills/caveman-commit/SKILL.md | 65 ++++++
.claude/skills/caveman-compress/README.md | 163 +++++++++++++++
.claude/skills/caveman-compress/SECURITY.md | 31 +++
.claude/skills/caveman-compress/SKILL.md | 111 ++++++++++
.../caveman-compress/scripts/__init__.py | 9 +
.../caveman-compress/scripts/__main__.py | 3 +
.../caveman-compress/scripts/benchmark.py | 78 ++++++++
.../skills/caveman-compress/scripts/cli.py | 73 +++++++
.../caveman-compress/scripts/compress.py | 176 ++++++++++++++++
.../skills/caveman-compress/scripts/detect.py | 121 +++++++++++
.../caveman-compress/scripts/validate.py | 189 ++++++++++++++++++
.claude/skills/caveman-help/SKILL.md | 59 ++++++
.claude/skills/caveman-review/SKILL.md | 55 +++++
.claude/skills/caveman/SKILL.md | 67 +++++++
.kiro/skills/caveman-commit/SKILL.md | 65 ++++++
.kiro/skills/caveman-compress/README.md | 163 +++++++++++++++
.kiro/skills/caveman-compress/SECURITY.md | 31 +++
.kiro/skills/caveman-compress/SKILL.md | 111 ++++++++++
.../caveman-compress/scripts/__init__.py | 9 +
.../caveman-compress/scripts/__main__.py | 3 +
.../caveman-compress/scripts/benchmark.py | 78 ++++++++
.kiro/skills/caveman-compress/scripts/cli.py | 73 +++++++
.../caveman-compress/scripts/compress.py | 176 ++++++++++++++++
.../skills/caveman-compress/scripts/detect.py | 121 +++++++++++
.../caveman-compress/scripts/validate.py | 189 ++++++++++++++++++
.kiro/skills/caveman-help/SKILL.md | 59 ++++++
.kiro/skills/caveman-review/SKILL.md | 55 +++++
.kiro/skills/caveman/SKILL.md | 67 +++++++
.pi/skills/caveman-commit/SKILL.md | 65 ++++++
.pi/skills/caveman-compress/README.md | 163 +++++++++++++++
.pi/skills/caveman-compress/SECURITY.md | 31 +++
.pi/skills/caveman-compress/SKILL.md | 111 ++++++++++
.../caveman-compress/scripts/__init__.py | 9 +
.../caveman-compress/scripts/__main__.py | 3 +
.../caveman-compress/scripts/benchmark.py | 78 ++++++++
.pi/skills/caveman-compress/scripts/cli.py | 73 +++++++
.../caveman-compress/scripts/compress.py | 176 ++++++++++++++++
.pi/skills/caveman-compress/scripts/detect.py | 121 +++++++++++
.../caveman-compress/scripts/validate.py | 189 ++++++++++++++++++
.pi/skills/caveman-help/SKILL.md | 59 ++++++
.pi/skills/caveman-review/SKILL.md | 55 +++++
.pi/skills/caveman/SKILL.md | 67 +++++++
.trae/skills/caveman-commit/SKILL.md | 65 ++++++
.trae/skills/caveman-compress/README.md | 163 +++++++++++++++
.trae/skills/caveman-compress/SECURITY.md | 31 +++
.trae/skills/caveman-compress/SKILL.md | 111 ++++++++++
.../caveman-compress/scripts/__init__.py | 9 +
.../caveman-compress/scripts/__main__.py | 3 +
.../caveman-compress/scripts/benchmark.py | 78 ++++++++
.trae/skills/caveman-compress/scripts/cli.py | 73 +++++++
.../caveman-compress/scripts/compress.py | 176 ++++++++++++++++
.../skills/caveman-compress/scripts/detect.py | 121 +++++++++++
.../caveman-compress/scripts/validate.py | 189 ++++++++++++++++++
.trae/skills/caveman-help/SKILL.md | 59 ++++++
.trae/skills/caveman-review/SKILL.md | 55 +++++
.trae/skills/caveman/SKILL.md | 67 +++++++
skills-lock.json | 30 +++
71 files changed, 6030 insertions(+)
create mode 100644 .agents/skills/caveman-commit/SKILL.md
create mode 100644 .agents/skills/caveman-compress/README.md
create mode 100644 .agents/skills/caveman-compress/SECURITY.md
create mode 100644 .agents/skills/caveman-compress/SKILL.md
create mode 100644 .agents/skills/caveman-compress/scripts/__init__.py
create mode 100644 .agents/skills/caveman-compress/scripts/__main__.py
create mode 100644 .agents/skills/caveman-compress/scripts/benchmark.py
create mode 100644 .agents/skills/caveman-compress/scripts/cli.py
create mode 100644 .agents/skills/caveman-compress/scripts/compress.py
create mode 100644 .agents/skills/caveman-compress/scripts/detect.py
create mode 100644 .agents/skills/caveman-compress/scripts/validate.py
create mode 100644 .agents/skills/caveman-help/SKILL.md
create mode 100644 .agents/skills/caveman-review/SKILL.md
create mode 100644 .agents/skills/caveman/SKILL.md
create mode 100644 .claude/skills/caveman-commit/SKILL.md
create mode 100644 .claude/skills/caveman-compress/README.md
create mode 100644 .claude/skills/caveman-compress/SECURITY.md
create mode 100644 .claude/skills/caveman-compress/SKILL.md
create mode 100644 .claude/skills/caveman-compress/scripts/__init__.py
create mode 100644 .claude/skills/caveman-compress/scripts/__main__.py
create mode 100644 .claude/skills/caveman-compress/scripts/benchmark.py
create mode 100644 .claude/skills/caveman-compress/scripts/cli.py
create mode 100644 .claude/skills/caveman-compress/scripts/compress.py
create mode 100644 .claude/skills/caveman-compress/scripts/detect.py
create mode 100644 .claude/skills/caveman-compress/scripts/validate.py
create mode 100644 .claude/skills/caveman-help/SKILL.md
create mode 100644 .claude/skills/caveman-review/SKILL.md
create mode 100644 .claude/skills/caveman/SKILL.md
create mode 100644 .kiro/skills/caveman-commit/SKILL.md
create mode 100644 .kiro/skills/caveman-compress/README.md
create mode 100644 .kiro/skills/caveman-compress/SECURITY.md
create mode 100644 .kiro/skills/caveman-compress/SKILL.md
create mode 100644 .kiro/skills/caveman-compress/scripts/__init__.py
create mode 100644 .kiro/skills/caveman-compress/scripts/__main__.py
create mode 100644 .kiro/skills/caveman-compress/scripts/benchmark.py
create mode 100644 .kiro/skills/caveman-compress/scripts/cli.py
create mode 100644 .kiro/skills/caveman-compress/scripts/compress.py
create mode 100644 .kiro/skills/caveman-compress/scripts/detect.py
create mode 100644 .kiro/skills/caveman-compress/scripts/validate.py
create mode 100644 .kiro/skills/caveman-help/SKILL.md
create mode 100644 .kiro/skills/caveman-review/SKILL.md
create mode 100644 .kiro/skills/caveman/SKILL.md
create mode 100644 .pi/skills/caveman-commit/SKILL.md
create mode 100644 .pi/skills/caveman-compress/README.md
create mode 100644 .pi/skills/caveman-compress/SECURITY.md
create mode 100644 .pi/skills/caveman-compress/SKILL.md
create mode 100644 .pi/skills/caveman-compress/scripts/__init__.py
create mode 100644 .pi/skills/caveman-compress/scripts/__main__.py
create mode 100644 .pi/skills/caveman-compress/scripts/benchmark.py
create mode 100644 .pi/skills/caveman-compress/scripts/cli.py
create mode 100644 .pi/skills/caveman-compress/scripts/compress.py
create mode 100644 .pi/skills/caveman-compress/scripts/detect.py
create mode 100644 .pi/skills/caveman-compress/scripts/validate.py
create mode 100644 .pi/skills/caveman-help/SKILL.md
create mode 100644 .pi/skills/caveman-review/SKILL.md
create mode 100644 .pi/skills/caveman/SKILL.md
create mode 100644 .trae/skills/caveman-commit/SKILL.md
create mode 100644 .trae/skills/caveman-compress/README.md
create mode 100644 .trae/skills/caveman-compress/SECURITY.md
create mode 100644 .trae/skills/caveman-compress/SKILL.md
create mode 100644 .trae/skills/caveman-compress/scripts/__init__.py
create mode 100644 .trae/skills/caveman-compress/scripts/__main__.py
create mode 100644 .trae/skills/caveman-compress/scripts/benchmark.py
create mode 100644 .trae/skills/caveman-compress/scripts/cli.py
create mode 100644 .trae/skills/caveman-compress/scripts/compress.py
create mode 100644 .trae/skills/caveman-compress/scripts/detect.py
create mode 100644 .trae/skills/caveman-compress/scripts/validate.py
create mode 100644 .trae/skills/caveman-help/SKILL.md
create mode 100644 .trae/skills/caveman-review/SKILL.md
create mode 100644 .trae/skills/caveman/SKILL.md
create mode 100644 skills-lock.json
diff --git a/.agents/skills/caveman-commit/SKILL.md b/.agents/skills/caveman-commit/SKILL.md
new file mode 100644
index 0000000..729318c
--- /dev/null
+++ b/.agents/skills/caveman-commit/SKILL.md
@@ -0,0 +1,65 @@
+---
+name: caveman-commit
+description: >
+ Ultra-compressed commit message generator. Cuts noise from commit messages while preserving
+ intent and reasoning. Conventional Commits format. Subject ≤50 chars, body only when "why"
+ isn't obvious. Use when user says "write a commit", "commit message", "generate commit",
+ "/commit", or invokes /caveman-commit. Auto-triggers when staging changes.
+---
+
+Write commit messages terse and exact. Conventional Commits format. No fluff. Why over what.
+
+## Rules
+
+**Subject line:**
+- `<type>(<scope>): <subject>` — `<scope>` optional
+- Types: `feat`, `fix`, `refactor`, `perf`, `docs`, `test`, `chore`, `build`, `ci`, `style`, `revert`
+- Imperative mood: "add", "fix", "remove" — not "added", "adds", "adding"
+- ≤50 chars when possible, hard cap 72
+- No trailing period
+- Match project convention for capitalization after the colon
+
+**Body (only if needed):**
+- Skip entirely when subject is self-explanatory
+- Add body only for: non-obvious *why*, breaking changes, migration notes, linked issues
+- Wrap at 72 chars
+- Bullets `-` not `*`
+- Reference issues/PRs at end: `Closes #42`, `Refs #17`
+
+**What NEVER goes in:**
+- "This commit does X", "I", "we", "now", "currently" — the diff says what
+- "As requested by..." — use Co-authored-by trailer
+- "Generated with Claude Code" or any AI attribution
+- Emoji (unless project convention requires)
+- Restating the file name when scope already says it
+
+## Examples
+
+Diff: new endpoint for user profile with body explaining the why
+- ❌ "feat: add a new endpoint to get user profile information from the database"
+- ✅
+ ```
+ feat(api): add GET /users/:id/profile
+
+ Mobile client needs profile data without the full user payload
+ to reduce LTE bandwidth on cold-launch screens.
+
+ Closes #128
+ ```
+
+Diff: breaking API change
+- ✅
+ ```
+ feat(api)!: rename /v1/orders to /v1/checkout
+
+ BREAKING CHANGE: clients on /v1/orders must migrate to /v1/checkout
+ before 2026-06-01. Old route returns 410 after that date.
+ ```
+
+## Auto-Clarity
+
+Always include body for: breaking changes, security fixes, data migrations, anything reverting a prior commit. Never compress these into subject-only — future debuggers need the context.
+
+## Boundaries
+
+Only generates the commit message. Does not run `git commit`, does not stage files, does not amend. Output the message as a code block ready to paste. "stop caveman-commit" or "normal mode": revert to verbose commit style.
\ No newline at end of file
diff --git a/.agents/skills/caveman-compress/README.md b/.agents/skills/caveman-compress/README.md
new file mode 100644
index 0000000..7c0e8ba
--- /dev/null
+++ b/.agents/skills/caveman-compress/README.md
@@ -0,0 +1,163 @@
+
+
+
+
+caveman-compress
+
+
+ shrink memory file. save token every session.
+
+
+---
+
+A Claude Code skill that compresses your project memory files (`CLAUDE.md`, todos, preferences) into caveman format — so every session loads fewer tokens automatically.
+
+Claude read `CLAUDE.md` on every session start. If file big, cost big. Caveman make file small. Cost go down forever.
+
+## What It Do
+
+```
+/caveman:compress CLAUDE.md
+```
+
+```
+CLAUDE.md ← compressed (Claude reads this — fewer tokens every session)
+CLAUDE.original.md ← human-readable backup (you edit this)
+```
+
+Original never lost. You can read and edit `.original.md`. Run skill again to re-compress after edits.
+
+## Benchmarks
+
+Real results on real project files:
+
+| File | Original | Compressed | Saved |
+|------|----------:|----------:|------:|
+| `claude-md-preferences.md` | 706 | 285 | **59.6%** |
+| `project-notes.md` | 1145 | 535 | **53.3%** |
+| `claude-md-project.md` | 1122 | 636 | **43.3%** |
+| `todo-list.md` | 627 | 388 | **38.1%** |
+| `mixed-with-code.md` | 888 | 560 | **36.9%** |
+| **Average** | **898** | **481** | **46%** |
+
+All validations passed ✅ — headings, code blocks, URLs, file paths preserved exactly.
+
+## Before / After
+
+
+
+
+
+### 📄 Original (706 tokens)
+
+> "I strongly prefer TypeScript with strict mode enabled for all new code. Please don't use `any` type unless there's genuinely no way around it, and if you do, leave a comment explaining the reasoning. I find that taking the time to properly type things catches a lot of bugs before they ever make it to runtime."
+
+
+
+
+### 🪨 Caveman (285 tokens)
+
+> "Prefer TypeScript strict mode always. No `any` unless unavoidable — comment why if used. Proper types catch bugs early."
+
+
+
+
+
+**Same instructions. 60% fewer tokens. Every. Single. Session.**
+
+## Security
+
+`caveman-compress` is flagged as Snyk High Risk due to subprocess and file I/O patterns detected by static analysis. This is a false positive — see [SECURITY.md](./SECURITY.md) for a full explanation of what the skill does and does not do.
+
+## Install
+
+Compress is built in with the `caveman` plugin. Install `caveman` once, then use `/caveman:compress`.
+
+If you need local files, the compress skill lives at:
+
+```bash
+caveman-compress/
+```
+
+**Requires:** Python 3.10+
+
+## Usage
+
+```
+/caveman:compress <file>
+```
+
+Examples:
+```
+/caveman:compress CLAUDE.md
+/caveman:compress docs/preferences.md
+/caveman:compress todos.md
+```
+
+### What files work
+
+| Type | Compress? |
+|------|-----------|
+| `.md`, `.txt`, `.rst` | ✅ Yes |
+| Extensionless natural language | ✅ Yes |
+| `.py`, `.js`, `.ts`, `.json`, `.yaml` | ❌ Skip (code/config) |
+| `*.original.md` | ❌ Skip (backup files) |
+
+## How It Work
+
+```
+/caveman:compress CLAUDE.md
+ ↓
+detect file type (no tokens)
+ ↓
+Claude compresses (tokens — one call)
+ ↓
+validate output (no tokens)
+ checks: headings, code blocks, URLs, file paths, bullets
+ ↓
+if errors: Claude fixes cherry-picked issues only (tokens — targeted fix)
+ does NOT recompress — only patches broken parts
+ ↓
+retry up to 2 times
+ ↓
+write compressed → CLAUDE.md
+write original → CLAUDE.original.md
+```
+
+Only two things use tokens: initial compression + targeted fix if validation fails. Everything else is local Python.
+
+## What Is Preserved
+
+Caveman compress natural language. It never touch:
+
+- Code blocks (` ``` ` fenced or indented)
+- Inline code (`` `backtick content` ``)
+- URLs and links
+- File paths (`/src/components/...`)
+- Commands (`npm install`, `git commit`)
+- Technical terms, library names, API names
+- Headings (exact text preserved)
+- Tables (structure preserved, cell text compressed)
+- Dates, version numbers, numeric values
+
+## Why This Matter
+
+`CLAUDE.md` loads on **every session start**. A 1000-token project memory file costs tokens every single time you open a project. Over 100 sessions that's 100,000 tokens of overhead — just for context you already wrote.
+
+Caveman cut that by ~46% on average. Same instructions. Same accuracy. Less waste.
+
+```
+┌────────────────────────────────────────────┐
+│ TOKEN SAVINGS PER FILE █████ 46% │
+│ SESSIONS THAT BENEFIT ██████████ 100% │
+│ INFORMATION PRESERVED ██████████ 100% │
+│ SETUP TIME █ 1x │
+└────────────────────────────────────────────┘
+```
+
+## Part of Caveman
+
+This skill is part of the [caveman](https://github.com/JuliusBrussee/caveman) toolkit — making Claude use fewer tokens without losing accuracy.
+
+- **caveman** — make Claude *speak* like caveman (cuts response tokens ~65%)
+- **caveman-compress** — make Claude *read* less (cuts context tokens ~46%)
diff --git a/.agents/skills/caveman-compress/SECURITY.md b/.agents/skills/caveman-compress/SECURITY.md
new file mode 100644
index 0000000..693108c
--- /dev/null
+++ b/.agents/skills/caveman-compress/SECURITY.md
@@ -0,0 +1,31 @@
+# Security
+
+## Snyk High Risk Rating
+
+`caveman-compress` receives a Snyk High Risk rating due to static analysis heuristics. This document explains what the skill does and does not do.
+
+### What triggers the rating
+
+1. **subprocess usage**: The skill calls the `claude` CLI via `subprocess.run()` as a fallback when `ANTHROPIC_API_KEY` is not set. The subprocess call uses a fixed argument list — no shell interpolation occurs. User file content is passed via stdin, not as a shell argument.
+
+2. **File read/write**: The skill reads the file the user explicitly points it at, compresses it, and writes the result back to the same path. A `.original.md` backup is saved alongside it. No files outside the user-specified path are read or written.
+
+### What the skill does NOT do
+
+- Does not execute user file content as code
+- Does not make network requests except to Anthropic's API (via SDK or CLI)
+- Does not access files outside the path the user provides
+- Does not use shell=True or string interpolation in subprocess calls
+- Does not collect or transmit any data beyond the file being compressed
+
+### Auth behavior
+
+If `ANTHROPIC_API_KEY` is set, the skill uses the Anthropic Python SDK directly (no subprocess). If not set, it falls back to the `claude` CLI, which uses the user's existing Claude desktop authentication.
+
+### File size limit
+
+Files larger than 500KB are rejected before any API call is made.
+
+### Reporting a vulnerability
+
+If you believe you've found a genuine security issue, please open a GitHub issue with the label `security`.
diff --git a/.agents/skills/caveman-compress/SKILL.md b/.agents/skills/caveman-compress/SKILL.md
new file mode 100644
index 0000000..7b3e3aa
--- /dev/null
+++ b/.agents/skills/caveman-compress/SKILL.md
@@ -0,0 +1,111 @@
+---
+name: caveman-compress
+description: >
+ Compress natural language memory files (CLAUDE.md, todos, preferences) into caveman format
+ to save input tokens. Preserves all technical substance, code, URLs, and structure.
+ Compressed version overwrites the original file. Human-readable backup saved as FILE.original.md.
+ Trigger: /caveman:compress or "compress memory file"
+---
+
+# Caveman Compress
+
+## Purpose
+
+Compress natural language files (CLAUDE.md, todos, preferences) into caveman-speak to reduce input tokens. Compressed version overwrites original. Human-readable backup saved as `.original.md`.
+
+## Trigger
+
+`/caveman:compress <file>` or when user asks to compress a memory file.
+
+## Process
+
+1. The compression scripts live in `caveman-compress/scripts/` (adjacent to this SKILL.md). If the path is not immediately available, search for `caveman-compress/scripts/__main__.py`.
+
+2. Run:
+
+cd caveman-compress && python3 -m scripts <file>
+
+3. The CLI will:
+- detect file type (no tokens)
+- call Claude to compress
+- validate output (no tokens)
+- if errors: cherry-pick fix with Claude (targeted fixes only, no recompression)
+- retry up to 2 times
+- if still failing after 2 retries: report error to user, leave original file untouched
+
+4. Return result to user
+
+## Compression Rules
+
+### Remove
+- Articles: a, an, the
+- Filler: just, really, basically, actually, simply, essentially, generally
+- Pleasantries: "sure", "certainly", "of course", "happy to", "I'd recommend"
+- Hedging: "it might be worth", "you could consider", "it would be good to"
+- Redundant phrasing: "in order to" → "to", "make sure to" → "ensure", "the reason is because" → "because"
+- Connective fluff: "however", "furthermore", "additionally", "in addition"
+
+### Preserve EXACTLY (never modify)
+- Code blocks (fenced ``` and indented)
+- Inline code (`backtick content`)
+- URLs and links (full URLs, markdown links)
+- File paths (`/src/components/...`, `./config.yaml`)
+- Commands (`npm install`, `git commit`, `docker build`)
+- Technical terms (library names, API names, protocols, algorithms)
+- Proper nouns (project names, people, companies)
+- Dates, version numbers, numeric values
+- Environment variables (`$HOME`, `NODE_ENV`)
+
+### Preserve Structure
+- All markdown headings (keep exact heading text, compress body below)
+- Bullet point hierarchy (keep nesting level)
+- Numbered lists (keep numbering)
+- Tables (compress cell text, keep structure)
+- Frontmatter/YAML headers in markdown files
+
+### Compress
+- Use short synonyms: "big" not "extensive", "fix" not "implement a solution for", "use" not "utilize"
+- Fragments OK: "Run tests before commit" not "You should always run tests before committing"
+- Drop "you should", "make sure to", "remember to" — just state the action
+- Merge redundant bullets that say the same thing differently
+- Keep one example where multiple examples show the same pattern
+
+CRITICAL RULE:
+Anything inside ``` ... ``` must be copied EXACTLY.
+Do not:
+- remove comments
+- remove spacing
+- reorder lines
+- shorten commands
+- simplify anything
+
+Inline code (`...`) must be preserved EXACTLY.
+Do not modify anything inside backticks.
+
+If file contains code blocks:
+- Treat code blocks as read-only regions
+- Only compress text outside them
+- Do not merge sections around code
+
+## Pattern
+
+Original:
+> You should always make sure to run the test suite before pushing any changes to the main branch. This is important because it helps catch bugs early and prevents broken builds from being deployed to production.
+
+Compressed:
+> Run tests before push to main. Catch bugs early, prevent broken prod deploys.
+
+Original:
+> The application uses a microservices architecture with the following components. The API gateway handles all incoming requests and routes them to the appropriate service. The authentication service is responsible for managing user sessions and JWT tokens.
+
+Compressed:
+> Microservices architecture. API gateway route all requests to services. Auth service manage user sessions + JWT tokens.
+
+## Boundaries
+
+- ONLY compress natural language files (.md, .txt, extensionless)
+- NEVER modify: .py, .js, .ts, .json, .yaml, .yml, .toml, .env, .lock, .css, .html, .xml, .sql, .sh
+- If file has mixed content (prose + code), compress ONLY the prose sections
+- If unsure whether something is code or prose, leave it unchanged
+- Original file is backed up as FILE.original.md before overwriting
+- Never compress FILE.original.md (skip it)
diff --git a/.agents/skills/caveman-compress/scripts/__init__.py b/.agents/skills/caveman-compress/scripts/__init__.py
new file mode 100644
index 0000000..16b8c53
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/__init__.py
@@ -0,0 +1,9 @@
+"""Caveman compress scripts.
+
+This package provides tools to compress natural language markdown files
+into caveman format to save input tokens.
+"""
+
+__all__ = ["cli", "compress", "detect", "validate"]
+
+__version__ = "1.0.0"
diff --git a/.agents/skills/caveman-compress/scripts/__main__.py b/.agents/skills/caveman-compress/scripts/__main__.py
new file mode 100644
index 0000000..4e28416
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+main()
diff --git a/.agents/skills/caveman-compress/scripts/benchmark.py b/.agents/skills/caveman-compress/scripts/benchmark.py
new file mode 100644
index 0000000..eac927d
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/benchmark.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import sys
+
+# Support both direct execution and module import
+try:
+ from .validate import validate
+except ImportError:
+ sys.path.insert(0, str(Path(__file__).parent))
+ from validate import validate
+
+try:
+ import tiktoken
+ _enc = tiktoken.get_encoding("o200k_base")
+except ImportError:
+ _enc = None
+
+
+def count_tokens(text):
+ if _enc is None:
+ return len(text.split()) # fallback: word count
+ return len(_enc.encode(text))
+
+
+def benchmark_pair(orig_path: Path, comp_path: Path):
+ orig_text = orig_path.read_text()
+ comp_text = comp_path.read_text()
+
+ orig_tokens = count_tokens(orig_text)
+ comp_tokens = count_tokens(comp_text)
+ saved = 100 * (orig_tokens - comp_tokens) / orig_tokens if orig_tokens > 0 else 0.0
+ result = validate(orig_path, comp_path)
+
+ return (comp_path.name, orig_tokens, comp_tokens, saved, result.is_valid)
+
+
+def print_table(rows):
+ print("\n| File | Original | Compressed | Saved % | Valid |")
+ print("|------|----------|------------|---------|-------|")
+ for r in rows:
+ print(f"| {r[0]} | {r[1]} | {r[2]} | {r[3]:.1f}% | {'✅' if r[4] else '❌'} |")
+
+
+def main():
+ # Direct file pair: python3 benchmark.py original.md compressed.md
+ if len(sys.argv) == 3:
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+ if not orig.exists():
+ print(f"❌ Not found: {orig}")
+ sys.exit(1)
+ if not comp.exists():
+ print(f"❌ Not found: {comp}")
+ sys.exit(1)
+ print_table([benchmark_pair(orig, comp)])
+ return
+
+ # Glob mode: repo_root/tests/caveman-compress/
+ tests_dir = Path(__file__).parent.parent.parent / "tests" / "caveman-compress"
+ if not tests_dir.exists():
+ print(f"❌ Tests dir not found: {tests_dir}")
+ sys.exit(1)
+
+ rows = []
+ for orig in sorted(tests_dir.glob("*.original.md")):
+ comp = orig.with_name(orig.stem.removesuffix(".original") + ".md")
+ if comp.exists():
+ rows.append(benchmark_pair(orig, comp))
+
+ if not rows:
+ print("No compressed file pairs found.")
+ return
+
+ print_table(rows)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.agents/skills/caveman-compress/scripts/cli.py b/.agents/skills/caveman-compress/scripts/cli.py
new file mode 100644
index 0000000..428fd86
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/cli.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""
+Caveman Compress CLI
+
+Usage:
+ caveman <file>
+"""
+
+import sys
+from pathlib import Path
+
+from .compress import compress_file
+from .detect import detect_file_type, should_compress
+
+
+def print_usage():
+ print("Usage: caveman ")
+
+
+def main():
+ if len(sys.argv) != 2:
+ print_usage()
+ sys.exit(1)
+
+ filepath = Path(sys.argv[1])
+
+ # Check file exists
+ if not filepath.exists():
+ print(f"❌ File not found: {filepath}")
+ sys.exit(1)
+
+ if not filepath.is_file():
+ print(f"❌ Not a file: {filepath}")
+ sys.exit(1)
+
+ filepath = filepath.resolve()
+
+ # Detect file type
+ file_type = detect_file_type(filepath)
+
+ print(f"Detected: {file_type}")
+
+ # Check if compressible
+ if not should_compress(filepath):
+ print("Skipping: file is not natural language (code/config)")
+ sys.exit(0)
+
+ print("Starting caveman compression...\n")
+
+ try:
+ success = compress_file(filepath)
+
+ if success:
+ print("\nCompression completed successfully")
+ backup_path = filepath.with_name(filepath.stem + ".original.md")
+ print(f"Compressed: {filepath}")
+ print(f"Original: {backup_path}")
+ sys.exit(0)
+ else:
+ print("\n❌ Compression failed after retries")
+ sys.exit(2)
+
+ except KeyboardInterrupt:
+ print("\nInterrupted by user")
+ sys.exit(130)
+
+ except Exception as e:
+ print(f"\n❌ Error: {e}")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.agents/skills/caveman-compress/scripts/compress.py b/.agents/skills/caveman-compress/scripts/compress.py
new file mode 100644
index 0000000..1622a7a
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/compress.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Caveman Memory Compression Orchestrator
+
+Usage:
+ python scripts/compress.py
+"""
+
+import os
+import re
+import subprocess
+from pathlib import Path
+from typing import List
+
+OUTER_FENCE_REGEX = re.compile(
+ r"\A\s*(`{3,}|~{3,})[^\n]*\n(.*)\n\1\s*\Z", re.DOTALL
+)
+
+
+def strip_llm_wrapper(text: str) -> str:
+ """Strip outer ```markdown ... ``` fence when it wraps the entire output."""
+ m = OUTER_FENCE_REGEX.match(text)
+ if m:
+ return m.group(2)
+ return text
+
+from .detect import should_compress
+from .validate import validate
+
+MAX_RETRIES = 2
+
+
+# ---------- Claude Calls ----------
+
+
+def call_claude(prompt: str) -> str:
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if api_key:
+ try:
+ import anthropic
+
+ client = anthropic.Anthropic(api_key=api_key)
+ msg = client.messages.create(
+ model=os.environ.get("CAVEMAN_MODEL", "claude-sonnet-4-5"),
+ max_tokens=8192,
+ messages=[{"role": "user", "content": prompt}],
+ )
+ return strip_llm_wrapper(msg.content[0].text.strip())
+ except ImportError:
+ pass # anthropic not installed, fall back to CLI
+ # Fallback: use claude CLI (handles desktop auth)
+ try:
+ result = subprocess.run(
+ ["claude", "--print"],
+ input=prompt,
+ text=True,
+ capture_output=True,
+ check=True,
+ )
+ return strip_llm_wrapper(result.stdout.strip())
+ except subprocess.CalledProcessError as e:
+ raise RuntimeError(f"Claude call failed:\n{e.stderr}")
+
+
+def build_compress_prompt(original: str) -> str:
+ return f"""
+Compress this markdown into caveman format.
+
+STRICT RULES:
+- Do NOT modify anything inside ``` code blocks
+- Do NOT modify anything inside inline backticks
+- Preserve ALL URLs exactly
+- Preserve ALL headings exactly
+- Preserve file paths and commands
+- Return ONLY the compressed markdown body — do NOT wrap the entire output in a ```markdown fence or any other fence. Inner code blocks from the original stay as-is; do not add a new outer fence around the whole file.
+
+Only compress natural language.
+
+TEXT:
+{original}
+"""
+
+
+def build_fix_prompt(original: str, compressed: str, errors: List[str]) -> str:
+ errors_str = "\n".join(f"- {e}" for e in errors)
+ return f"""You are fixing a caveman-compressed markdown file. Specific validation errors were found.
+
+CRITICAL RULES:
+- DO NOT recompress or rephrase the file
+- ONLY fix the listed errors — leave everything else exactly as-is
+- The ORIGINAL is provided as reference only (to restore missing content)
+- Preserve caveman style in all untouched sections
+
+ERRORS TO FIX:
+{errors_str}
+
+HOW TO FIX:
+- Missing URL: find it in ORIGINAL, restore it exactly where it belongs in COMPRESSED
+- Code block mismatch: find the exact code block in ORIGINAL, restore it in COMPRESSED
+- Heading mismatch: restore the exact heading text from ORIGINAL into COMPRESSED
+- Do not touch any section not mentioned in the errors
+
+ORIGINAL (reference only):
+{original}
+
+COMPRESSED (fix this):
+{compressed}
+
+Return ONLY the fixed compressed file. No explanation.
+"""
+
+
+# ---------- Core Logic ----------
+
+
+def compress_file(filepath: Path) -> bool:
+ # Resolve and validate path
+ filepath = filepath.resolve()
+ MAX_FILE_SIZE = 500_000 # 500KB
+ if not filepath.exists():
+ raise FileNotFoundError(f"File not found: {filepath}")
+ if filepath.stat().st_size > MAX_FILE_SIZE:
+ raise ValueError(f"File too large to compress safely (max 500KB): {filepath}")
+
+ print(f"Processing: {filepath}")
+
+ if not should_compress(filepath):
+ print("Skipping (not natural language)")
+ return False
+
+ original_text = filepath.read_text(errors="ignore")
+ backup_path = filepath.with_name(filepath.stem + ".original.md")
+
+ # Check if backup already exists to prevent accidental overwriting
+ if backup_path.exists():
+ print(f"⚠️ Backup file already exists: {backup_path}")
+ print("The original backup may contain important content.")
+ print("Aborting to prevent data loss. Please remove or rename the backup file if you want to proceed.")
+ return False
+
+ # Step 1: Compress
+ print("Compressing with Claude...")
+ compressed = call_claude(build_compress_prompt(original_text))
+
+ # Save original as backup, write compressed to original path
+ backup_path.write_text(original_text)
+ filepath.write_text(compressed)
+
+ # Step 2: Validate + Retry
+ for attempt in range(MAX_RETRIES):
+ print(f"\nValidation attempt {attempt + 1}")
+
+ result = validate(backup_path, filepath)
+
+ if result.is_valid:
+ print("Validation passed")
+ break
+
+ print("❌ Validation failed:")
+ for err in result.errors:
+ print(f" - {err}")
+
+ if attempt == MAX_RETRIES - 1:
+ # Restore original on failure
+ filepath.write_text(original_text)
+ backup_path.unlink(missing_ok=True)
+ print("❌ Failed after retries — original restored")
+ return False
+
+ print("Fixing with Claude...")
+ compressed = call_claude(
+ build_fix_prompt(original_text, compressed, result.errors)
+ )
+ filepath.write_text(compressed)
+
+ return True
diff --git a/.agents/skills/caveman-compress/scripts/detect.py b/.agents/skills/caveman-compress/scripts/detect.py
new file mode 100644
index 0000000..5f50fd3
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/detect.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""Detect whether a file is natural language (compressible) or code/config (skip)."""
+
+import json
+import re
+from pathlib import Path
+
+# Extensions that are natural language and compressible
+COMPRESSIBLE_EXTENSIONS = {".md", ".txt", ".markdown", ".rst"}
+
+# Extensions that are code/config and should be skipped
+SKIP_EXTENSIONS = {
+ ".py", ".js", ".ts", ".tsx", ".jsx", ".json", ".yaml", ".yml",
+ ".toml", ".env", ".lock", ".css", ".scss", ".html", ".xml",
+ ".sql", ".sh", ".bash", ".zsh", ".go", ".rs", ".java", ".c",
+ ".cpp", ".h", ".hpp", ".rb", ".php", ".swift", ".kt", ".lua",
+ ".dockerfile", ".makefile", ".csv", ".ini", ".cfg",
+}
+
+# Patterns that indicate a line is code
+CODE_PATTERNS = [
+ re.compile(r"^\s*(import |from .+ import |require\(|const |let |var )"),
+ re.compile(r"^\s*(def |class |function |async function |export )"),
+ re.compile(r"^\s*(if\s*\(|for\s*\(|while\s*\(|switch\s*\(|try\s*\{)"),
+ re.compile(r"^\s*[\}\]\);]+\s*$"), # closing braces/brackets
+ re.compile(r"^\s*@\w+"), # decorators/annotations
+ re.compile(r'^\s*"[^"]+"\s*:\s*'), # JSON-like key-value
+ re.compile(r"^\s*\w+\s*=\s*[{\[\(\"']"), # assignment with literal
+]
+
+
+def _is_code_line(line: str) -> bool:
+ """Check if a line looks like code."""
+ return any(p.match(line) for p in CODE_PATTERNS)
+
+
+def _is_json_content(text: str) -> bool:
+ """Check if content is valid JSON."""
+ try:
+ json.loads(text)
+ return True
+ except (json.JSONDecodeError, ValueError):
+ return False
+
+
+def _is_yaml_content(lines: list[str]) -> bool:
+ """Heuristic: check if content looks like YAML."""
+ yaml_indicators = 0
+ for line in lines[:30]:
+ stripped = line.strip()
+ if stripped.startswith("---"):
+ yaml_indicators += 1
+ elif re.match(r"^\w[\w\s]*:\s", stripped):
+ yaml_indicators += 1
+ elif stripped.startswith("- ") and ":" in stripped:
+ yaml_indicators += 1
+ # If most non-empty lines look like YAML
+ non_empty = sum(1 for l in lines[:30] if l.strip())
+ return non_empty > 0 and yaml_indicators / non_empty > 0.6
+
+
+def detect_file_type(filepath: Path) -> str:
+ """Classify a file as 'natural_language', 'code', 'config', or 'unknown'.
+
+ Returns:
+ One of: 'natural_language', 'code', 'config', 'unknown'
+ """
+ ext = filepath.suffix.lower()
+
+ # Extension-based classification
+ if ext in COMPRESSIBLE_EXTENSIONS:
+ return "natural_language"
+ if ext in SKIP_EXTENSIONS:
+ return "code" if ext not in {".json", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".env"} else "config"
+
+ # Extensionless files (like TODO, LICENSE) — check content
+ if not ext:
+ try:
+ text = filepath.read_text(errors="ignore")
+ except (OSError, PermissionError):
+ return "unknown"
+
+ lines = text.splitlines()[:50]
+
+ if _is_json_content(text[:10000]):
+ return "config"
+ if _is_yaml_content(lines):
+ return "config"
+
+ code_lines = sum(1 for l in lines if l.strip() and _is_code_line(l))
+ non_empty = sum(1 for l in lines if l.strip())
+ if non_empty > 0 and code_lines / non_empty > 0.4:
+ return "code"
+
+ return "natural_language"
+
+ return "unknown"
+
+
+def should_compress(filepath: Path) -> bool:
+ """Return True if the file is natural language and should be compressed."""
+ if not filepath.is_file():
+ return False
+ # Skip backup files
+ if filepath.name.endswith(".original.md"):
+ return False
+ return detect_file_type(filepath) == "natural_language"
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) < 2:
+ print("Usage: python detect.py [file2] ...")
+ sys.exit(1)
+
+ for path_str in sys.argv[1:]:
+ p = Path(path_str).resolve()
+ file_type = detect_file_type(p)
+ compress = should_compress(p)
+ print(f" {p.name:30s} type={file_type:20s} compress={compress}")
diff --git a/.agents/skills/caveman-compress/scripts/validate.py b/.agents/skills/caveman-compress/scripts/validate.py
new file mode 100644
index 0000000..3c4d4c1
--- /dev/null
+++ b/.agents/skills/caveman-compress/scripts/validate.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+import re
+from pathlib import Path
+
+URL_REGEX = re.compile(r"https?://[^\s)]+")
+FENCE_OPEN_REGEX = re.compile(r"^(\s{0,3})(`{3,}|~{3,})(.*)$")
+HEADING_REGEX = re.compile(r"^(#{1,6})\s+(.*)", re.MULTILINE)
+BULLET_REGEX = re.compile(r"^\s*[-*+]\s+", re.MULTILINE)
+
+# crude but effective path detection
+# Requires either a path prefix (./ ../ / or drive letter) or a slash/backslash within the match
+PATH_REGEX = re.compile(r"(?:\./|\.\./|/|[A-Za-z]:\\)[\w\-/\\\.]+|[\w\-\.]+[/\\][\w\-/\\\.]+")
+
+
+class ValidationResult:
+ def __init__(self):
+ self.is_valid = True
+ self.errors = []
+ self.warnings = []
+
+ def add_error(self, msg):
+ self.is_valid = False
+ self.errors.append(msg)
+
+ def add_warning(self, msg):
+ self.warnings.append(msg)
+
+
+def read_file(path: Path) -> str:
+ return path.read_text(errors="ignore")
+
+
+# ---------- Extractors ----------
+
+
+def extract_headings(text):
+ return [(level, title.strip()) for level, title in HEADING_REGEX.findall(text)]
+
+
+def extract_code_blocks(text):
+ """Line-based fenced code block extractor.
+
+ Handles ``` and ~~~ fences with variable length (CommonMark: closing
+ fence must use same char and be at least as long as opening). Supports
+ nested fences (e.g. an outer 4-backtick block wrapping inner 3-backtick
+ content).
+ """
+ blocks = []
+ lines = text.split("\n")
+ i = 0
+ n = len(lines)
+ while i < n:
+ m = FENCE_OPEN_REGEX.match(lines[i])
+ if not m:
+ i += 1
+ continue
+ fence_char = m.group(2)[0]
+ fence_len = len(m.group(2))
+ open_line = lines[i]
+ block_lines = [open_line]
+ i += 1
+ closed = False
+ while i < n:
+ close_m = FENCE_OPEN_REGEX.match(lines[i])
+ if (
+ close_m
+ and close_m.group(2)[0] == fence_char
+ and len(close_m.group(2)) >= fence_len
+ and close_m.group(3).strip() == ""
+ ):
+ block_lines.append(lines[i])
+ closed = True
+ i += 1
+ break
+ block_lines.append(lines[i])
+ i += 1
+ if closed:
+ blocks.append("\n".join(block_lines))
+ # Unclosed fences are silently skipped — they indicate malformed markdown
+ # and including them would cause false-positive validation failures.
+ return blocks
+
+
+def extract_urls(text):
+ return set(URL_REGEX.findall(text))
+
+
+def extract_paths(text):
+ return set(PATH_REGEX.findall(text))
+
+
+def count_bullets(text):
+ return len(BULLET_REGEX.findall(text))
+
+
+# ---------- Validators ----------
+
+
+def validate_headings(orig, comp, result):
+ h1 = extract_headings(orig)
+ h2 = extract_headings(comp)
+
+ if len(h1) != len(h2):
+ result.add_error(f"Heading count mismatch: {len(h1)} vs {len(h2)}")
+
+ if h1 != h2:
+ result.add_warning("Heading text/order changed")
+
+
+def validate_code_blocks(orig, comp, result):
+ c1 = extract_code_blocks(orig)
+ c2 = extract_code_blocks(comp)
+
+ if c1 != c2:
+ result.add_error("Code blocks not preserved exactly")
+
+
+def validate_urls(orig, comp, result):
+ u1 = extract_urls(orig)
+ u2 = extract_urls(comp)
+
+ if u1 != u2:
+ result.add_error(f"URL mismatch: lost={u1 - u2}, added={u2 - u1}")
+
+
+def validate_paths(orig, comp, result):
+ p1 = extract_paths(orig)
+ p2 = extract_paths(comp)
+
+ if p1 != p2:
+ result.add_warning(f"Path mismatch: lost={p1 - p2}, added={p2 - p1}")
+
+
+def validate_bullets(orig, comp, result):
+ b1 = count_bullets(orig)
+ b2 = count_bullets(comp)
+
+ if b1 == 0:
+ return
+
+ diff = abs(b1 - b2) / b1
+
+ if diff > 0.15:
+ result.add_warning(f"Bullet count changed too much: {b1} -> {b2}")
+
+
+# ---------- Main ----------
+
+
+def validate(original_path: Path, compressed_path: Path) -> ValidationResult:
+ result = ValidationResult()
+
+ orig = read_file(original_path)
+ comp = read_file(compressed_path)
+
+ validate_headings(orig, comp, result)
+ validate_code_blocks(orig, comp, result)
+ validate_urls(orig, comp, result)
+ validate_paths(orig, comp, result)
+ validate_bullets(orig, comp, result)
+
+ return result
+
+
+# ---------- CLI ----------
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) != 3:
+ print("Usage: python validate.py ")
+ sys.exit(1)
+
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+
+ res = validate(orig, comp)
+
+ print(f"\nValid: {res.is_valid}")
+
+ if res.errors:
+ print("\nErrors:")
+ for e in res.errors:
+ print(f" - {e}")
+
+ if res.warnings:
+ print("\nWarnings:")
+ for w in res.warnings:
+ print(f" - {w}")
diff --git a/.agents/skills/caveman-help/SKILL.md b/.agents/skills/caveman-help/SKILL.md
new file mode 100644
index 0000000..078e487
--- /dev/null
+++ b/.agents/skills/caveman-help/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: caveman-help
+description: >
+ Quick-reference card for all caveman modes, skills, and commands.
+ One-shot display, not a persistent mode. Trigger: /caveman-help,
+ "caveman help", "what caveman commands", "how do I use caveman".
+---
+
+# Caveman Help
+
+Display this reference card when invoked. One-shot — do NOT change mode, write flag files, or persist anything. Output in caveman style.
+
+## Modes
+
+| Mode | Trigger | What change |
+|------|---------|-------------|
+| **Lite** | `/caveman lite` | Drop filler. Keep sentence structure. |
+| **Full** | `/caveman` | Drop articles, filler, pleasantries, hedging. Fragments OK. Default. |
+| **Ultra** | `/caveman ultra` | Extreme compression. Bare fragments. Tables over prose. |
+| **Wenyan-Lite** | `/caveman wenyan-lite` | Classical Chinese style, light compression. |
+| **Wenyan-Full** | `/caveman wenyan` | Full 文言文. Maximum classical terseness. |
+| **Wenyan-Ultra** | `/caveman wenyan-ultra` | Extreme. Ancient scholar on a budget. |
+
+Mode stick until changed or session end.
+
+## Skills
+
+| Skill | Trigger | What it do |
+|-------|---------|-----------|
+| **caveman-commit** | `/caveman-commit` | Terse commit messages. Conventional Commits. ≤50 char subject. |
+| **caveman-review** | `/caveman-review` | One-line PR comments: `L42: bug: user null. Add guard.` |
+| **caveman-compress** | `/caveman:compress <file>` | Compress .md files to caveman prose. Saves ~46% input tokens. |
+| **caveman-help** | `/caveman-help` | This card. |
+
+## Deactivate
+
+Say "stop caveman" or "normal mode". Resume anytime with `/caveman`.
+
+## Configure Default Mode
+
+Default mode = `full`. Change it:
+
+**Environment variable** (highest priority):
+```bash
+export CAVEMAN_DEFAULT_MODE=ultra
+```
+
+**Config file** (`~/.config/caveman/config.json`):
+```json
+{ "defaultMode": "lite" }
+```
+
+Set `"off"` to disable auto-activation on session start. User can still activate manually with `/caveman`.
+
+Resolution: env var > config file > `full`.
+
+## More
+
+Full docs: https://github.com/JuliusBrussee/caveman
diff --git a/.agents/skills/caveman-review/SKILL.md b/.agents/skills/caveman-review/SKILL.md
new file mode 100644
index 0000000..48f4adb
--- /dev/null
+++ b/.agents/skills/caveman-review/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: caveman-review
+description: >
+ Ultra-compressed code review comments. Cuts noise from PR feedback while preserving
+ the actionable signal. Each comment is one line: location, problem, fix. Use when user
+ says "review this PR", "code review", "review the diff", "/review", or invokes
+ /caveman-review. Auto-triggers when reviewing pull requests.
+---
+
+Write code review comments terse and actionable. One line per finding. Location, problem, fix. No throat-clearing.
+
+## Rules
+
+**Format:** `L<n>: <problem>. <fix>.` — or `<file>:L<n>: ...` when reviewing multi-file diffs.
+
+**Severity prefix (optional, when mixed):**
+- `🔴 bug:` — broken behavior, will cause incident
+- `🟡 risk:` — works but fragile (race, missing null check, swallowed error)
+- `🔵 nit:` — style, naming, micro-optim. Author can ignore
+- `❓ q:` — genuine question, not a suggestion
+
+**Drop:**
+- "I noticed that...", "It seems like...", "You might want to consider..."
+- "This is just a suggestion but..." — use `nit:` instead
+- "Great work!", "Looks good overall but..." — say it once at the top, not per comment
+- Restating what the line does — the reviewer can read the diff
+- Hedging ("perhaps", "maybe", "I think") — if unsure use `q:`
+
+**Keep:**
+- Exact line numbers
+- Exact symbol/function/variable names in backticks
+- Concrete fix, not "consider refactoring this"
+- The *why* if the fix isn't obvious from the problem statement
+
+## Examples
+
+❌ "I noticed that on line 42 you're not checking if the user object is null before accessing the email property. This could potentially cause a crash if the user is not found in the database. You might want to add a null check here."
+
+✅ `L42: 🔴 bug: user can be null after .find(). Add guard before .email.`
+
+❌ "It looks like this function is doing a lot of things and might benefit from being broken up into smaller functions for readability."
+
+✅ `L88-140: 🔵 nit: 50-line fn does 4 things. Extract validate/normalize/persist.`
+
+❌ "Have you considered what happens if the API returns a 429? I think we should probably handle that case."
+
+✅ `L23: 🟡 risk: no retry on 429. Wrap in withBackoff(3).`
+
+## Auto-Clarity
+
+Drop terse mode for: security findings (CVE-class bugs need full explanation + reference), architectural disagreements (need rationale, not just a one-liner), and onboarding contexts where the author is new and needs the "why". In those cases write a normal paragraph, then resume terse for the rest.
+
+## Boundaries
+
+Reviews only — does not write the code fix, does not approve/request-changes, does not run linters. Output the comment(s) ready to paste into the PR. "stop caveman-review" or "normal mode": revert to verbose review style.
\ No newline at end of file
diff --git a/.agents/skills/caveman/SKILL.md b/.agents/skills/caveman/SKILL.md
new file mode 100644
index 0000000..2ab498b
--- /dev/null
+++ b/.agents/skills/caveman/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: caveman
+description: >
+ Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman
+ while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra,
+ wenyan-lite, wenyan-full, wenyan-ultra.
+ Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens",
+ "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested.
+---
+
+Respond terse like smart caveman. All technical substance stay. Only fluff die.
+
+## Persistence
+
+ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode".
+
+Default: **full**. Switch: `/caveman lite|full|ultra`.
+
+## Rules
+
+Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact.
+
+Pattern: `[thing] [action] [reason]. [next step].`
+
+Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..."
+Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:"
+
+## Intensity
+
+| Level | What change |
+|-------|------------|
+| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight |
+| **full** | Drop articles, fragments OK, short synonyms. Classic caveman |
+| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough |
+| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register |
+| **wenyan-full** | Maximum classical terseness. Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) |
+| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse |
+
+Example — "Why React component re-render?"
+- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`."
+- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`."
+- ultra: "Inline obj prop → new ref → re-render. `useMemo`."
+- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。"
+- wenyan-full: "物出新參照,致重繪。useMemo .Wrap之。"
+- wenyan-ultra: "新參照→重繪。useMemo Wrap。"
+
+Example — "Explain database connection pooling."
+- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead."
+- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead."
+- ultra: "Pool = reuse DB conn. Skip handshake → fast under load."
+- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。"
+- wenyan-ultra: "池reuse conn。skip handshake → fast。"
+
+## Auto-Clarity
+
+Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done.
+
+Example — destructive op:
+> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone.
+> ```sql
+> DROP TABLE users;
+> ```
+> Caveman resume. Verify backup exist first.
+
+## Boundaries
+
+Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end.
\ No newline at end of file
diff --git a/.claude/skills/caveman-commit/SKILL.md b/.claude/skills/caveman-commit/SKILL.md
new file mode 100644
index 0000000..729318c
--- /dev/null
+++ b/.claude/skills/caveman-commit/SKILL.md
@@ -0,0 +1,65 @@
+---
+name: caveman-commit
+description: >
+ Ultra-compressed commit message generator. Cuts noise from commit messages while preserving
+ intent and reasoning. Conventional Commits format. Subject ≤50 chars, body only when "why"
+ isn't obvious. Use when user says "write a commit", "commit message", "generate commit",
+ "/commit", or invokes /caveman-commit. Auto-triggers when staging changes.
+---
+
+Write commit messages terse and exact. Conventional Commits format. No fluff. Why over what.
+
+## Rules
+
+**Subject line:**
+- `(): ` — `` optional
+- Types: `feat`, `fix`, `refactor`, `perf`, `docs`, `test`, `chore`, `build`, `ci`, `style`, `revert`
+- Imperative mood: "add", "fix", "remove" — not "added", "adds", "adding"
+- ≤50 chars when possible, hard cap 72
+- No trailing period
+- Match project convention for capitalization after the colon
+
+**Body (only if needed):**
+- Skip entirely when subject is self-explanatory
+- Add body only for: non-obvious *why*, breaking changes, migration notes, linked issues
+- Wrap at 72 chars
+- Bullets `-` not `*`
+- Reference issues/PRs at end: `Closes #42`, `Refs #17`
+
+**What NEVER goes in:**
+- "This commit does X", "I", "we", "now", "currently" — the diff says what
+- "As requested by..." — use Co-authored-by trailer
+- "Generated with Claude Code" or any AI attribution
+- Emoji (unless project convention requires)
+- Restating the file name when scope already says it
+
+## Examples
+
+Diff: new endpoint for user profile with body explaining the why
+- ❌ "feat: add a new endpoint to get user profile information from the database"
+- ✅
+ ```
+ feat(api): add GET /users/:id/profile
+
+ Mobile client needs profile data without the full user payload
+ to reduce LTE bandwidth on cold-launch screens.
+
+ Closes #128
+ ```
+
+Diff: breaking API change
+- ✅
+ ```
+ feat(api)!: rename /v1/orders to /v1/checkout
+
+ BREAKING CHANGE: clients on /v1/orders must migrate to /v1/checkout
+ before 2026-06-01. Old route returns 410 after that date.
+ ```
+
+## Auto-Clarity
+
+Always include body for: breaking changes, security fixes, data migrations, anything reverting a prior commit. Never compress these into subject-only — future debuggers need the context.
+
+## Boundaries
+
+Only generates the commit message. Does not run `git commit`, does not stage files, does not amend. Output the message as a code block ready to paste. "stop caveman-commit" or "normal mode": revert to verbose commit style.
\ No newline at end of file
diff --git a/.claude/skills/caveman-compress/README.md b/.claude/skills/caveman-compress/README.md
new file mode 100644
index 0000000..7c0e8ba
--- /dev/null
+++ b/.claude/skills/caveman-compress/README.md
@@ -0,0 +1,163 @@
+
+
+
+
+caveman-compress
+
+
+ shrink memory file. save token every session.
+
+
+---
+
+A Claude Code skill that compresses your project memory files (`CLAUDE.md`, todos, preferences) into caveman format — so every session loads fewer tokens automatically.
+
+Claude read `CLAUDE.md` on every session start. If file big, cost big. Caveman make file small. Cost go down forever.
+
+## What It Do
+
+```
+/caveman:compress CLAUDE.md
+```
+
+```
+CLAUDE.md ← compressed (Claude reads this — fewer tokens every session)
+CLAUDE.original.md ← human-readable backup (you edit this)
+```
+
+Original never lost. You can read and edit `.original.md`. Run skill again to re-compress after edits.
+
+## Benchmarks
+
+Real results on real project files:
+
+| File | Original | Compressed | Saved |
+|------|----------:|----------:|------:|
+| `claude-md-preferences.md` | 706 | 285 | **59.6%** |
+| `project-notes.md` | 1145 | 535 | **53.3%** |
+| `claude-md-project.md` | 1122 | 636 | **43.3%** |
+| `todo-list.md` | 627 | 388 | **38.1%** |
+| `mixed-with-code.md` | 888 | 560 | **36.9%** |
+| **Average** | **898** | **481** | **46%** |
+
+All validations passed ✅ — headings, code blocks, URLs, file paths preserved exactly.
+
+## Before / After
+
+
+
+
+
+### 📄 Original (706 tokens)
+
+> "I strongly prefer TypeScript with strict mode enabled for all new code. Please don't use `any` type unless there's genuinely no way around it, and if you do, leave a comment explaining the reasoning. I find that taking the time to properly type things catches a lot of bugs before they ever make it to runtime."
+
+
+
+
+### 🪨 Caveman (285 tokens)
+
+> "Prefer TypeScript strict mode always. No `any` unless unavoidable — comment why if used. Proper types catch bugs early."
+
+
+
+
+
+**Same instructions. 60% fewer tokens. Every. Single. Session.**
+
+## Security
+
+`caveman-compress` is flagged as Snyk High Risk due to subprocess and file I/O patterns detected by static analysis. This is a false positive — see [SECURITY.md](./SECURITY.md) for a full explanation of what the skill does and does not do.
+
+## Install
+
+Compress is built in with the `caveman` plugin. Install `caveman` once, then use `/caveman:compress`.
+
+If you need local files, the compress skill lives at:
+
+```bash
+caveman-compress/
+```
+
+**Requires:** Python 3.10+
+
+## Usage
+
+```
+/caveman:compress <file>
+```
+
+Examples:
+```
+/caveman:compress CLAUDE.md
+/caveman:compress docs/preferences.md
+/caveman:compress todos.md
+```
+
+### What files work
+
+| Type | Compress? |
+|------|-----------|
+| `.md`, `.txt`, `.rst` | ✅ Yes |
+| Extensionless natural language | ✅ Yes |
+| `.py`, `.js`, `.ts`, `.json`, `.yaml` | ❌ Skip (code/config) |
+| `*.original.md` | ❌ Skip (backup files) |
+
+## How It Work
+
+```
+/caveman:compress CLAUDE.md
+ ↓
+detect file type (no tokens)
+ ↓
+Claude compresses (tokens — one call)
+ ↓
+validate output (no tokens)
+ checks: headings, code blocks, URLs, file paths, bullets
+ ↓
+if errors: Claude fixes cherry-picked issues only (tokens — targeted fix)
+ does NOT recompress — only patches broken parts
+ ↓
+retry up to 2 times
+ ↓
+write compressed → CLAUDE.md
+write original → CLAUDE.original.md
+```
+
+Only two things use tokens: initial compression + targeted fix if validation fails. Everything else is local Python.
+
+## What Is Preserved
+
+Caveman compress natural language. It never touch:
+
+- Code blocks (` ``` ` fenced or indented)
+- Inline code (`` `backtick content` ``)
+- URLs and links
+- File paths (`/src/components/...`)
+- Commands (`npm install`, `git commit`)
+- Technical terms, library names, API names
+- Headings (exact text preserved)
+- Tables (structure preserved, cell text compressed)
+- Dates, version numbers, numeric values
+
+## Why This Matter
+
+`CLAUDE.md` loads on **every session start**. A 1000-token project memory file costs tokens every single time you open a project. Over 100 sessions that's 100,000 tokens of overhead — just for context you already wrote.
+
+Caveman cut that by ~46% on average. Same instructions. Same accuracy. Less waste.
+
+```
+┌────────────────────────────────────────────┐
+│ TOKEN SAVINGS PER FILE █████ 46% │
+│ SESSIONS THAT BENEFIT ██████████ 100% │
+│ INFORMATION PRESERVED ██████████ 100% │
+│ SETUP TIME █ 1x │
+└────────────────────────────────────────────┘
+```
+
+## Part of Caveman
+
+This skill is part of the [caveman](https://github.com/JuliusBrussee/caveman) toolkit — making Claude use fewer tokens without losing accuracy.
+
+- **caveman** — make Claude *speak* like caveman (cuts response tokens ~65%)
+- **caveman-compress** — make Claude *read* less (cuts context tokens ~46%)
diff --git a/.claude/skills/caveman-compress/SECURITY.md b/.claude/skills/caveman-compress/SECURITY.md
new file mode 100644
index 0000000..693108c
--- /dev/null
+++ b/.claude/skills/caveman-compress/SECURITY.md
@@ -0,0 +1,31 @@
+# Security
+
+## Snyk High Risk Rating
+
+`caveman-compress` receives a Snyk High Risk rating due to static analysis heuristics. This document explains what the skill does and does not do.
+
+### What triggers the rating
+
+1. **subprocess usage**: The skill calls the `claude` CLI via `subprocess.run()` as a fallback when `ANTHROPIC_API_KEY` is not set. The subprocess call uses a fixed argument list — no shell interpolation occurs. User file content is passed via stdin, not as a shell argument.
+
+2. **File read/write**: The skill reads the file the user explicitly points it at, compresses it, and writes the result back to the same path. A `.original.md` backup is saved alongside it. No files outside the user-specified path are read or written.
+
+### What the skill does NOT do
+
+- Does not execute user file content as code
+- Does not make network requests except to Anthropic's API (via SDK or CLI)
+- Does not access files outside the path the user provides
+- Does not use shell=True or string interpolation in subprocess calls
+- Does not collect or transmit any data beyond the file being compressed
+
+### Auth behavior
+
+If `ANTHROPIC_API_KEY` is set, the skill uses the Anthropic Python SDK directly (no subprocess). If not set, it falls back to the `claude` CLI, which uses the user's existing Claude desktop authentication.
+
+### File size limit
+
+Files larger than 500KB are rejected before any API call is made.
+
+### Reporting a vulnerability
+
+If you believe you've found a genuine security issue, please open a GitHub issue with the label `security`.
diff --git a/.claude/skills/caveman-compress/SKILL.md b/.claude/skills/caveman-compress/SKILL.md
new file mode 100644
index 0000000..7b3e3aa
--- /dev/null
+++ b/.claude/skills/caveman-compress/SKILL.md
@@ -0,0 +1,111 @@
+---
+name: caveman-compress
+description: >
+ Compress natural language memory files (CLAUDE.md, todos, preferences) into caveman format
+ to save input tokens. Preserves all technical substance, code, URLs, and structure.
+ Compressed version overwrites the original file. Human-readable backup saved as FILE.original.md.
+ Trigger: /caveman:compress or "compress memory file"
+---
+
+# Caveman Compress
+
+## Purpose
+
+Compress natural language files (CLAUDE.md, todos, preferences) into caveman-speak to reduce input tokens. Compressed version overwrites original. Human-readable backup saved as `.original.md`.
+
+## Trigger
+
+`/caveman:compress <file>` or when user asks to compress a memory file.
+
+## Process
+
+1. The compression scripts live in `caveman-compress/scripts/` (adjacent to this SKILL.md). If the path is not immediately available, search for `caveman-compress/scripts/__main__.py`.
+
+2. Run:
+
+cd caveman-compress && python3 -m scripts <file>
+
+3. The CLI will:
+- detect file type (no tokens)
+- call Claude to compress
+- validate output (no tokens)
+- if errors: cherry-pick fix with Claude (targeted fixes only, no recompression)
+- retry up to 2 times
+- if still failing after 2 retries: report error to user, leave original file untouched
+
+4. Return result to user
+
+## Compression Rules
+
+### Remove
+- Articles: a, an, the
+- Filler: just, really, basically, actually, simply, essentially, generally
+- Pleasantries: "sure", "certainly", "of course", "happy to", "I'd recommend"
+- Hedging: "it might be worth", "you could consider", "it would be good to"
+- Redundant phrasing: "in order to" → "to", "make sure to" → "ensure", "the reason is because" → "because"
+- Connective fluff: "however", "furthermore", "additionally", "in addition"
+
+### Preserve EXACTLY (never modify)
+- Code blocks (fenced ``` and indented)
+- Inline code (`backtick content`)
+- URLs and links (full URLs, markdown links)
+- File paths (`/src/components/...`, `./config.yaml`)
+- Commands (`npm install`, `git commit`, `docker build`)
+- Technical terms (library names, API names, protocols, algorithms)
+- Proper nouns (project names, people, companies)
+- Dates, version numbers, numeric values
+- Environment variables (`$HOME`, `NODE_ENV`)
+
+### Preserve Structure
+- All markdown headings (keep exact heading text, compress body below)
+- Bullet point hierarchy (keep nesting level)
+- Numbered lists (keep numbering)
+- Tables (compress cell text, keep structure)
+- Frontmatter/YAML headers in markdown files
+
+### Compress
+- Use short synonyms: "big" not "extensive", "fix" not "implement a solution for", "use" not "utilize"
+- Fragments OK: "Run tests before commit" not "You should always run tests before committing"
+- Drop "you should", "make sure to", "remember to" — just state the action
+- Merge redundant bullets that say the same thing differently
+- Keep one example where multiple examples show the same pattern
+
+CRITICAL RULE:
+Anything inside ``` ... ``` must be copied EXACTLY.
+Do not:
+- remove comments
+- remove spacing
+- reorder lines
+- shorten commands
+- simplify anything
+
+Inline code (`...`) must be preserved EXACTLY.
+Do not modify anything inside backticks.
+
+If file contains code blocks:
+- Treat code blocks as read-only regions
+- Only compress text outside them
+- Do not merge sections around code
+
+## Pattern
+
+Original:
+> You should always make sure to run the test suite before pushing any changes to the main branch. This is important because it helps catch bugs early and prevents broken builds from being deployed to production.
+
+Compressed:
+> Run tests before push to main. Catch bugs early, prevent broken prod deploys.
+
+Original:
+> The application uses a microservices architecture with the following components. The API gateway handles all incoming requests and routes them to the appropriate service. The authentication service is responsible for managing user sessions and JWT tokens.
+
+Compressed:
+> Microservices architecture. API gateway route all requests to services. Auth service manage user sessions + JWT tokens.
+
+## Boundaries
+
+- ONLY compress natural language files (.md, .txt, extensionless)
+- NEVER modify: .py, .js, .ts, .json, .yaml, .yml, .toml, .env, .lock, .css, .html, .xml, .sql, .sh
+- If file has mixed content (prose + code), compress ONLY the prose sections
+- If unsure whether something is code or prose, leave it unchanged
+- Original file is backed up as FILE.original.md before overwriting
+- Never compress FILE.original.md (skip it)
diff --git a/.claude/skills/caveman-compress/scripts/__init__.py b/.claude/skills/caveman-compress/scripts/__init__.py
new file mode 100644
index 0000000..16b8c53
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/__init__.py
@@ -0,0 +1,9 @@
+"""Caveman compress scripts.
+
+This package provides tools to compress natural language markdown files
+into caveman format to save input tokens.
+"""
+
+__all__ = ["cli", "compress", "detect", "validate"]
+
+__version__ = "1.0.0"
diff --git a/.claude/skills/caveman-compress/scripts/__main__.py b/.claude/skills/caveman-compress/scripts/__main__.py
new file mode 100644
index 0000000..4e28416
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+# Package entry point for `python3 -m scripts <file>` (see SKILL.md usage).
+main()
diff --git a/.claude/skills/caveman-compress/scripts/benchmark.py b/.claude/skills/caveman-compress/scripts/benchmark.py
new file mode 100644
index 0000000..eac927d
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/benchmark.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import sys
+
+# Support both direct execution and module import
+try:
+ from .validate import validate
+except ImportError:
+ sys.path.insert(0, str(Path(__file__).parent))
+ from validate import validate
+
+try:
+ import tiktoken
+ _enc = tiktoken.get_encoding("o200k_base")
+except ImportError:
+ _enc = None
+
+
+def count_tokens(text):
+    """Count tokens with tiktoken when installed; otherwise approximate with word count."""
+    if _enc is None:
+        return len(text.split())  # fallback: word count
+    return len(_enc.encode(text))
+
+
+def benchmark_pair(orig_path: Path, comp_path: Path):
+    """Return (name, orig_tokens, comp_tokens, saved_pct, is_valid) for one file pair."""
+    orig_text = orig_path.read_text()
+    comp_text = comp_path.read_text()
+
+    orig_tokens = count_tokens(orig_text)
+    comp_tokens = count_tokens(comp_text)
+    # Guard against division by zero on an empty original file.
+    saved = 100 * (orig_tokens - comp_tokens) / orig_tokens if orig_tokens > 0 else 0.0
+    result = validate(orig_path, comp_path)
+
+    return (comp_path.name, orig_tokens, comp_tokens, saved, result.is_valid)
+
+
+def print_table(rows):
+    """Render benchmark rows (from benchmark_pair) as a markdown table on stdout."""
+    print("\n| File | Original | Compressed | Saved % | Valid |")
+    print("|------|----------|------------|---------|-------|")
+    for r in rows:
+        print(f"| {r[0]} | {r[1]} | {r[2]} | {r[3]:.1f}% | {'✅' if r[4] else '❌'} |")
+
+
+def main():
+    """Benchmark one explicit original/compressed pair, or every pair in the tests dir."""
+    # Direct file pair: python3 benchmark.py original.md compressed.md
+    if len(sys.argv) == 3:
+        orig = Path(sys.argv[1]).resolve()
+        comp = Path(sys.argv[2]).resolve()
+        if not orig.exists():
+            print(f"❌ Not found: {orig}")
+            sys.exit(1)
+        if not comp.exists():
+            print(f"❌ Not found: {comp}")
+            sys.exit(1)
+        print_table([benchmark_pair(orig, comp)])
+        return
+
+    # Glob mode: repo_root/tests/caveman-compress/
+    # NOTE(review): from scripts/benchmark.py, parent.parent.parent resolves to
+    # the skills directory, not the repo root the comment implies — verify this
+    # matches where the test fixtures actually live.
+    tests_dir = Path(__file__).parent.parent.parent / "tests" / "caveman-compress"
+    if not tests_dir.exists():
+        print(f"❌ Tests dir not found: {tests_dir}")
+        sys.exit(1)
+
+    rows = []
+    for orig in sorted(tests_dir.glob("*.original.md")):
+        # Pair "foo.original.md" with its compressed sibling "foo.md".
+        comp = orig.with_name(orig.stem.removesuffix(".original") + ".md")
+        if comp.exists():
+            rows.append(benchmark_pair(orig, comp))
+
+    if not rows:
+        print("No compressed file pairs found.")
+        return
+
+    print_table(rows)
+
+
+# Allow running this file directly as a script (as well as via module import).
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/caveman-compress/scripts/cli.py b/.claude/skills/caveman-compress/scripts/cli.py
new file mode 100644
index 0000000..428fd86
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/cli.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""
+Caveman Compress CLI
+
+Usage:
+    caveman <file>
+"""
+
+import sys
+from pathlib import Path
+
+from .compress import compress_file
+from .detect import detect_file_type, should_compress
+
+
+def print_usage():
+ print("Usage: caveman ")
+
+
+def main():
+    """CLI entry point: validate the argument, detect file type, and compress.
+
+    Exit codes: 0 success or skipped (not compressible), 1 usage/IO error,
+    2 compression failed after retries, 130 interrupted.
+    """
+    if len(sys.argv) != 2:
+        print_usage()
+        sys.exit(1)
+
+    filepath = Path(sys.argv[1])
+
+    # Check file exists
+    if not filepath.exists():
+        print(f"❌ File not found: {filepath}")
+        sys.exit(1)
+
+    if not filepath.is_file():
+        print(f"❌ Not a file: {filepath}")
+        sys.exit(1)
+
+    filepath = filepath.resolve()
+
+    # Detect file type
+    file_type = detect_file_type(filepath)
+
+    print(f"Detected: {file_type}")
+
+    # Check if compressible
+    if not should_compress(filepath):
+        print("Skipping: file is not natural language (code/config)")
+        sys.exit(0)
+
+    print("Starting caveman compression...\n")
+
+    try:
+        success = compress_file(filepath)
+
+        if success:
+            print("\nCompression completed successfully")
+            # NOTE(review): this duplicates the backup-naming logic in
+            # compress.compress_file — keep the two in sync.
+            backup_path = filepath.with_name(filepath.stem + ".original.md")
+            print(f"Compressed: {filepath}")
+            print(f"Original: {backup_path}")
+            sys.exit(0)
+        else:
+            print("\n❌ Compression failed after retries")
+            sys.exit(2)
+
+    except KeyboardInterrupt:
+        # Conventional 128 + SIGINT(2) exit code.
+        print("\nInterrupted by user")
+        sys.exit(130)
+
+    except Exception as e:
+        print(f"\n❌ Error: {e}")
+        sys.exit(1)
+
+
+# Allow running this file directly as a script (as well as via `python -m scripts`).
+if __name__ == "__main__":
+    main()
diff --git a/.claude/skills/caveman-compress/scripts/compress.py b/.claude/skills/caveman-compress/scripts/compress.py
new file mode 100644
index 0000000..1622a7a
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/compress.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Caveman Memory Compression Orchestrator
+
+Usage:
+    python scripts/compress.py <file>
+"""
+
+import os
+import re
+import subprocess
+from pathlib import Path
+from typing import List
+
+OUTER_FENCE_REGEX = re.compile(
+ r"\A\s*(`{3,}|~{3,})[^\n]*\n(.*)\n\1\s*\Z", re.DOTALL
+)
+
+
+def strip_llm_wrapper(text: str) -> str:
+    """Strip an outer ```markdown ... ``` fence when it wraps the ENTIRE output.
+
+    Only a fence spanning the whole text is removed (group 2 of
+    OUTER_FENCE_REGEX is the wrapped body); inner fences are untouched.
+    Returns the text unchanged when no outer fence is present.
+    """
+    m = OUTER_FENCE_REGEX.match(text)
+    if m:
+        return m.group(2)
+    return text
+
+from .detect import should_compress
+from .validate import validate
+
+MAX_RETRIES = 2
+
+
+# ---------- Claude Calls ----------
+
+
+def call_claude(prompt: str) -> str:
+    """Send `prompt` to Claude and return the response with any outer fence stripped.
+
+    Prefers the Anthropic SDK when ANTHROPIC_API_KEY is set and the
+    `anthropic` package imports; otherwise falls back to the `claude` CLI
+    (which reuses the user's existing desktop auth).
+
+    Raises RuntimeError (with the CLI's stderr) when the CLI call fails.
+    """
+    api_key = os.environ.get("ANTHROPIC_API_KEY")
+    if api_key:
+        try:
+            import anthropic
+
+            client = anthropic.Anthropic(api_key=api_key)
+            msg = client.messages.create(
+                # Model overridable via CAVEMAN_MODEL env var.
+                model=os.environ.get("CAVEMAN_MODEL", "claude-sonnet-4-5"),
+                max_tokens=8192,
+                messages=[{"role": "user", "content": prompt}],
+            )
+            return strip_llm_wrapper(msg.content[0].text.strip())
+        except ImportError:
+            pass  # anthropic not installed, fall back to CLI
+    # Fallback: use claude CLI (handles desktop auth)
+    try:
+        result = subprocess.run(
+            # Fixed argument list; prompt goes via stdin — no shell interpolation.
+            ["claude", "--print"],
+            input=prompt,
+            text=True,
+            capture_output=True,
+            check=True,
+        )
+        return strip_llm_wrapper(result.stdout.strip())
+    except subprocess.CalledProcessError as e:
+        raise RuntimeError(f"Claude call failed:\n{e.stderr}")
+
+
+def build_compress_prompt(original: str) -> str:
+    """Build the one-shot compression prompt embedding `original` markdown verbatim."""
+    return f"""
+Compress this markdown into caveman format.
+
+STRICT RULES:
+- Do NOT modify anything inside ``` code blocks
+- Do NOT modify anything inside inline backticks
+- Preserve ALL URLs exactly
+- Preserve ALL headings exactly
+- Preserve file paths and commands
+- Return ONLY the compressed markdown body — do NOT wrap the entire output in a ```markdown fence or any other fence. Inner code blocks from the original stay as-is; do not add a new outer fence around the whole file.
+
+Only compress natural language.
+
+TEXT:
+{original}
+"""
+
+
+def build_fix_prompt(original: str, compressed: str, errors: List[str]) -> str:
+    """Build a targeted repair prompt: fix only the listed errors, no recompression."""
+    # Render validation errors as a bullet list for the prompt body.
+    errors_str = "\n".join(f"- {e}" for e in errors)
+    return f"""You are fixing a caveman-compressed markdown file. Specific validation errors were found.
+
+CRITICAL RULES:
+- DO NOT recompress or rephrase the file
+- ONLY fix the listed errors — leave everything else exactly as-is
+- The ORIGINAL is provided as reference only (to restore missing content)
+- Preserve caveman style in all untouched sections
+
+ERRORS TO FIX:
+{errors_str}
+
+HOW TO FIX:
+- Missing URL: find it in ORIGINAL, restore it exactly where it belongs in COMPRESSED
+- Code block mismatch: find the exact code block in ORIGINAL, restore it in COMPRESSED
+- Heading mismatch: restore the exact heading text from ORIGINAL into COMPRESSED
+- Do not touch any section not mentioned in the errors
+
+ORIGINAL (reference only):
+{original}
+
+COMPRESSED (fix this):
+{compressed}
+
+Return ONLY the fixed compressed file. No explanation.
+"""
+
+
+# ---------- Core Logic ----------
+
+
+def compress_file(filepath: Path) -> bool:
+    """Compress `filepath` in place, backing up the original as FILE.original.md.
+
+    Flow: size/type checks -> refuse if a backup already exists -> compress via
+    Claude -> validate up to MAX_RETRIES times, asking Claude for targeted fixes
+    between attempts. On final failure the original content is restored and the
+    backup removed. Returns True on success, False on skip/abort/failure.
+    """
+    # Resolve and validate path
+    filepath = filepath.resolve()
+    MAX_FILE_SIZE = 500_000  # 500KB
+    if not filepath.exists():
+        raise FileNotFoundError(f"File not found: {filepath}")
+    if filepath.stat().st_size > MAX_FILE_SIZE:
+        raise ValueError(f"File too large to compress safely (max 500KB): {filepath}")
+
+    print(f"Processing: {filepath}")
+
+    if not should_compress(filepath):
+        print("Skipping (not natural language)")
+        return False
+
+    # NOTE(review): errors="ignore" silently drops undecodable bytes — confirm
+    # that is acceptable for the memory files this is pointed at.
+    original_text = filepath.read_text(errors="ignore")
+    backup_path = filepath.with_name(filepath.stem + ".original.md")
+
+    # Check if backup already exists to prevent accidental overwriting
+    if backup_path.exists():
+        print(f"⚠️ Backup file already exists: {backup_path}")
+        print("The original backup may contain important content.")
+        print("Aborting to prevent data loss. Please remove or rename the backup file if you want to proceed.")
+        return False
+
+    # Step 1: Compress
+    print("Compressing with Claude...")
+    compressed = call_claude(build_compress_prompt(original_text))
+
+    # Save original as backup, write compressed to original path
+    backup_path.write_text(original_text)
+    filepath.write_text(compressed)
+
+    # Step 2: Validate + Retry
+    for attempt in range(MAX_RETRIES):
+        print(f"\nValidation attempt {attempt + 1}")
+
+        result = validate(backup_path, filepath)
+
+        if result.is_valid:
+            print("Validation passed")
+            break
+
+        print("❌ Validation failed:")
+        for err in result.errors:
+            print(f" - {err}")
+
+        if attempt == MAX_RETRIES - 1:
+            # Restore original on failure
+            filepath.write_text(original_text)
+            backup_path.unlink(missing_ok=True)
+            print("❌ Failed after retries — original restored")
+            return False
+
+        # Targeted fix only — build_fix_prompt forbids full recompression.
+        print("Fixing with Claude...")
+        compressed = call_claude(
+            build_fix_prompt(original_text, compressed, result.errors)
+        )
+        filepath.write_text(compressed)
+
+    return True
diff --git a/.claude/skills/caveman-compress/scripts/detect.py b/.claude/skills/caveman-compress/scripts/detect.py
new file mode 100644
index 0000000..5f50fd3
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/detect.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""Detect whether a file is natural language (compressible) or code/config (skip)."""
+
+import json
+import re
+from pathlib import Path
+
+# Extensions that are natural language and compressible
+COMPRESSIBLE_EXTENSIONS = {".md", ".txt", ".markdown", ".rst"}
+
+# Extensions that are code/config and should be skipped
+SKIP_EXTENSIONS = {
+ ".py", ".js", ".ts", ".tsx", ".jsx", ".json", ".yaml", ".yml",
+ ".toml", ".env", ".lock", ".css", ".scss", ".html", ".xml",
+ ".sql", ".sh", ".bash", ".zsh", ".go", ".rs", ".java", ".c",
+ ".cpp", ".h", ".hpp", ".rb", ".php", ".swift", ".kt", ".lua",
+ ".dockerfile", ".makefile", ".csv", ".ini", ".cfg",
+}
+
+# Patterns that indicate a line is code
+CODE_PATTERNS = [
+ re.compile(r"^\s*(import |from .+ import |require\(|const |let |var )"),
+ re.compile(r"^\s*(def |class |function |async function |export )"),
+ re.compile(r"^\s*(if\s*\(|for\s*\(|while\s*\(|switch\s*\(|try\s*\{)"),
+ re.compile(r"^\s*[\}\]\);]+\s*$"), # closing braces/brackets
+ re.compile(r"^\s*@\w+"), # decorators/annotations
+ re.compile(r'^\s*"[^"]+"\s*:\s*'), # JSON-like key-value
+ re.compile(r"^\s*\w+\s*=\s*[{\[\(\"']"), # assignment with literal
+]
+
+
+def _is_code_line(line: str) -> bool:
+ """Check if a line looks like code."""
+ return any(p.match(line) for p in CODE_PATTERNS)
+
+
+def _is_json_content(text: str) -> bool:
+ """Check if content is valid JSON."""
+ try:
+ json.loads(text)
+ return True
+ except (json.JSONDecodeError, ValueError):
+ return False
+
+
+def _is_yaml_content(lines: list[str]) -> bool:
+ """Heuristic: check if content looks like YAML."""
+ yaml_indicators = 0
+ for line in lines[:30]:
+ stripped = line.strip()
+ if stripped.startswith("---"):
+ yaml_indicators += 1
+ elif re.match(r"^\w[\w\s]*:\s", stripped):
+ yaml_indicators += 1
+ elif stripped.startswith("- ") and ":" in stripped:
+ yaml_indicators += 1
+ # If most non-empty lines look like YAML
+ non_empty = sum(1 for l in lines[:30] if l.strip())
+ return non_empty > 0 and yaml_indicators / non_empty > 0.6
+
+
+def detect_file_type(filepath: Path) -> str:
+ """Classify a file as 'natural_language', 'code', 'config', or 'unknown'.
+
+ Returns:
+ One of: 'natural_language', 'code', 'config', 'unknown'
+ """
+ ext = filepath.suffix.lower()
+
+ # Extension-based classification
+ if ext in COMPRESSIBLE_EXTENSIONS:
+ return "natural_language"
+ if ext in SKIP_EXTENSIONS:
+ return "code" if ext not in {".json", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".env"} else "config"
+
+ # Extensionless files (like CLAUDE.md, TODO) — check content
+ if not ext:
+ try:
+ text = filepath.read_text(errors="ignore")
+ except (OSError, PermissionError):
+ return "unknown"
+
+ lines = text.splitlines()[:50]
+
+ if _is_json_content(text[:10000]):
+ return "config"
+ if _is_yaml_content(lines):
+ return "config"
+
+ code_lines = sum(1 for l in lines if l.strip() and _is_code_line(l))
+ non_empty = sum(1 for l in lines if l.strip())
+ if non_empty > 0 and code_lines / non_empty > 0.4:
+ return "code"
+
+ return "natural_language"
+
+ return "unknown"
+
+
+def should_compress(filepath: Path) -> bool:
+ """Return True if the file is natural language and should be compressed."""
+ if not filepath.is_file():
+ return False
+ # Skip backup files
+ if filepath.name.endswith(".original.md"):
+ return False
+ return detect_file_type(filepath) == "natural_language"
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) < 2:
+        print("Usage: python detect.py <file1> [file2] ...")
+ sys.exit(1)
+
+ for path_str in sys.argv[1:]:
+ p = Path(path_str).resolve()
+ file_type = detect_file_type(p)
+ compress = should_compress(p)
+ print(f" {p.name:30s} type={file_type:20s} compress={compress}")
diff --git a/.claude/skills/caveman-compress/scripts/validate.py b/.claude/skills/caveman-compress/scripts/validate.py
new file mode 100644
index 0000000..3c4d4c1
--- /dev/null
+++ b/.claude/skills/caveman-compress/scripts/validate.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+import re
+from pathlib import Path
+
+URL_REGEX = re.compile(r"https?://[^\s)]+")
+FENCE_OPEN_REGEX = re.compile(r"^(\s{0,3})(`{3,}|~{3,})(.*)$")
+HEADING_REGEX = re.compile(r"^(#{1,6})\s+(.*)", re.MULTILINE)
+BULLET_REGEX = re.compile(r"^\s*[-*+]\s+", re.MULTILINE)
+
+# crude but effective path detection
+# Requires either a path prefix (./ ../ / or drive letter) or a slash/backslash within the match
+PATH_REGEX = re.compile(r"(?:\./|\.\./|/|[A-Za-z]:\\)[\w\-/\\\.]+|[\w\-\.]+[/\\][\w\-/\\\.]+")
+
+
+class ValidationResult:
+ def __init__(self):
+ self.is_valid = True
+ self.errors = []
+ self.warnings = []
+
+ def add_error(self, msg):
+ self.is_valid = False
+ self.errors.append(msg)
+
+ def add_warning(self, msg):
+ self.warnings.append(msg)
+
+
+def read_file(path: Path) -> str:
+ return path.read_text(errors="ignore")
+
+
+# ---------- Extractors ----------
+
+
+def extract_headings(text):
+ return [(level, title.strip()) for level, title in HEADING_REGEX.findall(text)]
+
+
+def extract_code_blocks(text):
+ """Line-based fenced code block extractor.
+
+ Handles ``` and ~~~ fences with variable length (CommonMark: closing
+ fence must use same char and be at least as long as opening). Supports
+ nested fences (e.g. an outer 4-backtick block wrapping inner 3-backtick
+ content).
+ """
+ blocks = []
+ lines = text.split("\n")
+ i = 0
+ n = len(lines)
+ while i < n:
+ m = FENCE_OPEN_REGEX.match(lines[i])
+ if not m:
+ i += 1
+ continue
+ fence_char = m.group(2)[0]
+ fence_len = len(m.group(2))
+ open_line = lines[i]
+ block_lines = [open_line]
+ i += 1
+ closed = False
+ while i < n:
+ close_m = FENCE_OPEN_REGEX.match(lines[i])
+ if (
+ close_m
+ and close_m.group(2)[0] == fence_char
+ and len(close_m.group(2)) >= fence_len
+ and close_m.group(3).strip() == ""
+ ):
+ block_lines.append(lines[i])
+ closed = True
+ i += 1
+ break
+ block_lines.append(lines[i])
+ i += 1
+ if closed:
+ blocks.append("\n".join(block_lines))
+ # Unclosed fences are silently skipped — they indicate malformed markdown
+ # and including them would cause false-positive validation failures.
+ return blocks
+
+
+def extract_urls(text):
+ return set(URL_REGEX.findall(text))
+
+
+def extract_paths(text):
+ return set(PATH_REGEX.findall(text))
+
+
+def count_bullets(text):
+ return len(BULLET_REGEX.findall(text))
+
+
+# ---------- Validators ----------
+
+
+def validate_headings(orig, comp, result):
+ h1 = extract_headings(orig)
+ h2 = extract_headings(comp)
+
+ if len(h1) != len(h2):
+ result.add_error(f"Heading count mismatch: {len(h1)} vs {len(h2)}")
+
+ if h1 != h2:
+ result.add_warning("Heading text/order changed")
+
+
+def validate_code_blocks(orig, comp, result):
+ c1 = extract_code_blocks(orig)
+ c2 = extract_code_blocks(comp)
+
+ if c1 != c2:
+ result.add_error("Code blocks not preserved exactly")
+
+
+def validate_urls(orig, comp, result):
+ u1 = extract_urls(orig)
+ u2 = extract_urls(comp)
+
+ if u1 != u2:
+ result.add_error(f"URL mismatch: lost={u1 - u2}, added={u2 - u1}")
+
+
+def validate_paths(orig, comp, result):
+ p1 = extract_paths(orig)
+ p2 = extract_paths(comp)
+
+ if p1 != p2:
+ result.add_warning(f"Path mismatch: lost={p1 - p2}, added={p2 - p1}")
+
+
+def validate_bullets(orig, comp, result):
+ b1 = count_bullets(orig)
+ b2 = count_bullets(comp)
+
+ if b1 == 0:
+ return
+
+ diff = abs(b1 - b2) / b1
+
+ if diff > 0.15:
+ result.add_warning(f"Bullet count changed too much: {b1} -> {b2}")
+
+
+# ---------- Main ----------
+
+
+def validate(original_path: Path, compressed_path: Path) -> ValidationResult:
+ result = ValidationResult()
+
+ orig = read_file(original_path)
+ comp = read_file(compressed_path)
+
+ validate_headings(orig, comp, result)
+ validate_code_blocks(orig, comp, result)
+ validate_urls(orig, comp, result)
+ validate_paths(orig, comp, result)
+ validate_bullets(orig, comp, result)
+
+ return result
+
+
+# ---------- CLI ----------
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) != 3:
+        print("Usage: python validate.py <original> <compressed>")
+ sys.exit(1)
+
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+
+ res = validate(orig, comp)
+
+ print(f"\nValid: {res.is_valid}")
+
+ if res.errors:
+ print("\nErrors:")
+ for e in res.errors:
+ print(f" - {e}")
+
+ if res.warnings:
+ print("\nWarnings:")
+ for w in res.warnings:
+ print(f" - {w}")
diff --git a/.claude/skills/caveman-help/SKILL.md b/.claude/skills/caveman-help/SKILL.md
new file mode 100644
index 0000000..078e487
--- /dev/null
+++ b/.claude/skills/caveman-help/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: caveman-help
+description: >
+ Quick-reference card for all caveman modes, skills, and commands.
+ One-shot display, not a persistent mode. Trigger: /caveman-help,
+ "caveman help", "what caveman commands", "how do I use caveman".
+---
+
+# Caveman Help
+
+Display this reference card when invoked. One-shot — do NOT change mode, write flag files, or persist anything. Output in caveman style.
+
+## Modes
+
+| Mode | Trigger | What change |
+|------|---------|-------------|
+| **Lite** | `/caveman lite` | Drop filler. Keep sentence structure. |
+| **Full** | `/caveman` | Drop articles, filler, pleasantries, hedging. Fragments OK. Default. |
+| **Ultra** | `/caveman ultra` | Extreme compression. Bare fragments. Tables over prose. |
+| **Wenyan-Lite** | `/caveman wenyan-lite` | Classical Chinese style, light compression. |
+| **Wenyan-Full** | `/caveman wenyan` | Full 文言文. Maximum classical terseness. |
+| **Wenyan-Ultra** | `/caveman wenyan-ultra` | Extreme. Ancient scholar on a budget. |
+
+Mode stick until changed or session end.
+
+## Skills
+
+| Skill | Trigger | What it do |
+|-------|---------|-----------|
+| **caveman-commit** | `/caveman-commit` | Terse commit messages. Conventional Commits. ≤50 char subject. |
+| **caveman-review** | `/caveman-review` | One-line PR comments: `L42: bug: user null. Add guard.` |
+| **caveman-compress** | `/caveman:compress <file>` | Compress .md files to caveman prose. Saves ~46% input tokens. |
+| **caveman-help** | `/caveman-help` | This card. |
+
+## Deactivate
+
+Say "stop caveman" or "normal mode". Resume anytime with `/caveman`.
+
+## Configure Default Mode
+
+Default mode = `full`. Change it:
+
+**Environment variable** (highest priority):
+```bash
+export CAVEMAN_DEFAULT_MODE=ultra
+```
+
+**Config file** (`~/.config/caveman/config.json`):
+```json
+{ "defaultMode": "lite" }
+```
+
+Set `"off"` to disable auto-activation on session start. User can still activate manually with `/caveman`.
+
+Resolution: env var > config file > `full`.
+
+## More
+
+Full docs: https://github.com/JuliusBrussee/caveman
diff --git a/.claude/skills/caveman-review/SKILL.md b/.claude/skills/caveman-review/SKILL.md
new file mode 100644
index 0000000..48f4adb
--- /dev/null
+++ b/.claude/skills/caveman-review/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: caveman-review
+description: >
+ Ultra-compressed code review comments. Cuts noise from PR feedback while preserving
+ the actionable signal. Each comment is one line: location, problem, fix. Use when user
+ says "review this PR", "code review", "review the diff", "/review", or invokes
+ /caveman-review. Auto-triggers when reviewing pull requests.
+---
+
+Write code review comments terse and actionable. One line per finding. Location, problem, fix. No throat-clearing.
+
+## Rules
+
+**Format:** `L<line>: <problem>. <fix>.` — or `<file>:L<line>: ...` when reviewing multi-file diffs.
+
+**Severity prefix (optional, when mixed):**
+- `🔴 bug:` — broken behavior, will cause incident
+- `🟡 risk:` — works but fragile (race, missing null check, swallowed error)
+- `🔵 nit:` — style, naming, micro-optim. Author can ignore
+- `❓ q:` — genuine question, not a suggestion
+
+**Drop:**
+- "I noticed that...", "It seems like...", "You might want to consider..."
+- "This is just a suggestion but..." — use `nit:` instead
+- "Great work!", "Looks good overall but..." — say it once at the top, not per comment
+- Restating what the line does — the reviewer can read the diff
+- Hedging ("perhaps", "maybe", "I think") — if unsure use `q:`
+
+**Keep:**
+- Exact line numbers
+- Exact symbol/function/variable names in backticks
+- Concrete fix, not "consider refactoring this"
+- The *why* if the fix isn't obvious from the problem statement
+
+## Examples
+
+❌ "I noticed that on line 42 you're not checking if the user object is null before accessing the email property. This could potentially cause a crash if the user is not found in the database. You might want to add a null check here."
+
+✅ `L42: 🔴 bug: user can be null after .find(). Add guard before .email.`
+
+❌ "It looks like this function is doing a lot of things and might benefit from being broken up into smaller functions for readability."
+
+✅ `L88-140: 🔵 nit: 50-line fn does 4 things. Extract validate/normalize/persist.`
+
+❌ "Have you considered what happens if the API returns a 429? I think we should probably handle that case."
+
+✅ `L23: 🟡 risk: no retry on 429. Wrap in withBackoff(3).`
+
+## Auto-Clarity
+
+Drop terse mode for: security findings (CVE-class bugs need full explanation + reference), architectural disagreements (need rationale, not just a one-liner), and onboarding contexts where the author is new and needs the "why". In those cases write a normal paragraph, then resume terse for the rest.
+
+## Boundaries
+
+Reviews only — does not write the code fix, does not approve/request-changes, does not run linters. Output the comment(s) ready to paste into the PR. "stop caveman-review" or "normal mode": revert to verbose review style.
\ No newline at end of file
diff --git a/.claude/skills/caveman/SKILL.md b/.claude/skills/caveman/SKILL.md
new file mode 100644
index 0000000..2ab498b
--- /dev/null
+++ b/.claude/skills/caveman/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: caveman
+description: >
+ Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman
+ while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra,
+ wenyan-lite, wenyan-full, wenyan-ultra.
+ Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens",
+ "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested.
+---
+
+Respond terse like smart caveman. All technical substance stay. Only fluff die.
+
+## Persistence
+
+ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode".
+
+Default: **full**. Switch: `/caveman lite|full|ultra`.
+
+## Rules
+
+Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact.
+
+Pattern: `[thing] [action] [reason]. [next step].`
+
+Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..."
+Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:"
+
+## Intensity
+
+| Level | What change |
+|-------|------------|
+| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight |
+| **full** | Drop articles, fragments OK, short synonyms. Classic caveman |
+| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough |
+| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register |
+| **wenyan-full** | Maximum classical terseness. Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) |
+| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse |
+
+Example — "Why React component re-render?"
+- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`."
+- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`."
+- ultra: "Inline obj prop → new ref → re-render. `useMemo`."
+- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。"
+- wenyan-full: "物出新參照,致重繪。useMemo .Wrap之。"
+- wenyan-ultra: "新參照→重繪。useMemo Wrap。"
+
+Example — "Explain database connection pooling."
+- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead."
+- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead."
+- ultra: "Pool = reuse DB conn. Skip handshake → fast under load."
+- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。"
+- wenyan-ultra: "池reuse conn。skip handshake → fast。"
+
+## Auto-Clarity
+
+Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done.
+
+Example — destructive op:
+> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone.
+> ```sql
+> DROP TABLE users;
+> ```
+> Caveman resume. Verify backup exist first.
+
+## Boundaries
+
+Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end.
\ No newline at end of file
diff --git a/.kiro/skills/caveman-commit/SKILL.md b/.kiro/skills/caveman-commit/SKILL.md
new file mode 100644
index 0000000..729318c
--- /dev/null
+++ b/.kiro/skills/caveman-commit/SKILL.md
@@ -0,0 +1,65 @@
+---
+name: caveman-commit
+description: >
+ Ultra-compressed commit message generator. Cuts noise from commit messages while preserving
+ intent and reasoning. Conventional Commits format. Subject ≤50 chars, body only when "why"
+ isn't obvious. Use when user says "write a commit", "commit message", "generate commit",
+ "/commit", or invokes /caveman-commit. Auto-triggers when staging changes.
+---
+
+Write commit messages terse and exact. Conventional Commits format. No fluff. Why over what.
+
+## Rules
+
+**Subject line:**
+- `(): ` — `` optional
+- Types: `feat`, `fix`, `refactor`, `perf`, `docs`, `test`, `chore`, `build`, `ci`, `style`, `revert`
+- Imperative mood: "add", "fix", "remove" — not "added", "adds", "adding"
+- ≤50 chars when possible, hard cap 72
+- No trailing period
+- Match project convention for capitalization after the colon
+
+**Body (only if needed):**
+- Skip entirely when subject is self-explanatory
+- Add body only for: non-obvious *why*, breaking changes, migration notes, linked issues
+- Wrap at 72 chars
+- Bullets `-` not `*`
+- Reference issues/PRs at end: `Closes #42`, `Refs #17`
+
+**What NEVER goes in:**
+- "This commit does X", "I", "we", "now", "currently" — the diff says what
+- "As requested by..." — use Co-authored-by trailer
+- "Generated with Claude Code" or any AI attribution
+- Emoji (unless project convention requires)
+- Restating the file name when scope already says it
+
+## Examples
+
+Diff: new endpoint for user profile with body explaining the why
+- ❌ "feat: add a new endpoint to get user profile information from the database"
+- ✅
+ ```
+ feat(api): add GET /users/:id/profile
+
+ Mobile client needs profile data without the full user payload
+ to reduce LTE bandwidth on cold-launch screens.
+
+ Closes #128
+ ```
+
+Diff: breaking API change
+- ✅
+ ```
+ feat(api)!: rename /v1/orders to /v1/checkout
+
+ BREAKING CHANGE: clients on /v1/orders must migrate to /v1/checkout
+ before 2026-06-01. Old route returns 410 after that date.
+ ```
+
+## Auto-Clarity
+
+Always include body for: breaking changes, security fixes, data migrations, anything reverting a prior commit. Never compress these into subject-only — future debuggers need the context.
+
+## Boundaries
+
+Only generates the commit message. Does not run `git commit`, does not stage files, does not amend. Output the message as a code block ready to paste. "stop caveman-commit" or "normal mode": revert to verbose commit style.
\ No newline at end of file
diff --git a/.kiro/skills/caveman-compress/README.md b/.kiro/skills/caveman-compress/README.md
new file mode 100644
index 0000000..7c0e8ba
--- /dev/null
+++ b/.kiro/skills/caveman-compress/README.md
@@ -0,0 +1,163 @@
+
+
+
+
+caveman-compress
+
+
+ shrink memory file. save token every session.
+
+
+---
+
+A Claude Code skill that compresses your project memory files (`CLAUDE.md`, todos, preferences) into caveman format — so every session loads fewer tokens automatically.
+
+Claude read `CLAUDE.md` on every session start. If file big, cost big. Caveman make file small. Cost go down forever.
+
+## What It Do
+
+```
+/caveman:compress CLAUDE.md
+```
+
+```
+CLAUDE.md ← compressed (Claude reads this — fewer tokens every session)
+CLAUDE.original.md ← human-readable backup (you edit this)
+```
+
+Original never lost. You can read and edit `.original.md`. Run skill again to re-compress after edits.
+
+## Benchmarks
+
+Real results on real project files:
+
+| File | Original | Compressed | Saved |
+|------|----------:|----------:|------:|
+| `claude-md-preferences.md` | 706 | 285 | **59.6%** |
+| `project-notes.md` | 1145 | 535 | **53.3%** |
+| `claude-md-project.md` | 1122 | 636 | **43.3%** |
+| `todo-list.md` | 627 | 388 | **38.1%** |
+| `mixed-with-code.md` | 888 | 560 | **36.9%** |
+| **Average** | **898** | **481** | **46%** |
+
+All validations passed ✅ — headings, code blocks, URLs, file paths preserved exactly.
+
+## Before / After
+
+
+
+
+
+### 📄 Original (706 tokens)
+
+> "I strongly prefer TypeScript with strict mode enabled for all new code. Please don't use `any` type unless there's genuinely no way around it, and if you do, leave a comment explaining the reasoning. I find that taking the time to properly type things catches a lot of bugs before they ever make it to runtime."
+
+
+
+
+### 🪨 Caveman (285 tokens)
+
+> "Prefer TypeScript strict mode always. No `any` unless unavoidable — comment why if used. Proper types catch bugs early."
+
+
+
+
+
+**Same instructions. 60% fewer tokens. Every. Single. Session.**
+
+## Security
+
+`caveman-compress` is flagged as Snyk High Risk due to subprocess and file I/O patterns detected by static analysis. This is a false positive — see [SECURITY.md](./SECURITY.md) for a full explanation of what the skill does and does not do.
+
+## Install
+
+Compress is built in with the `caveman` plugin. Install `caveman` once, then use `/caveman:compress`.
+
+If you need local files, the compress skill lives at:
+
+```bash
+caveman-compress/
+```
+
+**Requires:** Python 3.10+
+
+## Usage
+
+```
+/caveman:compress <file>
+```
+
+Examples:
+```
+/caveman:compress CLAUDE.md
+/caveman:compress docs/preferences.md
+/caveman:compress todos.md
+```
+
+### What files work
+
+| Type | Compress? |
+|------|-----------|
+| `.md`, `.txt`, `.rst` | ✅ Yes |
+| Extensionless natural language | ✅ Yes |
+| `.py`, `.js`, `.ts`, `.json`, `.yaml` | ❌ Skip (code/config) |
+| `*.original.md` | ❌ Skip (backup files) |
+
+## How It Work
+
+```
+/caveman:compress CLAUDE.md
+ ↓
+detect file type (no tokens)
+ ↓
+Claude compresses (tokens — one call)
+ ↓
+validate output (no tokens)
+ checks: headings, code blocks, URLs, file paths, bullets
+ ↓
+if errors: Claude fixes cherry-picked issues only (tokens — targeted fix)
+ does NOT recompress — only patches broken parts
+ ↓
+retry up to 2 times
+ ↓
+write compressed → CLAUDE.md
+write original → CLAUDE.original.md
+```
+
+Only two things use tokens: initial compression + targeted fix if validation fails. Everything else is local Python.
+
+## What Is Preserved
+
+Caveman compress natural language. It never touch:
+
+- Code blocks (` ``` ` fenced or indented)
+- Inline code (`` `backtick content` ``)
+- URLs and links
+- File paths (`/src/components/...`)
+- Commands (`npm install`, `git commit`)
+- Technical terms, library names, API names
+- Headings (exact text preserved)
+- Tables (structure preserved, cell text compressed)
+- Dates, version numbers, numeric values
+
+## Why This Matter
+
+`CLAUDE.md` loads on **every session start**. A 1000-token project memory file costs tokens every single time you open a project. Over 100 sessions that's 100,000 tokens of overhead — just for context you already wrote.
+
+Caveman cut that by ~46% on average. Same instructions. Same accuracy. Less waste.
+
+```
+┌────────────────────────────────────────────┐
+│ TOKEN SAVINGS PER FILE █████ 46% │
+│ SESSIONS THAT BENEFIT ██████████ 100% │
+│ INFORMATION PRESERVED ██████████ 100% │
+│ SETUP TIME █ 1x │
+└────────────────────────────────────────────┘
+```
+
+## Part of Caveman
+
+This skill is part of the [caveman](https://github.com/JuliusBrussee/caveman) toolkit — making Claude use fewer tokens without losing accuracy.
+
+- **caveman** — make Claude *speak* like caveman (cuts response tokens ~65%)
+- **caveman-compress** — make Claude *read* less (cuts context tokens ~46%)
diff --git a/.kiro/skills/caveman-compress/SECURITY.md b/.kiro/skills/caveman-compress/SECURITY.md
new file mode 100644
index 0000000..693108c
--- /dev/null
+++ b/.kiro/skills/caveman-compress/SECURITY.md
@@ -0,0 +1,31 @@
+# Security
+
+## Snyk High Risk Rating
+
+`caveman-compress` receives a Snyk High Risk rating due to static analysis heuristics. This document explains what the skill does and does not do.
+
+### What triggers the rating
+
+1. **subprocess usage**: The skill calls the `claude` CLI via `subprocess.run()` as a fallback when `ANTHROPIC_API_KEY` is not set. The subprocess call uses a fixed argument list — no shell interpolation occurs. User file content is passed via stdin, not as a shell argument.
+
+2. **File read/write**: The skill reads the file the user explicitly points it at, compresses it, and writes the result back to the same path. A `.original.md` backup is saved alongside it. No files outside the user-specified path are read or written.
+
+### What the skill does NOT do
+
+- Does not execute user file content as code
+- Does not make network requests except to Anthropic's API (via SDK or CLI)
+- Does not access files outside the path the user provides
+- Does not use shell=True or string interpolation in subprocess calls
+- Does not collect or transmit any data beyond the file being compressed
+
+### Auth behavior
+
+If `ANTHROPIC_API_KEY` is set, the skill uses the Anthropic Python SDK directly (no subprocess). If not set, it falls back to the `claude` CLI, which uses the user's existing Claude desktop authentication.
+
+### File size limit
+
+Files larger than 500KB are rejected before any API call is made.
+
+### Reporting a vulnerability
+
+If you believe you've found a genuine security issue, please open a GitHub issue with the label `security`.
diff --git a/.kiro/skills/caveman-compress/SKILL.md b/.kiro/skills/caveman-compress/SKILL.md
new file mode 100644
index 0000000..7b3e3aa
--- /dev/null
+++ b/.kiro/skills/caveman-compress/SKILL.md
@@ -0,0 +1,111 @@
+---
+name: caveman-compress
+description: >
+ Compress natural language memory files (CLAUDE.md, todos, preferences) into caveman format
+ to save input tokens. Preserves all technical substance, code, URLs, and structure.
+ Compressed version overwrites the original file. Human-readable backup saved as FILE.original.md.
+  Trigger: /caveman:compress <file> or "compress memory file"
+---
+
+# Caveman Compress
+
+## Purpose
+
+Compress natural language files (CLAUDE.md, todos, preferences) into caveman-speak to reduce input tokens. Compressed version overwrites original. Human-readable backup saved as `.original.md`.
+
+## Trigger
+
+`/caveman:compress <file>` or when user asks to compress a memory file.
+
+## Process
+
+1. The compression scripts live in `caveman-compress/scripts/` (adjacent to this SKILL.md). If the path is not immediately available, search for `caveman-compress/scripts/__main__.py`.
+
+2. Run:
+
+cd caveman-compress && python3 -m scripts
+
+3. The CLI will:
+- detect file type (no tokens)
+- call Claude to compress
+- validate output (no tokens)
+- if errors: cherry-pick fix with Claude (targeted fixes only, no recompression)
+- retry up to 2 times
+- if still failing after 2 retries: report error to user, leave original file untouched
+
+4. Return result to user
+
+## Compression Rules
+
+### Remove
+- Articles: a, an, the
+- Filler: just, really, basically, actually, simply, essentially, generally
+- Pleasantries: "sure", "certainly", "of course", "happy to", "I'd recommend"
+- Hedging: "it might be worth", "you could consider", "it would be good to"
+- Redundant phrasing: "in order to" → "to", "make sure to" → "ensure", "the reason is because" → "because"
+- Connective fluff: "however", "furthermore", "additionally", "in addition"
+
+### Preserve EXACTLY (never modify)
+- Code blocks (fenced ``` and indented)
+- Inline code (`backtick content`)
+- URLs and links (full URLs, markdown links)
+- File paths (`/src/components/...`, `./config.yaml`)
+- Commands (`npm install`, `git commit`, `docker build`)
+- Technical terms (library names, API names, protocols, algorithms)
+- Proper nouns (project names, people, companies)
+- Dates, version numbers, numeric values
+- Environment variables (`$HOME`, `NODE_ENV`)
+
+### Preserve Structure
+- All markdown headings (keep exact heading text, compress body below)
+- Bullet point hierarchy (keep nesting level)
+- Numbered lists (keep numbering)
+- Tables (compress cell text, keep structure)
+- Frontmatter/YAML headers in markdown files
+
+### Compress
+- Use short synonyms: "big" not "extensive", "fix" not "implement a solution for", "use" not "utilize"
+- Fragments OK: "Run tests before commit" not "You should always run tests before committing"
+- Drop "you should", "make sure to", "remember to" — just state the action
+- Merge redundant bullets that say the same thing differently
+- Keep one example where multiple examples show the same pattern
+
+CRITICAL RULE:
+Anything inside ``` ... ``` must be copied EXACTLY.
+Do not:
+- remove comments
+- remove spacing
+- reorder lines
+- shorten commands
+- simplify anything
+
+Inline code (`...`) must be preserved EXACTLY.
+Do not modify anything inside backticks.
+
+If file contains code blocks:
+- Treat code blocks as read-only regions
+- Only compress text outside them
+- Do not merge sections around code
+
+## Pattern
+
+Original:
+> You should always make sure to run the test suite before pushing any changes to the main branch. This is important because it helps catch bugs early and prevents broken builds from being deployed to production.
+
+Compressed:
+> Run tests before push to main. Catch bugs early, prevent broken prod deploys.
+
+Original:
+> The application uses a microservices architecture with the following components. The API gateway handles all incoming requests and routes them to the appropriate service. The authentication service is responsible for managing user sessions and JWT tokens.
+
+Compressed:
+> Microservices architecture. API gateway route all requests to services. Auth service manage user sessions + JWT tokens.
+
+## Boundaries
+
+- ONLY compress natural language files (.md, .txt, extensionless)
+- NEVER modify: .py, .js, .ts, .json, .yaml, .yml, .toml, .env, .lock, .css, .html, .xml, .sql, .sh
+- If file has mixed content (prose + code), compress ONLY the prose sections
+- If unsure whether something is code or prose, leave it unchanged
+- Original file is backed up as FILE.original.md before overwriting
+- Never compress FILE.original.md (skip it)
diff --git a/.kiro/skills/caveman-compress/scripts/__init__.py b/.kiro/skills/caveman-compress/scripts/__init__.py
new file mode 100644
index 0000000..16b8c53
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/__init__.py
@@ -0,0 +1,9 @@
+"""Caveman compress scripts.
+
+This package provides tools to compress natural language markdown files
+into caveman format to save input tokens.
+"""
+
+__all__ = ["cli", "compress", "detect", "validate"]
+
+__version__ = "1.0.0"
diff --git a/.kiro/skills/caveman-compress/scripts/__main__.py b/.kiro/skills/caveman-compress/scripts/__main__.py
new file mode 100644
index 0000000..4e28416
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+main()
diff --git a/.kiro/skills/caveman-compress/scripts/benchmark.py b/.kiro/skills/caveman-compress/scripts/benchmark.py
new file mode 100644
index 0000000..eac927d
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/benchmark.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import sys
+
+# Support both direct execution and module import
+try:
+ from .validate import validate
+except ImportError:
+ sys.path.insert(0, str(Path(__file__).parent))
+ from validate import validate
+
+try:
+ import tiktoken
+ _enc = tiktoken.get_encoding("o200k_base")
+except ImportError:
+ _enc = None
+
+
+def count_tokens(text):
+ if _enc is None:
+ return len(text.split()) # fallback: word count
+ return len(_enc.encode(text))
+
+
+def benchmark_pair(orig_path: Path, comp_path: Path):
+ orig_text = orig_path.read_text()
+ comp_text = comp_path.read_text()
+
+ orig_tokens = count_tokens(orig_text)
+ comp_tokens = count_tokens(comp_text)
+ saved = 100 * (orig_tokens - comp_tokens) / orig_tokens if orig_tokens > 0 else 0.0
+ result = validate(orig_path, comp_path)
+
+ return (comp_path.name, orig_tokens, comp_tokens, saved, result.is_valid)
+
+
+def print_table(rows):
+ print("\n| File | Original | Compressed | Saved % | Valid |")
+ print("|------|----------|------------|---------|-------|")
+ for r in rows:
+ print(f"| {r[0]} | {r[1]} | {r[2]} | {r[3]:.1f}% | {'✅' if r[4] else '❌'} |")
+
+
+def main():
+ # Direct file pair: python3 benchmark.py original.md compressed.md
+ if len(sys.argv) == 3:
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+ if not orig.exists():
+ print(f"❌ Not found: {orig}")
+ sys.exit(1)
+ if not comp.exists():
+ print(f"❌ Not found: {comp}")
+ sys.exit(1)
+ print_table([benchmark_pair(orig, comp)])
+ return
+
+ # Glob mode: repo_root/tests/caveman-compress/
+ tests_dir = Path(__file__).parent.parent.parent / "tests" / "caveman-compress"
+ if not tests_dir.exists():
+ print(f"❌ Tests dir not found: {tests_dir}")
+ sys.exit(1)
+
+ rows = []
+ for orig in sorted(tests_dir.glob("*.original.md")):
+ comp = orig.with_name(orig.stem.removesuffix(".original") + ".md")
+ if comp.exists():
+ rows.append(benchmark_pair(orig, comp))
+
+ if not rows:
+ print("No compressed file pairs found.")
+ return
+
+ print_table(rows)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.kiro/skills/caveman-compress/scripts/cli.py b/.kiro/skills/caveman-compress/scripts/cli.py
new file mode 100644
index 0000000..428fd86
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/cli.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""
+Caveman Compress CLI
+
+Usage:
+ caveman <file>
+"""
+
+import sys
+from pathlib import Path
+
+from .compress import compress_file
+from .detect import detect_file_type, should_compress
+
+
+def print_usage():
+ print("Usage: caveman ")
+
+
+def main():
+ if len(sys.argv) != 2:
+ print_usage()
+ sys.exit(1)
+
+ filepath = Path(sys.argv[1])
+
+ # Check file exists
+ if not filepath.exists():
+ print(f"❌ File not found: {filepath}")
+ sys.exit(1)
+
+ if not filepath.is_file():
+ print(f"❌ Not a file: {filepath}")
+ sys.exit(1)
+
+ filepath = filepath.resolve()
+
+ # Detect file type
+ file_type = detect_file_type(filepath)
+
+ print(f"Detected: {file_type}")
+
+ # Check if compressible
+ if not should_compress(filepath):
+ print("Skipping: file is not natural language (code/config)")
+ sys.exit(0)
+
+ print("Starting caveman compression...\n")
+
+ try:
+ success = compress_file(filepath)
+
+ if success:
+ print("\nCompression completed successfully")
+ backup_path = filepath.with_name(filepath.stem + ".original.md")
+ print(f"Compressed: {filepath}")
+ print(f"Original: {backup_path}")
+ sys.exit(0)
+ else:
+ print("\n❌ Compression failed after retries")
+ sys.exit(2)
+
+ except KeyboardInterrupt:
+ print("\nInterrupted by user")
+ sys.exit(130)
+
+ except Exception as e:
+ print(f"\n❌ Error: {e}")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.kiro/skills/caveman-compress/scripts/compress.py b/.kiro/skills/caveman-compress/scripts/compress.py
new file mode 100644
index 0000000..1622a7a
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/compress.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Caveman Memory Compression Orchestrator
+
+Usage:
+ python scripts/compress.py <file>
+"""
+
+import os
+import re
+import subprocess
+from pathlib import Path
+from typing import List
+
+OUTER_FENCE_REGEX = re.compile(
+ r"\A\s*(`{3,}|~{3,})[^\n]*\n(.*)\n\1\s*\Z", re.DOTALL
+)
+
+
+def strip_llm_wrapper(text: str) -> str:
+ """Strip outer ```markdown ... ``` fence when it wraps the entire output."""
+ m = OUTER_FENCE_REGEX.match(text)
+ if m:
+ return m.group(2)
+ return text
+
+from .detect import should_compress
+from .validate import validate
+
+MAX_RETRIES = 2
+
+
+# ---------- Claude Calls ----------
+
+
+def call_claude(prompt: str) -> str:
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if api_key:
+ try:
+ import anthropic
+
+ client = anthropic.Anthropic(api_key=api_key)
+ msg = client.messages.create(
+ model=os.environ.get("CAVEMAN_MODEL", "claude-sonnet-4-5"),
+ max_tokens=8192,
+ messages=[{"role": "user", "content": prompt}],
+ )
+ return strip_llm_wrapper(msg.content[0].text.strip())
+ except ImportError:
+ pass # anthropic not installed, fall back to CLI
+ # Fallback: use claude CLI (handles desktop auth)
+ try:
+ result = subprocess.run(
+ ["claude", "--print"],
+ input=prompt,
+ text=True,
+ capture_output=True,
+ check=True,
+ )
+ return strip_llm_wrapper(result.stdout.strip())
+ except subprocess.CalledProcessError as e:
+ raise RuntimeError(f"Claude call failed:\n{e.stderr}")
+
+
+def build_compress_prompt(original: str) -> str:
+ return f"""
+Compress this markdown into caveman format.
+
+STRICT RULES:
+- Do NOT modify anything inside ``` code blocks
+- Do NOT modify anything inside inline backticks
+- Preserve ALL URLs exactly
+- Preserve ALL headings exactly
+- Preserve file paths and commands
+- Return ONLY the compressed markdown body — do NOT wrap the entire output in a ```markdown fence or any other fence. Inner code blocks from the original stay as-is; do not add a new outer fence around the whole file.
+
+Only compress natural language.
+
+TEXT:
+{original}
+"""
+
+
+def build_fix_prompt(original: str, compressed: str, errors: List[str]) -> str:
+ errors_str = "\n".join(f"- {e}" for e in errors)
+ return f"""You are fixing a caveman-compressed markdown file. Specific validation errors were found.
+
+CRITICAL RULES:
+- DO NOT recompress or rephrase the file
+- ONLY fix the listed errors — leave everything else exactly as-is
+- The ORIGINAL is provided as reference only (to restore missing content)
+- Preserve caveman style in all untouched sections
+
+ERRORS TO FIX:
+{errors_str}
+
+HOW TO FIX:
+- Missing URL: find it in ORIGINAL, restore it exactly where it belongs in COMPRESSED
+- Code block mismatch: find the exact code block in ORIGINAL, restore it in COMPRESSED
+- Heading mismatch: restore the exact heading text from ORIGINAL into COMPRESSED
+- Do not touch any section not mentioned in the errors
+
+ORIGINAL (reference only):
+{original}
+
+COMPRESSED (fix this):
+{compressed}
+
+Return ONLY the fixed compressed file. No explanation.
+"""
+
+
+# ---------- Core Logic ----------
+
+
+def compress_file(filepath: Path) -> bool:
+ # Resolve and validate path
+ filepath = filepath.resolve()
+ MAX_FILE_SIZE = 500_000 # 500KB
+ if not filepath.exists():
+ raise FileNotFoundError(f"File not found: {filepath}")
+ if filepath.stat().st_size > MAX_FILE_SIZE:
+ raise ValueError(f"File too large to compress safely (max 500KB): {filepath}")
+
+ print(f"Processing: {filepath}")
+
+ if not should_compress(filepath):
+ print("Skipping (not natural language)")
+ return False
+
+ original_text = filepath.read_text(errors="ignore")
+ backup_path = filepath.with_name(filepath.stem + ".original.md")
+
+ # Check if backup already exists to prevent accidental overwriting
+ if backup_path.exists():
+ print(f"⚠️ Backup file already exists: {backup_path}")
+ print("The original backup may contain important content.")
+ print("Aborting to prevent data loss. Please remove or rename the backup file if you want to proceed.")
+ return False
+
+ # Step 1: Compress
+ print("Compressing with Claude...")
+ compressed = call_claude(build_compress_prompt(original_text))
+
+ # Save original as backup, write compressed to original path
+ backup_path.write_text(original_text)
+ filepath.write_text(compressed)
+
+ # Step 2: Validate + Retry
+ for attempt in range(MAX_RETRIES):
+ print(f"\nValidation attempt {attempt + 1}")
+
+ result = validate(backup_path, filepath)
+
+ if result.is_valid:
+ print("Validation passed")
+ break
+
+ print("❌ Validation failed:")
+ for err in result.errors:
+ print(f" - {err}")
+
+ if attempt == MAX_RETRIES - 1:
+ # Restore original on failure
+ filepath.write_text(original_text)
+ backup_path.unlink(missing_ok=True)
+ print("❌ Failed after retries — original restored")
+ return False
+
+ print("Fixing with Claude...")
+ compressed = call_claude(
+ build_fix_prompt(original_text, compressed, result.errors)
+ )
+ filepath.write_text(compressed)
+
+ return True
diff --git a/.kiro/skills/caveman-compress/scripts/detect.py b/.kiro/skills/caveman-compress/scripts/detect.py
new file mode 100644
index 0000000..5f50fd3
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/detect.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""Detect whether a file is natural language (compressible) or code/config (skip)."""
+
+import json
+import re
+from pathlib import Path
+
+# Extensions that are natural language and compressible
+COMPRESSIBLE_EXTENSIONS = {".md", ".txt", ".markdown", ".rst"}
+
+# Extensions that are code/config and should be skipped
+SKIP_EXTENSIONS = {
+ ".py", ".js", ".ts", ".tsx", ".jsx", ".json", ".yaml", ".yml",
+ ".toml", ".env", ".lock", ".css", ".scss", ".html", ".xml",
+ ".sql", ".sh", ".bash", ".zsh", ".go", ".rs", ".java", ".c",
+ ".cpp", ".h", ".hpp", ".rb", ".php", ".swift", ".kt", ".lua",
+ ".dockerfile", ".makefile", ".csv", ".ini", ".cfg",
+}
+
+# Patterns that indicate a line is code
+CODE_PATTERNS = [
+ re.compile(r"^\s*(import |from .+ import |require\(|const |let |var )"),
+ re.compile(r"^\s*(def |class |function |async function |export )"),
+ re.compile(r"^\s*(if\s*\(|for\s*\(|while\s*\(|switch\s*\(|try\s*\{)"),
+ re.compile(r"^\s*[\}\]\);]+\s*$"), # closing braces/brackets
+ re.compile(r"^\s*@\w+"), # decorators/annotations
+ re.compile(r'^\s*"[^"]+"\s*:\s*'), # JSON-like key-value
+ re.compile(r"^\s*\w+\s*=\s*[{\[\(\"']"), # assignment with literal
+]
+
+
+def _is_code_line(line: str) -> bool:
+ """Check if a line looks like code."""
+ return any(p.match(line) for p in CODE_PATTERNS)
+
+
+def _is_json_content(text: str) -> bool:
+ """Check if content is valid JSON."""
+ try:
+ json.loads(text)
+ return True
+ except (json.JSONDecodeError, ValueError):
+ return False
+
+
+def _is_yaml_content(lines: list[str]) -> bool:
+ """Heuristic: check if content looks like YAML."""
+ yaml_indicators = 0
+ for line in lines[:30]:
+ stripped = line.strip()
+ if stripped.startswith("---"):
+ yaml_indicators += 1
+ elif re.match(r"^\w[\w\s]*:\s", stripped):
+ yaml_indicators += 1
+ elif stripped.startswith("- ") and ":" in stripped:
+ yaml_indicators += 1
+ # If most non-empty lines look like YAML
+ non_empty = sum(1 for l in lines[:30] if l.strip())
+ return non_empty > 0 and yaml_indicators / non_empty > 0.6
+
+
+def detect_file_type(filepath: Path) -> str:
+ """Classify a file as 'natural_language', 'code', 'config', or 'unknown'.
+
+ Returns:
+ One of: 'natural_language', 'code', 'config', 'unknown'
+ """
+ ext = filepath.suffix.lower()
+
+ # Extension-based classification
+ if ext in COMPRESSIBLE_EXTENSIONS:
+ return "natural_language"
+ if ext in SKIP_EXTENSIONS:
+ return "code" if ext not in {".json", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".env"} else "config"
+
+ # Extensionless files (like CLAUDE.md, TODO) — check content
+ if not ext:
+ try:
+ text = filepath.read_text(errors="ignore")
+ except (OSError, PermissionError):
+ return "unknown"
+
+ lines = text.splitlines()[:50]
+
+ if _is_json_content(text[:10000]):
+ return "config"
+ if _is_yaml_content(lines):
+ return "config"
+
+ code_lines = sum(1 for l in lines if l.strip() and _is_code_line(l))
+ non_empty = sum(1 for l in lines if l.strip())
+ if non_empty > 0 and code_lines / non_empty > 0.4:
+ return "code"
+
+ return "natural_language"
+
+ return "unknown"
+
+
+def should_compress(filepath: Path) -> bool:
+ """Return True if the file is natural language and should be compressed."""
+ if not filepath.is_file():
+ return False
+ # Skip backup files
+ if filepath.name.endswith(".original.md"):
+ return False
+ return detect_file_type(filepath) == "natural_language"
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) < 2:
+ print("Usage: python detect.py [file2] ...")
+ sys.exit(1)
+
+ for path_str in sys.argv[1:]:
+ p = Path(path_str).resolve()
+ file_type = detect_file_type(p)
+ compress = should_compress(p)
+ print(f" {p.name:30s} type={file_type:20s} compress={compress}")
diff --git a/.kiro/skills/caveman-compress/scripts/validate.py b/.kiro/skills/caveman-compress/scripts/validate.py
new file mode 100644
index 0000000..3c4d4c1
--- /dev/null
+++ b/.kiro/skills/caveman-compress/scripts/validate.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+import re
+from pathlib import Path
+
+URL_REGEX = re.compile(r"https?://[^\s)]+")
+FENCE_OPEN_REGEX = re.compile(r"^(\s{0,3})(`{3,}|~{3,})(.*)$")
+HEADING_REGEX = re.compile(r"^(#{1,6})\s+(.*)", re.MULTILINE)
+BULLET_REGEX = re.compile(r"^\s*[-*+]\s+", re.MULTILINE)
+
+# crude but effective path detection
+# Requires either a path prefix (./ ../ / or drive letter) or a slash/backslash within the match
+PATH_REGEX = re.compile(r"(?:\./|\.\./|/|[A-Za-z]:\\)[\w\-/\\\.]+|[\w\-\.]+[/\\][\w\-/\\\.]+")
+
+
+class ValidationResult:
+ def __init__(self):
+ self.is_valid = True
+ self.errors = []
+ self.warnings = []
+
+ def add_error(self, msg):
+ self.is_valid = False
+ self.errors.append(msg)
+
+ def add_warning(self, msg):
+ self.warnings.append(msg)
+
+
+def read_file(path: Path) -> str:
+ return path.read_text(errors="ignore")
+
+
+# ---------- Extractors ----------
+
+
+def extract_headings(text):
+ return [(level, title.strip()) for level, title in HEADING_REGEX.findall(text)]
+
+
+def extract_code_blocks(text):
+ """Line-based fenced code block extractor.
+
+ Handles ``` and ~~~ fences with variable length (CommonMark: closing
+ fence must use same char and be at least as long as opening). Supports
+ nested fences (e.g. an outer 4-backtick block wrapping inner 3-backtick
+ content).
+ """
+ blocks = []
+ lines = text.split("\n")
+ i = 0
+ n = len(lines)
+ while i < n:
+ m = FENCE_OPEN_REGEX.match(lines[i])
+ if not m:
+ i += 1
+ continue
+ fence_char = m.group(2)[0]
+ fence_len = len(m.group(2))
+ open_line = lines[i]
+ block_lines = [open_line]
+ i += 1
+ closed = False
+ while i < n:
+ close_m = FENCE_OPEN_REGEX.match(lines[i])
+ if (
+ close_m
+ and close_m.group(2)[0] == fence_char
+ and len(close_m.group(2)) >= fence_len
+ and close_m.group(3).strip() == ""
+ ):
+ block_lines.append(lines[i])
+ closed = True
+ i += 1
+ break
+ block_lines.append(lines[i])
+ i += 1
+ if closed:
+ blocks.append("\n".join(block_lines))
+ # Unclosed fences are silently skipped — they indicate malformed markdown
+ # and including them would cause false-positive validation failures.
+ return blocks
+
+
+def extract_urls(text):
+ return set(URL_REGEX.findall(text))
+
+
+def extract_paths(text):
+ return set(PATH_REGEX.findall(text))
+
+
+def count_bullets(text):
+ return len(BULLET_REGEX.findall(text))
+
+
+# ---------- Validators ----------
+
+
+def validate_headings(orig, comp, result):
+ h1 = extract_headings(orig)
+ h2 = extract_headings(comp)
+
+ if len(h1) != len(h2):
+ result.add_error(f"Heading count mismatch: {len(h1)} vs {len(h2)}")
+
+ if h1 != h2:
+ result.add_warning("Heading text/order changed")
+
+
+def validate_code_blocks(orig, comp, result):
+ c1 = extract_code_blocks(orig)
+ c2 = extract_code_blocks(comp)
+
+ if c1 != c2:
+ result.add_error("Code blocks not preserved exactly")
+
+
+def validate_urls(orig, comp, result):
+ u1 = extract_urls(orig)
+ u2 = extract_urls(comp)
+
+ if u1 != u2:
+ result.add_error(f"URL mismatch: lost={u1 - u2}, added={u2 - u1}")
+
+
+def validate_paths(orig, comp, result):
+ p1 = extract_paths(orig)
+ p2 = extract_paths(comp)
+
+ if p1 != p2:
+ result.add_warning(f"Path mismatch: lost={p1 - p2}, added={p2 - p1}")
+
+
+def validate_bullets(orig, comp, result):
+ b1 = count_bullets(orig)
+ b2 = count_bullets(comp)
+
+ if b1 == 0:
+ return
+
+ diff = abs(b1 - b2) / b1
+
+ if diff > 0.15:
+ result.add_warning(f"Bullet count changed too much: {b1} -> {b2}")
+
+
+# ---------- Main ----------
+
+
+def validate(original_path: Path, compressed_path: Path) -> ValidationResult:
+ result = ValidationResult()
+
+ orig = read_file(original_path)
+ comp = read_file(compressed_path)
+
+ validate_headings(orig, comp, result)
+ validate_code_blocks(orig, comp, result)
+ validate_urls(orig, comp, result)
+ validate_paths(orig, comp, result)
+ validate_bullets(orig, comp, result)
+
+ return result
+
+
+# ---------- CLI ----------
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) != 3:
+ print("Usage: python validate.py ")
+ sys.exit(1)
+
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+
+ res = validate(orig, comp)
+
+ print(f"\nValid: {res.is_valid}")
+
+ if res.errors:
+ print("\nErrors:")
+ for e in res.errors:
+ print(f" - {e}")
+
+ if res.warnings:
+ print("\nWarnings:")
+ for w in res.warnings:
+ print(f" - {w}")
diff --git a/.kiro/skills/caveman-help/SKILL.md b/.kiro/skills/caveman-help/SKILL.md
new file mode 100644
index 0000000..078e487
--- /dev/null
+++ b/.kiro/skills/caveman-help/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: caveman-help
+description: >
+ Quick-reference card for all caveman modes, skills, and commands.
+ One-shot display, not a persistent mode. Trigger: /caveman-help,
+ "caveman help", "what caveman commands", "how do I use caveman".
+---
+
+# Caveman Help
+
+Display this reference card when invoked. One-shot — do NOT change mode, write flag files, or persist anything. Output in caveman style.
+
+## Modes
+
+| Mode | Trigger | What change |
+|------|---------|-------------|
+| **Lite** | `/caveman lite` | Drop filler. Keep sentence structure. |
+| **Full** | `/caveman` | Drop articles, filler, pleasantries, hedging. Fragments OK. Default. |
+| **Ultra** | `/caveman ultra` | Extreme compression. Bare fragments. Tables over prose. |
+| **Wenyan-Lite** | `/caveman wenyan-lite` | Classical Chinese style, light compression. |
+| **Wenyan-Full** | `/caveman wenyan` | Full 文言文. Maximum classical terseness. |
+| **Wenyan-Ultra** | `/caveman wenyan-ultra` | Extreme. Ancient scholar on a budget. |
+
+Mode stick until changed or session end.
+
+## Skills
+
+| Skill | Trigger | What it do |
+|-------|---------|-----------|
+| **caveman-commit** | `/caveman-commit` | Terse commit messages. Conventional Commits. ≤50 char subject. |
+| **caveman-review** | `/caveman-review` | One-line PR comments: `L42: bug: user null. Add guard.` |
+| **caveman-compress** | `/caveman:compress <file>` | Compress .md files to caveman prose. Saves ~46% input tokens. |
+| **caveman-help** | `/caveman-help` | This card. |
+
+## Deactivate
+
+Say "stop caveman" or "normal mode". Resume anytime with `/caveman`.
+
+## Configure Default Mode
+
+Default mode = `full`. Change it:
+
+**Environment variable** (highest priority):
+```bash
+export CAVEMAN_DEFAULT_MODE=ultra
+```
+
+**Config file** (`~/.config/caveman/config.json`):
+```json
+{ "defaultMode": "lite" }
+```
+
+Set `"off"` to disable auto-activation on session start. User can still activate manually with `/caveman`.
+
+Resolution: env var > config file > `full`.
+
+## More
+
+Full docs: https://github.com/JuliusBrussee/caveman
diff --git a/.kiro/skills/caveman-review/SKILL.md b/.kiro/skills/caveman-review/SKILL.md
new file mode 100644
index 0000000..48f4adb
--- /dev/null
+++ b/.kiro/skills/caveman-review/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: caveman-review
+description: >
+ Ultra-compressed code review comments. Cuts noise from PR feedback while preserving
+ the actionable signal. Each comment is one line: location, problem, fix. Use when user
+ says "review this PR", "code review", "review the diff", "/review", or invokes
+ /caveman-review. Auto-triggers when reviewing pull requests.
+---
+
+Write code review comments terse and actionable. One line per finding. Location, problem, fix. No throat-clearing.
+
+## Rules
+
+**Format:** `L<line>: <problem>. <fix>.` — or `<file>:L<line>: ...` when reviewing multi-file diffs.
+
+**Severity prefix (optional, when mixed):**
+- `🔴 bug:` — broken behavior, will cause incident
+- `🟡 risk:` — works but fragile (race, missing null check, swallowed error)
+- `🔵 nit:` — style, naming, micro-optim. Author can ignore
+- `❓ q:` — genuine question, not a suggestion
+
+**Drop:**
+- "I noticed that...", "It seems like...", "You might want to consider..."
+- "This is just a suggestion but..." — use `nit:` instead
+- "Great work!", "Looks good overall but..." — say it once at the top, not per comment
+- Restating what the line does — the reviewer can read the diff
+- Hedging ("perhaps", "maybe", "I think") — if unsure use `q:`
+
+**Keep:**
+- Exact line numbers
+- Exact symbol/function/variable names in backticks
+- Concrete fix, not "consider refactoring this"
+- The *why* if the fix isn't obvious from the problem statement
+
+## Examples
+
+❌ "I noticed that on line 42 you're not checking if the user object is null before accessing the email property. This could potentially cause a crash if the user is not found in the database. You might want to add a null check here."
+
+✅ `L42: 🔴 bug: user can be null after .find(). Add guard before .email.`
+
+❌ "It looks like this function is doing a lot of things and might benefit from being broken up into smaller functions for readability."
+
+✅ `L88-140: 🔵 nit: 50-line fn does 4 things. Extract validate/normalize/persist.`
+
+❌ "Have you considered what happens if the API returns a 429? I think we should probably handle that case."
+
+✅ `L23: 🟡 risk: no retry on 429. Wrap in withBackoff(3).`
+
+## Auto-Clarity
+
+Drop terse mode for: security findings (CVE-class bugs need full explanation + reference), architectural disagreements (need rationale, not just a one-liner), and onboarding contexts where the author is new and needs the "why". In those cases write a normal paragraph, then resume terse for the rest.
+
+## Boundaries
+
+Reviews only — does not write the code fix, does not approve/request-changes, does not run linters. Output the comment(s) ready to paste into the PR. "stop caveman-review" or "normal mode": revert to verbose review style.
\ No newline at end of file
diff --git a/.kiro/skills/caveman/SKILL.md b/.kiro/skills/caveman/SKILL.md
new file mode 100644
index 0000000..2ab498b
--- /dev/null
+++ b/.kiro/skills/caveman/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: caveman
+description: >
+ Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman
+ while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra,
+ wenyan-lite, wenyan-full, wenyan-ultra.
+ Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens",
+ "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested.
+---
+
+Respond terse like smart caveman. All technical substance stay. Only fluff die.
+
+## Persistence
+
+ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode".
+
+Default: **full**. Switch: `/caveman lite|full|ultra`.
+
+## Rules
+
+Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact.
+
+Pattern: `[thing] [action] [reason]. [next step].`
+
+Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..."
+Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:"
+
+## Intensity
+
+| Level | What change |
+|-------|------------|
+| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight |
+| **full** | Drop articles, fragments OK, short synonyms. Classic caveman |
+| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough |
+| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register |
+| **wenyan-full** | Maximum classical terseness. Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) |
+| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse |
+
+Example — "Why React component re-render?"
+- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`."
+- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`."
+- ultra: "Inline obj prop → new ref → re-render. `useMemo`."
+- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。"
+- wenyan-full: "物出新參照,致重繪。useMemo .Wrap之。"
+- wenyan-ultra: "新參照→重繪。useMemo Wrap。"
+
+Example — "Explain database connection pooling."
+- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead."
+- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead."
+- ultra: "Pool = reuse DB conn. Skip handshake → fast under load."
+- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。"
+- wenyan-ultra: "池reuse conn。skip handshake → fast。"
+
+## Auto-Clarity
+
+Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done.
+
+Example — destructive op:
+> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone.
+> ```sql
+> DROP TABLE users;
+> ```
+> Caveman resume. Verify backup exist first.
+
+## Boundaries
+
+Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end.
\ No newline at end of file
diff --git a/.pi/skills/caveman-commit/SKILL.md b/.pi/skills/caveman-commit/SKILL.md
new file mode 100644
index 0000000..729318c
--- /dev/null
+++ b/.pi/skills/caveman-commit/SKILL.md
@@ -0,0 +1,65 @@
+---
+name: caveman-commit
+description: >
+ Ultra-compressed commit message generator. Cuts noise from commit messages while preserving
+ intent and reasoning. Conventional Commits format. Subject ≤50 chars, body only when "why"
+ isn't obvious. Use when user says "write a commit", "commit message", "generate commit",
+ "/commit", or invokes /caveman-commit. Auto-triggers when staging changes.
+---
+
+Write commit messages terse and exact. Conventional Commits format. No fluff. Why over what.
+
+## Rules
+
+**Subject line:**
+- `<type>(<scope>): <subject>` — `<scope>` optional
+- Types: `feat`, `fix`, `refactor`, `perf`, `docs`, `test`, `chore`, `build`, `ci`, `style`, `revert`
+- Imperative mood: "add", "fix", "remove" — not "added", "adds", "adding"
+- ≤50 chars when possible, hard cap 72
+- No trailing period
+- Match project convention for capitalization after the colon
+
+**Body (only if needed):**
+- Skip entirely when subject is self-explanatory
+- Add body only for: non-obvious *why*, breaking changes, migration notes, linked issues
+- Wrap at 72 chars
+- Bullets `-` not `*`
+- Reference issues/PRs at end: `Closes #42`, `Refs #17`
+
+**What NEVER goes in:**
+- "This commit does X", "I", "we", "now", "currently" — the diff says what
+- "As requested by..." — use Co-authored-by trailer
+- "Generated with Claude Code" or any AI attribution
+- Emoji (unless project convention requires)
+- Restating the file name when scope already says it
+
+## Examples
+
+Diff: new endpoint for user profile with body explaining the why
+- ❌ "feat: add a new endpoint to get user profile information from the database"
+- ✅
+ ```
+ feat(api): add GET /users/:id/profile
+
+ Mobile client needs profile data without the full user payload
+ to reduce LTE bandwidth on cold-launch screens.
+
+ Closes #128
+ ```
+
+Diff: breaking API change
+- ✅
+ ```
+ feat(api)!: rename /v1/orders to /v1/checkout
+
+ BREAKING CHANGE: clients on /v1/orders must migrate to /v1/checkout
+ before 2026-06-01. Old route returns 410 after that date.
+ ```
+
+## Auto-Clarity
+
+Always include body for: breaking changes, security fixes, data migrations, anything reverting a prior commit. Never compress these into subject-only — future debuggers need the context.
+
+## Boundaries
+
+Only generates the commit message. Does not run `git commit`, does not stage files, does not amend. Output the message as a code block ready to paste. "stop caveman-commit" or "normal mode": revert to verbose commit style.
\ No newline at end of file
diff --git a/.pi/skills/caveman-compress/README.md b/.pi/skills/caveman-compress/README.md
new file mode 100644
index 0000000..7c0e8ba
--- /dev/null
+++ b/.pi/skills/caveman-compress/README.md
@@ -0,0 +1,163 @@
+
+
+
+
+caveman-compress
+
+
+ shrink memory file. save token every session.
+
+
+---
+
+A Claude Code skill that compresses your project memory files (`CLAUDE.md`, todos, preferences) into caveman format — so every session loads fewer tokens automatically.
+
+Claude read `CLAUDE.md` on every session start. If file big, cost big. Caveman make file small. Cost go down forever.
+
+## What It Do
+
+```
+/caveman:compress CLAUDE.md
+```
+
+```
+CLAUDE.md ← compressed (Claude reads this — fewer tokens every session)
+CLAUDE.original.md ← human-readable backup (you edit this)
+```
+
+Original never lost. You can read and edit `.original.md`. Run skill again to re-compress after edits.
+
+## Benchmarks
+
+Real results on real project files:
+
+| File | Original | Compressed | Saved |
+|------|----------:|----------:|------:|
+| `claude-md-preferences.md` | 706 | 285 | **59.6%** |
+| `project-notes.md` | 1145 | 535 | **53.3%** |
+| `claude-md-project.md` | 1122 | 636 | **43.3%** |
+| `todo-list.md` | 627 | 388 | **38.1%** |
+| `mixed-with-code.md` | 888 | 560 | **36.9%** |
+| **Average** | **898** | **481** | **46%** |
+
+All validations passed ✅ — headings, code blocks, URLs, file paths preserved exactly.
+
+## Before / After
+
+
+
+
+
+### 📄 Original (706 tokens)
+
+> "I strongly prefer TypeScript with strict mode enabled for all new code. Please don't use `any` type unless there's genuinely no way around it, and if you do, leave a comment explaining the reasoning. I find that taking the time to properly type things catches a lot of bugs before they ever make it to runtime."
+
+
+
+
+### 🪨 Caveman (285 tokens)
+
+> "Prefer TypeScript strict mode always. No `any` unless unavoidable — comment why if used. Proper types catch bugs early."
+
+
+
+
+
+**Same instructions. 60% fewer tokens. Every. Single. Session.**
+
+## Security
+
+`caveman-compress` is flagged as Snyk High Risk due to subprocess and file I/O patterns detected by static analysis. This is a false positive — see [SECURITY.md](./SECURITY.md) for a full explanation of what the skill does and does not do.
+
+## Install
+
+Compress is built in with the `caveman` plugin. Install `caveman` once, then use `/caveman:compress`.
+
+If you need local files, the compress skill lives at:
+
+```bash
+caveman-compress/
+```
+
+**Requires:** Python 3.10+
+
+## Usage
+
+```
+/caveman:compress
+```
+
+Examples:
+```
+/caveman:compress CLAUDE.md
+/caveman:compress docs/preferences.md
+/caveman:compress todos.md
+```
+
+### What files work
+
+| Type | Compress? |
+|------|-----------|
+| `.md`, `.txt`, `.rst` | ✅ Yes |
+| Extensionless natural language | ✅ Yes |
+| `.py`, `.js`, `.ts`, `.json`, `.yaml` | ❌ Skip (code/config) |
+| `*.original.md` | ❌ Skip (backup files) |
+
+## How It Work
+
+```
+/caveman:compress CLAUDE.md
+ ↓
+detect file type (no tokens)
+ ↓
+Claude compresses (tokens — one call)
+ ↓
+validate output (no tokens)
+ checks: headings, code blocks, URLs, file paths, bullets
+ ↓
+if errors: Claude fixes cherry-picked issues only (tokens — targeted fix)
+ does NOT recompress — only patches broken parts
+ ↓
+retry up to 2 times
+ ↓
+write compressed → CLAUDE.md
+write original → CLAUDE.original.md
+```
+
+Only two things use tokens: initial compression + targeted fix if validation fails. Everything else is local Python.
+
+## What Is Preserved
+
+Caveman compress natural language. It never touch:
+
+- Code blocks (` ``` ` fenced or indented)
+- Inline code (`` `backtick content` ``)
+- URLs and links
+- File paths (`/src/components/...`)
+- Commands (`npm install`, `git commit`)
+- Technical terms, library names, API names
+- Headings (exact text preserved)
+- Tables (structure preserved, cell text compressed)
+- Dates, version numbers, numeric values
+
+## Why This Matter
+
+`CLAUDE.md` loads on **every session start**. A 1000-token project memory file costs tokens every single time you open a project. Over 100 sessions that's 100,000 tokens of overhead — just for context you already wrote.
+
+Caveman cut that by ~46% on average. Same instructions. Same accuracy. Less waste.
+
+```
+┌────────────────────────────────────────────┐
+│ TOKEN SAVINGS PER FILE █████ 46% │
+│ SESSIONS THAT BENEFIT ██████████ 100% │
+│ INFORMATION PRESERVED ██████████ 100% │
+│ SETUP TIME █ 1x │
+└────────────────────────────────────────────┘
+```
+
+## Part of Caveman
+
+This skill is part of the [caveman](https://github.com/JuliusBrussee/caveman) toolkit — making Claude use fewer tokens without losing accuracy.
+
+- **caveman** — make Claude *speak* like caveman (cuts response tokens ~65%)
+- **caveman-compress** — make Claude *read* less (cuts context tokens ~46%)
diff --git a/.pi/skills/caveman-compress/SECURITY.md b/.pi/skills/caveman-compress/SECURITY.md
new file mode 100644
index 0000000..693108c
--- /dev/null
+++ b/.pi/skills/caveman-compress/SECURITY.md
@@ -0,0 +1,31 @@
+# Security
+
+## Snyk High Risk Rating
+
+`caveman-compress` receives a Snyk High Risk rating due to static analysis heuristics. This document explains what the skill does and does not do.
+
+### What triggers the rating
+
+1. **subprocess usage**: The skill calls the `claude` CLI via `subprocess.run()` as a fallback when `ANTHROPIC_API_KEY` is not set. The subprocess call uses a fixed argument list — no shell interpolation occurs. User file content is passed via stdin, not as a shell argument.
+
+2. **File read/write**: The skill reads the file the user explicitly points it at, compresses it, and writes the result back to the same path. A `.original.md` backup is saved alongside it. No files outside the user-specified path are read or written.
+
+### What the skill does NOT do
+
+- Does not execute user file content as code
+- Does not make network requests except to Anthropic's API (via SDK or CLI)
+- Does not access files outside the path the user provides
+- Does not use shell=True or string interpolation in subprocess calls
+- Does not collect or transmit any data beyond the file being compressed
+
+### Auth behavior
+
+If `ANTHROPIC_API_KEY` is set, the skill uses the Anthropic Python SDK directly (no subprocess). If not set, it falls back to the `claude` CLI, which uses the user's existing Claude desktop authentication.
+
+### File size limit
+
+Files larger than 500KB are rejected before any API call is made.
+
+### Reporting a vulnerability
+
+If you believe you've found a genuine security issue, please open a GitHub issue with the label `security`.
diff --git a/.pi/skills/caveman-compress/SKILL.md b/.pi/skills/caveman-compress/SKILL.md
new file mode 100644
index 0000000..7b3e3aa
--- /dev/null
+++ b/.pi/skills/caveman-compress/SKILL.md
@@ -0,0 +1,111 @@
+---
+name: caveman-compress
+description: >
+ Compress natural language memory files (CLAUDE.md, todos, preferences) into caveman format
+ to save input tokens. Preserves all technical substance, code, URLs, and structure.
+ Compressed version overwrites the original file. Human-readable backup saved as FILE.original.md.
+ Trigger: /caveman:compress or "compress memory file"
+---
+
+# Caveman Compress
+
+## Purpose
+
+Compress natural language files (CLAUDE.md, todos, preferences) into caveman-speak to reduce input tokens. Compressed version overwrites original. Human-readable backup saved as `.original.md`.
+
+## Trigger
+
+`/caveman:compress <file>` or when user asks to compress a memory file.
+
+## Process
+
+1. The compression scripts live in `caveman-compress/scripts/` (adjacent to this SKILL.md). If the path is not immediately available, search for `caveman-compress/scripts/__main__.py`.
+
+2. Run:
+
+cd caveman-compress && python3 -m scripts <file>
+
+3. The CLI will:
+- detect file type (no tokens)
+- call Claude to compress
+- validate output (no tokens)
+- if errors: cherry-pick fix with Claude (targeted fixes only, no recompression)
+- retry up to 2 times
+- if still failing after 2 retries: report error to user, leave original file untouched
+
+4. Return result to user
+
+## Compression Rules
+
+### Remove
+- Articles: a, an, the
+- Filler: just, really, basically, actually, simply, essentially, generally
+- Pleasantries: "sure", "certainly", "of course", "happy to", "I'd recommend"
+- Hedging: "it might be worth", "you could consider", "it would be good to"
+- Redundant phrasing: "in order to" → "to", "make sure to" → "ensure", "the reason is because" → "because"
+- Connective fluff: "however", "furthermore", "additionally", "in addition"
+
+### Preserve EXACTLY (never modify)
+- Code blocks (fenced ``` and indented)
+- Inline code (`backtick content`)
+- URLs and links (full URLs, markdown links)
+- File paths (`/src/components/...`, `./config.yaml`)
+- Commands (`npm install`, `git commit`, `docker build`)
+- Technical terms (library names, API names, protocols, algorithms)
+- Proper nouns (project names, people, companies)
+- Dates, version numbers, numeric values
+- Environment variables (`$HOME`, `NODE_ENV`)
+
+### Preserve Structure
+- All markdown headings (keep exact heading text, compress body below)
+- Bullet point hierarchy (keep nesting level)
+- Numbered lists (keep numbering)
+- Tables (compress cell text, keep structure)
+- Frontmatter/YAML headers in markdown files
+
+### Compress
+- Use short synonyms: "big" not "extensive", "fix" not "implement a solution for", "use" not "utilize"
+- Fragments OK: "Run tests before commit" not "You should always run tests before committing"
+- Drop "you should", "make sure to", "remember to" — just state the action
+- Merge redundant bullets that say the same thing differently
+- Keep one example where multiple examples show the same pattern
+
+CRITICAL RULE:
+Anything inside ``` ... ``` must be copied EXACTLY.
+Do not:
+- remove comments
+- remove spacing
+- reorder lines
+- shorten commands
+- simplify anything
+
+Inline code (`...`) must be preserved EXACTLY.
+Do not modify anything inside backticks.
+
+If file contains code blocks:
+- Treat code blocks as read-only regions
+- Only compress text outside them
+- Do not merge sections around code
+
+## Pattern
+
+Original:
+> You should always make sure to run the test suite before pushing any changes to the main branch. This is important because it helps catch bugs early and prevents broken builds from being deployed to production.
+
+Compressed:
+> Run tests before push to main. Catch bugs early, prevent broken prod deploys.
+
+Original:
+> The application uses a microservices architecture with the following components. The API gateway handles all incoming requests and routes them to the appropriate service. The authentication service is responsible for managing user sessions and JWT tokens.
+
+Compressed:
+> Microservices architecture. API gateway route all requests to services. Auth service manage user sessions + JWT tokens.
+
+## Boundaries
+
+- ONLY compress natural language files (.md, .txt, extensionless)
+- NEVER modify: .py, .js, .ts, .json, .yaml, .yml, .toml, .env, .lock, .css, .html, .xml, .sql, .sh
+- If file has mixed content (prose + code), compress ONLY the prose sections
+- If unsure whether something is code or prose, leave it unchanged
+- Original file is backed up as FILE.original.md before overwriting
+- Never compress FILE.original.md (skip it)
diff --git a/.pi/skills/caveman-compress/scripts/__init__.py b/.pi/skills/caveman-compress/scripts/__init__.py
new file mode 100644
index 0000000..16b8c53
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/__init__.py
@@ -0,0 +1,9 @@
+"""Caveman compress scripts.
+
+This package provides tools to compress natural language markdown files
+into caveman format to save input tokens.
+"""
+
+__all__ = ["cli", "compress", "detect", "validate"]
+
+__version__ = "1.0.0"
diff --git a/.pi/skills/caveman-compress/scripts/__main__.py b/.pi/skills/caveman-compress/scripts/__main__.py
new file mode 100644
index 0000000..4e28416
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/__main__.py
@@ -0,0 +1,3 @@
from .cli import main

# Guard so that importing scripts.__main__ does not trigger a run;
# `python3 -m scripts` still executes main() because __name__ is "__main__".
if __name__ == "__main__":
    main()
diff --git a/.pi/skills/caveman-compress/scripts/benchmark.py b/.pi/skills/caveman-compress/scripts/benchmark.py
new file mode 100644
index 0000000..eac927d
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/benchmark.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import sys
+
+# Support both direct execution and module import
+try:
+ from .validate import validate
+except ImportError:
+ sys.path.insert(0, str(Path(__file__).parent))
+ from validate import validate
+
+try:
+ import tiktoken
+ _enc = tiktoken.get_encoding("o200k_base")
+except ImportError:
+ _enc = None
+
+
def count_tokens(text):
    """Count tokens in `text` with tiktoken when available, else whitespace words."""
    if _enc is not None:
        return len(_enc.encode(text))
    # tiktoken not installed — approximate with a word count
    return len(text.split())
+
+
def benchmark_pair(orig_path: Path, comp_path: Path):
    """Token-count an original/compressed pair and validate the compression.

    Returns (file_name, original_tokens, compressed_tokens, saved_percent, is_valid).
    """
    original = orig_path.read_text()
    compressed = comp_path.read_text()

    before = count_tokens(original)
    after = count_tokens(compressed)
    # Percent saved; guard against a zero-token original file.
    pct_saved = 100 * (before - after) / before if before > 0 else 0.0
    check = validate(orig_path, comp_path)

    return (comp_path.name, before, after, pct_saved, check.is_valid)
+
+
def print_table(rows):
    """Render benchmark result rows as a markdown table on stdout."""
    print("\n| File | Original | Compressed | Saved % | Valid |")
    print("|------|----------|------------|---------|-------|")
    for name, orig_tokens, comp_tokens, saved, ok in rows:
        mark = "✅" if ok else "❌"
        print(f"| {name} | {orig_tokens} | {comp_tokens} | {saved:.1f}% | {mark} |")
+
+
def main():
    """Benchmark entry point with two modes.

    - Pair mode (two argv paths): benchmark one original/compressed pair.
    - Glob mode (no args): scan repo tests/caveman-compress/ for
      *.original.md files and benchmark each against its compressed twin.

    Exits 1 when an input path or the tests directory is missing.
    """
    # Direct file pair: python3 benchmark.py original.md compressed.md
    if len(sys.argv) == 3:
        orig = Path(sys.argv[1]).resolve()
        comp = Path(sys.argv[2]).resolve()
        if not orig.exists():
            print(f"❌ Not found: {orig}")
            sys.exit(1)
        if not comp.exists():
            print(f"❌ Not found: {comp}")
            sys.exit(1)
        print_table([benchmark_pair(orig, comp)])
        return

    # Glob mode: repo_root/tests/caveman-compress/
    # parent.parent.parent climbs scripts/ -> caveman-compress/ -> repo root.
    tests_dir = Path(__file__).parent.parent.parent / "tests" / "caveman-compress"
    if not tests_dir.exists():
        print(f"❌ Tests dir not found: {tests_dir}")
        sys.exit(1)

    rows = []
    for orig in sorted(tests_dir.glob("*.original.md")):
        # "foo.original.md" pairs with compressed "foo.md" in the same dir.
        comp = orig.with_name(orig.stem.removesuffix(".original") + ".md")
        if comp.exists():
            rows.append(benchmark_pair(orig, comp))

    if not rows:
        print("No compressed file pairs found.")
        return

    print_table(rows)


if __name__ == "__main__":
    main()
diff --git a/.pi/skills/caveman-compress/scripts/cli.py b/.pi/skills/caveman-compress/scripts/cli.py
new file mode 100644
index 0000000..428fd86
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/cli.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""
+Caveman Compress CLI
+
+Usage:
+    caveman <file>
+"""
+
+import sys
+from pathlib import Path
+
+from .compress import compress_file
+from .detect import detect_file_type, should_compress
+
+
def print_usage():
    """Print CLI usage. The positional <file> argument is the path to compress.

    The original usage string lost its "<file>" placeholder to markup
    stripping (same pattern as detect.py / validate.py usage lines).
    """
    print("Usage: caveman <file>")
+
+
def main():
    """CLI entry point: validate argv, classify the file, compress it.

    Exit codes:
        0   success, or skipped (file is code/config)
        1   bad usage, missing/invalid path, or unexpected error
        2   compression failed after retries
        130 interrupted by the user (Ctrl-C)
    """
    if len(sys.argv) != 2:
        print_usage()
        sys.exit(1)

    filepath = Path(sys.argv[1])

    # Check file exists
    if not filepath.exists():
        print(f"❌ File not found: {filepath}")
        sys.exit(1)

    if not filepath.is_file():
        print(f"❌ Not a file: {filepath}")
        sys.exit(1)

    filepath = filepath.resolve()

    # Detect file type
    file_type = detect_file_type(filepath)

    print(f"Detected: {file_type}")

    # Check if compressible
    if not should_compress(filepath):
        print("Skipping: file is not natural language (code/config)")
        sys.exit(0)

    print("Starting caveman compression...\n")

    try:
        success = compress_file(filepath)

        if success:
            print("\nCompression completed successfully")
            # Mirrors the backup naming used inside compress_file:
            # stem + ".original.md" (original extension is replaced).
            backup_path = filepath.with_name(filepath.stem + ".original.md")
            print(f"Compressed: {filepath}")
            print(f"Original: {backup_path}")
            sys.exit(0)
        else:
            # NOTE(review): False also covers the "backup already exists"
            # abort in compress_file, not only retry exhaustion.
            print("\n❌ Compression failed after retries")
            sys.exit(2)

    except KeyboardInterrupt:
        print("\nInterrupted by user")
        sys.exit(130)

    except Exception as e:
        print(f"\n❌ Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
diff --git a/.pi/skills/caveman-compress/scripts/compress.py b/.pi/skills/caveman-compress/scripts/compress.py
new file mode 100644
index 0000000..1622a7a
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/compress.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Caveman Memory Compression Orchestrator
+
+Usage:
+    python scripts/compress.py <file>
+"""
+
+import os
+import re
+import subprocess
+from pathlib import Path
+from typing import List
+
# Matches output whose ENTIRE body is wrapped in one ```/~~~ fence;
# group(2) captures the fenced content.
OUTER_FENCE_REGEX = re.compile(
    r"\A\s*(`{3,}|~{3,})[^\n]*\n(.*)\n\1\s*\Z", re.DOTALL
)


def strip_llm_wrapper(text: str) -> str:
    """Remove a single outer code fence that wraps the whole LLM output.

    Returns the fenced body when the entire string is one fenced block;
    otherwise returns `text` unchanged.
    """
    wrapper = OUTER_FENCE_REGEX.match(text)
    return wrapper.group(2) if wrapper else text
+
+from .detect import should_compress
+from .validate import validate
+
+MAX_RETRIES = 2
+
+
+# ---------- Claude Calls ----------
+
+
def call_claude(prompt: str) -> str:
    """Send `prompt` to Claude and return the response with any outer fence stripped.

    Uses the Anthropic SDK when ANTHROPIC_API_KEY is set (and the package is
    installed); otherwise falls back to the `claude` CLI, which reuses the
    user's existing desktop authentication.

    Raises:
        RuntimeError: if the `claude` CLI is not installed or exits non-zero.
    """
    api_key = os.environ.get("ANTHROPIC_API_KEY")
    if api_key:
        try:
            import anthropic

            client = anthropic.Anthropic(api_key=api_key)
            msg = client.messages.create(
                model=os.environ.get("CAVEMAN_MODEL", "claude-sonnet-4-5"),
                max_tokens=8192,
                messages=[{"role": "user", "content": prompt}],
            )
            return strip_llm_wrapper(msg.content[0].text.strip())
        except ImportError:
            pass  # anthropic not installed, fall back to CLI
    # Fallback: use claude CLI (handles desktop auth). Fixed argument list,
    # prompt passed via stdin — no shell interpolation (see SECURITY.md).
    try:
        result = subprocess.run(
            ["claude", "--print"],
            input=prompt,
            text=True,
            capture_output=True,
            check=True,
        )
        return strip_llm_wrapper(result.stdout.strip())
    except FileNotFoundError as e:
        # A bare FileNotFoundError here is confusing — make the cause explicit.
        raise RuntimeError(
            "claude CLI not found. Install it or set ANTHROPIC_API_KEY."
        ) from e
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Claude call failed:\n{e.stderr}")
+
+
def build_compress_prompt(original: str) -> str:
    """Build the one-shot compression prompt with `original` embedded verbatim.

    The prompt body is part of the runtime contract with the model — the
    preservation rules it states are exactly what validate.py later checks.
    """
    return f"""
Compress this markdown into caveman format.

STRICT RULES:
- Do NOT modify anything inside ``` code blocks
- Do NOT modify anything inside inline backticks
- Preserve ALL URLs exactly
- Preserve ALL headings exactly
- Preserve file paths and commands
- Return ONLY the compressed markdown body — do NOT wrap the entire output in a ```markdown fence or any other fence. Inner code blocks from the original stay as-is; do not add a new outer fence around the whole file.

Only compress natural language.

TEXT:
{original}
"""
+
+
def build_fix_prompt(original: str, compressed: str, errors: List[str]) -> str:
    """Build a targeted repair prompt listing the validation `errors`.

    Instructs the model to patch only the listed problems (restoring lost
    content from `original`) without recompressing `compressed`.
    """
    errors_str = "\n".join(f"- {e}" for e in errors)
    return f"""You are fixing a caveman-compressed markdown file. Specific validation errors were found.

CRITICAL RULES:
- DO NOT recompress or rephrase the file
- ONLY fix the listed errors — leave everything else exactly as-is
- The ORIGINAL is provided as reference only (to restore missing content)
- Preserve caveman style in all untouched sections

ERRORS TO FIX:
{errors_str}

HOW TO FIX:
- Missing URL: find it in ORIGINAL, restore it exactly where it belongs in COMPRESSED
- Code block mismatch: find the exact code block in ORIGINAL, restore it in COMPRESSED
- Heading mismatch: restore the exact heading text from ORIGINAL into COMPRESSED
- Do not touch any section not mentioned in the errors

ORIGINAL (reference only):
{original}

COMPRESSED (fix this):
{compressed}

Return ONLY the fixed compressed file. No explanation.
"""
+
+
+# ---------- Core Logic ----------
+
+
def compress_file(filepath: Path) -> bool:
    """Compress one natural-language file in place, keeping a backup.

    Flow: size/type checks -> compress via Claude -> write backup + result ->
    validate, with up to MAX_RETRIES targeted fix rounds. On final failure the
    original content is restored and the backup removed.

    Returns:
        True on validated success; False when skipped, aborted (backup
        exists), or validation still fails after retries.

    Raises:
        FileNotFoundError: if `filepath` does not exist.
        ValueError: if the file exceeds 500KB.
    """
    # Resolve and validate path
    filepath = filepath.resolve()
    MAX_FILE_SIZE = 500_000 # 500KB — rejected before any API call is made
    if not filepath.exists():
        raise FileNotFoundError(f"File not found: {filepath}")
    if filepath.stat().st_size > MAX_FILE_SIZE:
        raise ValueError(f"File too large to compress safely (max 500KB): {filepath}")

    print(f"Processing: {filepath}")

    if not should_compress(filepath):
        print("Skipping (not natural language)")
        return False

    original_text = filepath.read_text(errors="ignore")
    # Backup name replaces the original extension: "notes.txt" -> "notes.original.md".
    backup_path = filepath.with_name(filepath.stem + ".original.md")

    # Check if backup already exists to prevent accidental overwriting
    if backup_path.exists():
        print(f"⚠️ Backup file already exists: {backup_path}")
        print("The original backup may contain important content.")
        print("Aborting to prevent data loss. Please remove or rename the backup file if you want to proceed.")
        return False

    # Step 1: Compress
    print("Compressing with Claude...")
    compressed = call_claude(build_compress_prompt(original_text))

    # Save original as backup, write compressed to original path
    backup_path.write_text(original_text)
    filepath.write_text(compressed)

    # Step 2: Validate + Retry
    for attempt in range(MAX_RETRIES):
        print(f"\nValidation attempt {attempt + 1}")

        result = validate(backup_path, filepath)

        if result.is_valid:
            print("Validation passed")
            break

        print("❌ Validation failed:")
        for err in result.errors:
            print(f" - {err}")

        if attempt == MAX_RETRIES - 1:
            # Restore original on failure — the user's file is never left
            # holding an invalid compression.
            filepath.write_text(original_text)
            backup_path.unlink(missing_ok=True)
            print("❌ Failed after retries — original restored")
            return False

        # Targeted fix round: only the listed errors are patched, no recompression.
        print("Fixing with Claude...")
        compressed = call_claude(
            build_fix_prompt(original_text, compressed, result.errors)
        )
        filepath.write_text(compressed)

    return True
diff --git a/.pi/skills/caveman-compress/scripts/detect.py b/.pi/skills/caveman-compress/scripts/detect.py
new file mode 100644
index 0000000..5f50fd3
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/detect.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""Detect whether a file is natural language (compressible) or code/config (skip)."""
+
+import json
+import re
+from pathlib import Path
+
+# Extensions that are natural language and compressible
+COMPRESSIBLE_EXTENSIONS = {".md", ".txt", ".markdown", ".rst"}
+
+# Extensions that are code/config and should be skipped
+SKIP_EXTENSIONS = {
+ ".py", ".js", ".ts", ".tsx", ".jsx", ".json", ".yaml", ".yml",
+ ".toml", ".env", ".lock", ".css", ".scss", ".html", ".xml",
+ ".sql", ".sh", ".bash", ".zsh", ".go", ".rs", ".java", ".c",
+ ".cpp", ".h", ".hpp", ".rb", ".php", ".swift", ".kt", ".lua",
+ ".dockerfile", ".makefile", ".csv", ".ini", ".cfg",
+}
+
+# Patterns that indicate a line is code
+CODE_PATTERNS = [
+ re.compile(r"^\s*(import |from .+ import |require\(|const |let |var )"),
+ re.compile(r"^\s*(def |class |function |async function |export )"),
+ re.compile(r"^\s*(if\s*\(|for\s*\(|while\s*\(|switch\s*\(|try\s*\{)"),
+ re.compile(r"^\s*[\}\]\);]+\s*$"), # closing braces/brackets
+ re.compile(r"^\s*@\w+"), # decorators/annotations
+ re.compile(r'^\s*"[^"]+"\s*:\s*'), # JSON-like key-value
+ re.compile(r"^\s*\w+\s*=\s*[{\[\(\"']"), # assignment with literal
+]
+
+
def _is_code_line(line: str) -> bool:
    """Return True when the line matches any known code pattern."""
    for pattern in CODE_PATTERNS:
        if pattern.match(line):
            return True
    return False
+
+
+def _is_json_content(text: str) -> bool:
+ """Check if content is valid JSON."""
+ try:
+ json.loads(text)
+ return True
+ except (json.JSONDecodeError, ValueError):
+ return False
+
+
+def _is_yaml_content(lines: list[str]) -> bool:
+ """Heuristic: check if content looks like YAML."""
+ yaml_indicators = 0
+ for line in lines[:30]:
+ stripped = line.strip()
+ if stripped.startswith("---"):
+ yaml_indicators += 1
+ elif re.match(r"^\w[\w\s]*:\s", stripped):
+ yaml_indicators += 1
+ elif stripped.startswith("- ") and ":" in stripped:
+ yaml_indicators += 1
+ # If most non-empty lines look like YAML
+ non_empty = sum(1 for l in lines[:30] if l.strip())
+ return non_empty > 0 and yaml_indicators / non_empty > 0.6
+
+
def detect_file_type(filepath: Path) -> str:
    """Classify a file as 'natural_language', 'code', 'config', or 'unknown'.

    Known extensions are classified without reading the file; extensionless
    files are classified by sampling their content (JSON/YAML checks, then a
    code-line ratio over the first 50 lines).

    Returns:
        One of: 'natural_language', 'code', 'config', 'unknown'
    """
    ext = filepath.suffix.lower()

    # Extension-based classification
    if ext in COMPRESSIBLE_EXTENSIONS:
        return "natural_language"
    if ext in SKIP_EXTENSIONS:
        # Data/markup extensions within the skip list count as config, not code.
        return "code" if ext not in {".json", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".env"} else "config"

    # Extensionless files (like CLAUDE, TODO) — check content
    if not ext:
        try:
            text = filepath.read_text(errors="ignore")
        except (OSError, PermissionError):
            return "unknown"

        lines = text.splitlines()[:50]

        # Only the first 10KB is JSON-checked to bound the parse cost.
        if _is_json_content(text[:10000]):
            return "config"
        if _is_yaml_content(lines):
            return "config"

        # More than 40% code-looking non-empty lines => treat as code.
        code_lines = sum(1 for l in lines if l.strip() and _is_code_line(l))
        non_empty = sum(1 for l in lines if l.strip())
        if non_empty > 0 and code_lines / non_empty > 0.4:
            return "code"

        return "natural_language"

    # Unrecognized non-empty extension (e.g. ".log", ".doc").
    return "unknown"
+
+
def should_compress(filepath: Path) -> bool:
    """Return True if the file is natural language and should be compressed."""
    is_backup = filepath.name.endswith(".original.md")  # never recompress backups
    if not filepath.is_file() or is_backup:
        return False
    return detect_file_type(filepath) == "natural_language"
+
+
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        # Usage string restored: the "<file1>" placeholder was lost to markup
        # stripping, leaving "python detect.py [file2] ...".
        print("Usage: python detect.py <file1> [file2] ...")
        sys.exit(1)

    for path_str in sys.argv[1:]:
        p = Path(path_str).resolve()
        file_type = detect_file_type(p)
        compress = should_compress(p)
        print(f" {p.name:30s} type={file_type:20s} compress={compress}")
diff --git a/.pi/skills/caveman-compress/scripts/validate.py b/.pi/skills/caveman-compress/scripts/validate.py
new file mode 100644
index 0000000..3c4d4c1
--- /dev/null
+++ b/.pi/skills/caveman-compress/scripts/validate.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+import re
+from pathlib import Path
+
+URL_REGEX = re.compile(r"https?://[^\s)]+")
+FENCE_OPEN_REGEX = re.compile(r"^(\s{0,3})(`{3,}|~{3,})(.*)$")
+HEADING_REGEX = re.compile(r"^(#{1,6})\s+(.*)", re.MULTILINE)
+BULLET_REGEX = re.compile(r"^\s*[-*+]\s+", re.MULTILINE)
+
+# crude but effective path detection
+# Requires either a path prefix (./ ../ / or drive letter) or a slash/backslash within the match
+PATH_REGEX = re.compile(r"(?:\./|\.\./|/|[A-Za-z]:\\)[\w\-/\\\.]+|[\w\-\.]+[/\\][\w\-/\\\.]+")
+
+
class ValidationResult:
    """Accumulates validation findings: errors are fatal, warnings are not."""

    def __init__(self):
        self.is_valid = True   # flips to False on the first error
        self.errors = []       # fatal problems — trigger a fix round
        self.warnings = []     # informational only

    def add_error(self, msg):
        """Record a fatal problem and mark the result invalid."""
        self.errors.append(msg)
        self.is_valid = False

    def add_warning(self, msg):
        """Record a non-fatal observation."""
        self.warnings.append(msg)
+
+
def read_file(path: Path) -> str:
    # errors="ignore" drops undecodable bytes so a stray binary character
    # cannot abort validation of an otherwise-text file.
    return path.read_text(errors="ignore")
+
+
+# ---------- Extractors ----------
+
+
def extract_headings(text):
    # (marker, title) pairs for every ATX heading, e.g. ("##", "Usage");
    # the '#' run encodes the level, title whitespace is stripped.
    return [(level, title.strip()) for level, title in HEADING_REGEX.findall(text)]
+
+
def extract_code_blocks(text):
    """Line-based fenced code block extractor.

    Handles ``` and ~~~ fences with variable length (CommonMark: closing
    fence must use same char and be at least as long as opening). Supports
    nested fences (e.g. an outer 4-backtick block wrapping inner 3-backtick
    content).

    Returns each closed block as one string, fences included, so blocks can
    be compared byte-for-byte between original and compressed files.
    """
    blocks = []
    lines = text.split("\n")
    i = 0
    n = len(lines)
    while i < n:
        m = FENCE_OPEN_REGEX.match(lines[i])
        if not m:
            i += 1
            continue
        # Opening fence found: remember its character and length so only a
        # matching (same-char, at-least-as-long) fence can close it.
        fence_char = m.group(2)[0]
        fence_len = len(m.group(2))
        open_line = lines[i]
        block_lines = [open_line]
        i += 1
        closed = False
        while i < n:
            close_m = FENCE_OPEN_REGEX.match(lines[i])
            if (
                close_m
                and close_m.group(2)[0] == fence_char
                and len(close_m.group(2)) >= fence_len
                and close_m.group(3).strip() == ""  # closing fence has no info string
            ):
                block_lines.append(lines[i])
                closed = True
                i += 1
                break
            block_lines.append(lines[i])
            i += 1
        if closed:
            blocks.append("\n".join(block_lines))
        # Unclosed fences are silently skipped — they indicate malformed markdown
        # and including them would cause false-positive validation failures.
    return blocks
+
+
def extract_urls(text):
    # Set of http(s) URLs; a set because validation only checks presence.
    return set(URL_REGEX.findall(text))


def extract_paths(text):
    # Set of filesystem-looking tokens (see PATH_REGEX); heuristic, used
    # only for warnings.
    return set(PATH_REGEX.findall(text))


def count_bullets(text):
    # Number of list-item lines (-, *, + markers) in the document.
    return len(BULLET_REGEX.findall(text))
+
+
+# ---------- Validators ----------
+
+
def validate_headings(orig, comp, result):
    """Error on heading-count change; warn when heading text/order drifted."""
    before = extract_headings(orig)
    after = extract_headings(comp)

    if len(before) != len(after):
        result.add_error(f"Heading count mismatch: {len(before)} vs {len(after)}")

    if before != after:
        result.add_warning("Heading text/order changed")
+
+
def validate_code_blocks(orig, comp, result):
    """Code blocks are read-only regions — any difference is a fatal error."""
    if extract_code_blocks(orig) != extract_code_blocks(comp):
        result.add_error("Code blocks not preserved exactly")
+
+
def validate_urls(orig, comp, result):
    """Every URL must survive compression exactly; none may be invented."""
    before = extract_urls(orig)
    after = extract_urls(comp)

    if before != after:
        result.add_error(f"URL mismatch: lost={before - after}, added={after - before}")
+
+
def validate_paths(orig, comp, result):
    """Path detection is heuristic, so a mismatch is warning-only."""
    before = extract_paths(orig)
    after = extract_paths(comp)

    if before != after:
        result.add_warning(f"Path mismatch: lost={before - after}, added={after - before}")
+
+
def validate_bullets(orig, comp, result):
    """Warn when the bullet count moved by more than 15% (merging is allowed)."""
    before = count_bullets(orig)
    if before == 0:
        # Nothing to compare against (and avoids division by zero).
        return

    after = count_bullets(comp)
    if abs(before - after) / before > 0.15:
        result.add_warning(f"Bullet count changed too much: {before} -> {after}")
+
+
+# ---------- Main ----------
+
+
def validate(original_path: Path, compressed_path: Path) -> ValidationResult:
    """Run all structural checks comparing a file to its compressed form.

    Heading-count, code-block, and URL mismatches are errors (trigger a fix
    round in compress.py); path and bullet drift are warnings only.
    """
    result = ValidationResult()

    orig = read_file(original_path)
    comp = read_file(compressed_path)

    validate_headings(orig, comp, result)
    validate_code_blocks(orig, comp, result)
    validate_urls(orig, comp, result)
    validate_paths(orig, comp, result)
    validate_bullets(orig, comp, result)

    return result
+
+
+# ---------- CLI ----------
+
if __name__ == "__main__":
    import sys

    if len(sys.argv) != 3:
        # Usage string restored: the "<original> <compressed>" placeholders
        # were lost to markup stripping, leaving "python validate.py ".
        print("Usage: python validate.py <original> <compressed>")
        sys.exit(1)

    orig = Path(sys.argv[1]).resolve()
    comp = Path(sys.argv[2]).resolve()

    res = validate(orig, comp)

    print(f"\nValid: {res.is_valid}")

    if res.errors:
        print("\nErrors:")
        for e in res.errors:
            print(f" - {e}")

    if res.warnings:
        print("\nWarnings:")
        for w in res.warnings:
            print(f" - {w}")
diff --git a/.pi/skills/caveman-help/SKILL.md b/.pi/skills/caveman-help/SKILL.md
new file mode 100644
index 0000000..078e487
--- /dev/null
+++ b/.pi/skills/caveman-help/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: caveman-help
+description: >
+ Quick-reference card for all caveman modes, skills, and commands.
+ One-shot display, not a persistent mode. Trigger: /caveman-help,
+ "caveman help", "what caveman commands", "how do I use caveman".
+---
+
+# Caveman Help
+
+Display this reference card when invoked. One-shot — do NOT change mode, write flag files, or persist anything. Output in caveman style.
+
+## Modes
+
+| Mode | Trigger | What change |
+|------|---------|-------------|
+| **Lite** | `/caveman lite` | Drop filler. Keep sentence structure. |
+| **Full** | `/caveman` | Drop articles, filler, pleasantries, hedging. Fragments OK. Default. |
+| **Ultra** | `/caveman ultra` | Extreme compression. Bare fragments. Tables over prose. |
+| **Wenyan-Lite** | `/caveman wenyan-lite` | Classical Chinese style, light compression. |
+| **Wenyan-Full** | `/caveman wenyan` | Full 文言文. Maximum classical terseness. |
+| **Wenyan-Ultra** | `/caveman wenyan-ultra` | Extreme. Ancient scholar on a budget. |
+
+Mode stick until changed or session end.
+
+## Skills
+
+| Skill | Trigger | What it do |
+|-------|---------|-----------|
+| **caveman-commit** | `/caveman-commit` | Terse commit messages. Conventional Commits. ≤50 char subject. |
+| **caveman-review** | `/caveman-review` | One-line PR comments: `L42: bug: user null. Add guard.` |
+| **caveman-compress** | `/caveman:compress <file>` | Compress .md files to caveman prose. Saves ~46% input tokens. |
+| **caveman-help** | `/caveman-help` | This card. |
+
+## Deactivate
+
+Say "stop caveman" or "normal mode". Resume anytime with `/caveman`.
+
+## Configure Default Mode
+
+Default mode = `full`. Change it:
+
+**Environment variable** (highest priority):
+```bash
+export CAVEMAN_DEFAULT_MODE=ultra
+```
+
+**Config file** (`~/.config/caveman/config.json`):
+```json
+{ "defaultMode": "lite" }
+```
+
+Set `"off"` to disable auto-activation on session start. User can still activate manually with `/caveman`.
+
+Resolution: env var > config file > `full`.
+
+## More
+
+Full docs: https://github.com/JuliusBrussee/caveman
diff --git a/.pi/skills/caveman-review/SKILL.md b/.pi/skills/caveman-review/SKILL.md
new file mode 100644
index 0000000..48f4adb
--- /dev/null
+++ b/.pi/skills/caveman-review/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: caveman-review
+description: >
+ Ultra-compressed code review comments. Cuts noise from PR feedback while preserving
+ the actionable signal. Each comment is one line: location, problem, fix. Use when user
+ says "review this PR", "code review", "review the diff", "/review", or invokes
+ /caveman-review. Auto-triggers when reviewing pull requests.
+---
+
+Write code review comments terse and actionable. One line per finding. Location, problem, fix. No throat-clearing.
+
+## Rules
+
+**Format:** `L<line>: <problem>. <fix>.` — or `<file>:L<line>: <problem>. <fix>.` when reviewing multi-file diffs.
+
+**Severity prefix (optional, when mixed):**
+- `🔴 bug:` — broken behavior, will cause incident
+- `🟡 risk:` — works but fragile (race, missing null check, swallowed error)
+- `🔵 nit:` — style, naming, micro-optim. Author can ignore
+- `❓ q:` — genuine question, not a suggestion
+
+**Drop:**
+- "I noticed that...", "It seems like...", "You might want to consider..."
+- "This is just a suggestion but..." — use `nit:` instead
+- "Great work!", "Looks good overall but..." — say it once at the top, not per comment
+- Restating what the line does — the reviewer can read the diff
+- Hedging ("perhaps", "maybe", "I think") — if unsure use `q:`
+
+**Keep:**
+- Exact line numbers
+- Exact symbol/function/variable names in backticks
+- Concrete fix, not "consider refactoring this"
+- The *why* if the fix isn't obvious from the problem statement
+
+## Examples
+
+❌ "I noticed that on line 42 you're not checking if the user object is null before accessing the email property. This could potentially cause a crash if the user is not found in the database. You might want to add a null check here."
+
+✅ `L42: 🔴 bug: user can be null after .find(). Add guard before .email.`
+
+❌ "It looks like this function is doing a lot of things and might benefit from being broken up into smaller functions for readability."
+
+✅ `L88-140: 🔵 nit: 50-line fn does 4 things. Extract validate/normalize/persist.`
+
+❌ "Have you considered what happens if the API returns a 429? I think we should probably handle that case."
+
+✅ `L23: 🟡 risk: no retry on 429. Wrap in withBackoff(3).`
+
+## Auto-Clarity
+
+Drop terse mode for: security findings (CVE-class bugs need full explanation + reference), architectural disagreements (need rationale, not just a one-liner), and onboarding contexts where the author is new and needs the "why". In those cases write a normal paragraph, then resume terse for the rest.
+
+## Boundaries
+
+Reviews only — does not write the code fix, does not approve/request-changes, does not run linters. Output the comment(s) ready to paste into the PR. "stop caveman-review" or "normal mode": revert to verbose review style.
\ No newline at end of file
diff --git a/.pi/skills/caveman/SKILL.md b/.pi/skills/caveman/SKILL.md
new file mode 100644
index 0000000..2ab498b
--- /dev/null
+++ b/.pi/skills/caveman/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: caveman
+description: >
+ Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman
+ while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra,
+ wenyan-lite, wenyan-full, wenyan-ultra.
+ Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens",
+ "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested.
+---
+
+Respond terse like smart caveman. All technical substance stay. Only fluff die.
+
+## Persistence
+
+ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode".
+
+Default: **full**. Switch: `/caveman lite|full|ultra`.
+
+## Rules
+
+Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact.
+
+Pattern: `[thing] [action] [reason]. [next step].`
+
+Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..."
+Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:"
+
+## Intensity
+
+| Level | What change |
+|-------|------------|
+| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight |
+| **full** | Drop articles, fragments OK, short synonyms. Classic caveman |
+| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough |
+| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register |
+| **wenyan-full** | Maximum classical terseness. Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) |
+| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse |
+
+Example — "Why React component re-render?"
+- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`."
+- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`."
+- ultra: "Inline obj prop → new ref → re-render. `useMemo`."
+- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。"
+- wenyan-full: "物出新參照,致重繪。useMemo Wrap之。"
+- wenyan-ultra: "新參照→重繪。useMemo Wrap。"
+
+Example — "Explain database connection pooling."
+- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead."
+- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead."
+- ultra: "Pool = reuse DB conn. Skip handshake → fast under load."
+- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。"
+- wenyan-ultra: "池reuse conn。skip handshake → fast。"
+
+## Auto-Clarity
+
+Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done.
+
+Example — destructive op:
+> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone.
+> ```sql
+> DROP TABLE users;
+> ```
+> Caveman resume. Verify backup exist first.
+
+## Boundaries
+
+Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end.
\ No newline at end of file
diff --git a/.trae/skills/caveman-commit/SKILL.md b/.trae/skills/caveman-commit/SKILL.md
new file mode 100644
index 0000000..729318c
--- /dev/null
+++ b/.trae/skills/caveman-commit/SKILL.md
@@ -0,0 +1,65 @@
+---
+name: caveman-commit
+description: >
+ Ultra-compressed commit message generator. Cuts noise from commit messages while preserving
+ intent and reasoning. Conventional Commits format. Subject ≤50 chars, body only when "why"
+ isn't obvious. Use when user says "write a commit", "commit message", "generate commit",
+ "/commit", or invokes /caveman-commit. Auto-triggers when staging changes.
+---
+
+Write commit messages terse and exact. Conventional Commits format. No fluff. Why over what.
+
+## Rules
+
+**Subject line:**
+- `(): ` — `` optional
+- Types: `feat`, `fix`, `refactor`, `perf`, `docs`, `test`, `chore`, `build`, `ci`, `style`, `revert`
+- Imperative mood: "add", "fix", "remove" — not "added", "adds", "adding"
+- ≤50 chars when possible, hard cap 72
+- No trailing period
+- Match project convention for capitalization after the colon
+
+**Body (only if needed):**
+- Skip entirely when subject is self-explanatory
+- Add body only for: non-obvious *why*, breaking changes, migration notes, linked issues
+- Wrap at 72 chars
+- Bullets `-` not `*`
+- Reference issues/PRs at end: `Closes #42`, `Refs #17`
+
+**What NEVER goes in:**
+- "This commit does X", "I", "we", "now", "currently" — the diff says what
+- "As requested by..." — use Co-authored-by trailer
+- "Generated with Claude Code" or any AI attribution
+- Emoji (unless project convention requires)
+- Restating the file name when scope already says it
+
+## Examples
+
+Diff: new endpoint for user profile with body explaining the why
+- ❌ "feat: add a new endpoint to get user profile information from the database"
+- ✅
+ ```
+ feat(api): add GET /users/:id/profile
+
+ Mobile client needs profile data without the full user payload
+ to reduce LTE bandwidth on cold-launch screens.
+
+ Closes #128
+ ```
+
+Diff: breaking API change
+- ✅
+ ```
+ feat(api)!: rename /v1/orders to /v1/checkout
+
+ BREAKING CHANGE: clients on /v1/orders must migrate to /v1/checkout
+ before 2026-06-01. Old route returns 410 after that date.
+ ```
+
+## Auto-Clarity
+
+Always include body for: breaking changes, security fixes, data migrations, anything reverting a prior commit. Never compress these into subject-only — future debuggers need the context.
+
+## Boundaries
+
+Only generates the commit message. Does not run `git commit`, does not stage files, does not amend. Output the message as a code block ready to paste. "stop caveman-commit" or "normal mode": revert to verbose commit style.
\ No newline at end of file
diff --git a/.trae/skills/caveman-compress/README.md b/.trae/skills/caveman-compress/README.md
new file mode 100644
index 0000000..7c0e8ba
--- /dev/null
+++ b/.trae/skills/caveman-compress/README.md
@@ -0,0 +1,163 @@
+
+
+
+
+caveman-compress
+
+
+ shrink memory file. save token every session.
+
+
+---
+
+A Claude Code skill that compresses your project memory files (`CLAUDE.md`, todos, preferences) into caveman format — so every session loads fewer tokens automatically.
+
+Claude read `CLAUDE.md` on every session start. If file big, cost big. Caveman make file small. Cost go down forever.
+
+## What It Do
+
+```
+/caveman:compress CLAUDE.md
+```
+
+```
+CLAUDE.md ← compressed (Claude reads this — fewer tokens every session)
+CLAUDE.original.md ← human-readable backup (you edit this)
+```
+
+Original never lost. You can read and edit `.original.md`. Run skill again to re-compress after edits.
+
+## Benchmarks
+
+Real results on real project files:
+
+| File | Original | Compressed | Saved |
+|------|----------:|----------:|------:|
+| `claude-md-preferences.md` | 706 | 285 | **59.6%** |
+| `project-notes.md` | 1145 | 535 | **53.3%** |
+| `claude-md-project.md` | 1122 | 636 | **43.3%** |
+| `todo-list.md` | 627 | 388 | **38.1%** |
+| `mixed-with-code.md` | 888 | 560 | **36.9%** |
+| **Average** | **898** | **481** | **46%** |
+
+All validations passed ✅ — headings, code blocks, URLs, file paths preserved exactly.
+
+## Before / After
+
+
+
+
+
+### 📄 Original (706 tokens)
+
+> "I strongly prefer TypeScript with strict mode enabled for all new code. Please don't use `any` type unless there's genuinely no way around it, and if you do, leave a comment explaining the reasoning. I find that taking the time to properly type things catches a lot of bugs before they ever make it to runtime."
+
+
+
+
+### 🪨 Caveman (285 tokens)
+
+> "Prefer TypeScript strict mode always. No `any` unless unavoidable — comment why if used. Proper types catch bugs early."
+
+
+
+
+
+**Same instructions. 60% fewer tokens. Every. Single. Session.**
+
+## Security
+
+`caveman-compress` is flagged as Snyk High Risk due to subprocess and file I/O patterns detected by static analysis. This is a false positive — see [SECURITY.md](./SECURITY.md) for a full explanation of what the skill does and does not do.
+
+## Install
+
+Compress is built in with the `caveman` plugin. Install `caveman` once, then use `/caveman:compress`.
+
+If you need local files, the compress skill lives at:
+
+```bash
+caveman-compress/
+```
+
+**Requires:** Python 3.10+
+
+## Usage
+
+```
+/caveman:compress <file>
+```
+
+Examples:
+```
+/caveman:compress CLAUDE.md
+/caveman:compress docs/preferences.md
+/caveman:compress todos.md
+```
+
+### What files work
+
+| Type | Compress? |
+|------|-----------|
+| `.md`, `.txt`, `.rst` | ✅ Yes |
+| Extensionless natural language | ✅ Yes |
+| `.py`, `.js`, `.ts`, `.json`, `.yaml` | ❌ Skip (code/config) |
+| `*.original.md` | ❌ Skip (backup files) |
+
+## How It Work
+
+```
+/caveman:compress CLAUDE.md
+ ↓
+detect file type (no tokens)
+ ↓
+Claude compresses (tokens — one call)
+ ↓
+validate output (no tokens)
+ checks: headings, code blocks, URLs, file paths, bullets
+ ↓
+if errors: Claude fixes cherry-picked issues only (tokens — targeted fix)
+ does NOT recompress — only patches broken parts
+ ↓
+retry up to 2 times
+ ↓
+write compressed → CLAUDE.md
+write original → CLAUDE.original.md
+```
+
+Only two things use tokens: initial compression + targeted fix if validation fails. Everything else is local Python.
+
+## What Is Preserved
+
+Caveman compress natural language. It never touch:
+
+- Code blocks (` ``` ` fenced or indented)
+- Inline code (`` `backtick content` ``)
+- URLs and links
+- File paths (`/src/components/...`)
+- Commands (`npm install`, `git commit`)
+- Technical terms, library names, API names
+- Headings (exact text preserved)
+- Tables (structure preserved, cell text compressed)
+- Dates, version numbers, numeric values
+
+## Why This Matter
+
+`CLAUDE.md` loads on **every session start**. A 1000-token project memory file costs tokens every single time you open a project. Over 100 sessions that's 100,000 tokens of overhead — just for context you already wrote.
+
+Caveman cut that by ~46% on average. Same instructions. Same accuracy. Less waste.
+
+```
+┌────────────────────────────────────────────┐
+│ TOKEN SAVINGS PER FILE █████ 46% │
+│ SESSIONS THAT BENEFIT ██████████ 100% │
+│ INFORMATION PRESERVED ██████████ 100% │
+│ SETUP TIME █ 1x │
+└────────────────────────────────────────────┘
+```
+
+## Part of Caveman
+
+This skill is part of the [caveman](https://github.com/JuliusBrussee/caveman) toolkit — making Claude use fewer tokens without losing accuracy.
+
+- **caveman** — make Claude *speak* like caveman (cuts response tokens ~65%)
+- **caveman-compress** — make Claude *read* less (cuts context tokens ~46%)
diff --git a/.trae/skills/caveman-compress/SECURITY.md b/.trae/skills/caveman-compress/SECURITY.md
new file mode 100644
index 0000000..693108c
--- /dev/null
+++ b/.trae/skills/caveman-compress/SECURITY.md
@@ -0,0 +1,31 @@
+# Security
+
+## Snyk High Risk Rating
+
+`caveman-compress` receives a Snyk High Risk rating due to static analysis heuristics. This document explains what the skill does and does not do.
+
+### What triggers the rating
+
+1. **subprocess usage**: The skill calls the `claude` CLI via `subprocess.run()` as a fallback when `ANTHROPIC_API_KEY` is not set. The subprocess call uses a fixed argument list — no shell interpolation occurs. User file content is passed via stdin, not as a shell argument.
+
+2. **File read/write**: The skill reads the file the user explicitly points it at, compresses it, and writes the result back to the same path. A `.original.md` backup is saved alongside it. No files outside the user-specified path are read or written.
+
+### What the skill does NOT do
+
+- Does not execute user file content as code
+- Does not make network requests except to Anthropic's API (via SDK or CLI)
+- Does not access files outside the path the user provides
+- Does not use shell=True or string interpolation in subprocess calls
+- Does not collect or transmit any data beyond the file being compressed
+
+### Auth behavior
+
+If `ANTHROPIC_API_KEY` is set, the skill uses the Anthropic Python SDK directly (no subprocess). If not set, it falls back to the `claude` CLI, which uses the user's existing Claude desktop authentication.
+
+### File size limit
+
+Files larger than 500KB are rejected before any API call is made.
+
+### Reporting a vulnerability
+
+If you believe you've found a genuine security issue, please open a GitHub issue with the label `security`.
diff --git a/.trae/skills/caveman-compress/SKILL.md b/.trae/skills/caveman-compress/SKILL.md
new file mode 100644
index 0000000..7b3e3aa
--- /dev/null
+++ b/.trae/skills/caveman-compress/SKILL.md
@@ -0,0 +1,111 @@
+---
+name: caveman-compress
+description: >
+ Compress natural language memory files (CLAUDE.md, todos, preferences) into caveman format
+ to save input tokens. Preserves all technical substance, code, URLs, and structure.
+ Compressed version overwrites the original file. Human-readable backup saved as FILE.original.md.
+ Trigger: /caveman:compress or "compress memory file"
+---
+
+# Caveman Compress
+
+## Purpose
+
+Compress natural language files (CLAUDE.md, todos, preferences) into caveman-speak to reduce input tokens. Compressed version overwrites original. Human-readable backup saved as `.original.md`.
+
+## Trigger
+
+`/caveman:compress <file>` or when user asks to compress a memory file.
+
+## Process
+
+1. The compression scripts live in `caveman-compress/scripts/` (adjacent to this SKILL.md). If the path is not immediately available, search for `caveman-compress/scripts/__main__.py`.
+
+2. Run:
+
+cd caveman-compress && python3 -m scripts <file>
+
+3. The CLI will:
+- detect file type (no tokens)
+- call Claude to compress
+- validate output (no tokens)
+- if errors: cherry-pick fix with Claude (targeted fixes only, no recompression)
+- retry up to 2 times
+- if still failing after 2 retries: report error to user, leave original file untouched
+
+4. Return result to user
+
+## Compression Rules
+
+### Remove
+- Articles: a, an, the
+- Filler: just, really, basically, actually, simply, essentially, generally
+- Pleasantries: "sure", "certainly", "of course", "happy to", "I'd recommend"
+- Hedging: "it might be worth", "you could consider", "it would be good to"
+- Redundant phrasing: "in order to" → "to", "make sure to" → "ensure", "the reason is because" → "because"
+- Connective fluff: "however", "furthermore", "additionally", "in addition"
+
+### Preserve EXACTLY (never modify)
+- Code blocks (fenced ``` and indented)
+- Inline code (`backtick content`)
+- URLs and links (full URLs, markdown links)
+- File paths (`/src/components/...`, `./config.yaml`)
+- Commands (`npm install`, `git commit`, `docker build`)
+- Technical terms (library names, API names, protocols, algorithms)
+- Proper nouns (project names, people, companies)
+- Dates, version numbers, numeric values
+- Environment variables (`$HOME`, `NODE_ENV`)
+
+### Preserve Structure
+- All markdown headings (keep exact heading text, compress body below)
+- Bullet point hierarchy (keep nesting level)
+- Numbered lists (keep numbering)
+- Tables (compress cell text, keep structure)
+- Frontmatter/YAML headers in markdown files
+
+### Compress
+- Use short synonyms: "big" not "extensive", "fix" not "implement a solution for", "use" not "utilize"
+- Fragments OK: "Run tests before commit" not "You should always run tests before committing"
+- Drop "you should", "make sure to", "remember to" — just state the action
+- Merge redundant bullets that say the same thing differently
+- Keep one example where multiple examples show the same pattern
+
+CRITICAL RULE:
+Anything inside ``` ... ``` must be copied EXACTLY.
+Do not:
+- remove comments
+- remove spacing
+- reorder lines
+- shorten commands
+- simplify anything
+
+Inline code (`...`) must be preserved EXACTLY.
+Do not modify anything inside backticks.
+
+If file contains code blocks:
+- Treat code blocks as read-only regions
+- Only compress text outside them
+- Do not merge sections around code
+
+## Pattern
+
+Original:
+> You should always make sure to run the test suite before pushing any changes to the main branch. This is important because it helps catch bugs early and prevents broken builds from being deployed to production.
+
+Compressed:
+> Run tests before push to main. Catch bugs early, prevent broken prod deploys.
+
+Original:
+> The application uses a microservices architecture with the following components. The API gateway handles all incoming requests and routes them to the appropriate service. The authentication service is responsible for managing user sessions and JWT tokens.
+
+Compressed:
+> Microservices architecture. API gateway route all requests to services. Auth service manage user sessions + JWT tokens.
+
+## Boundaries
+
+- ONLY compress natural language files (.md, .txt, extensionless)
+- NEVER modify: .py, .js, .ts, .json, .yaml, .yml, .toml, .env, .lock, .css, .html, .xml, .sql, .sh
+- If file has mixed content (prose + code), compress ONLY the prose sections
+- If unsure whether something is code or prose, leave it unchanged
+- Original file is backed up as FILE.original.md before overwriting
+- Never compress FILE.original.md (skip it)
diff --git a/.trae/skills/caveman-compress/scripts/__init__.py b/.trae/skills/caveman-compress/scripts/__init__.py
new file mode 100644
index 0000000..16b8c53
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/__init__.py
@@ -0,0 +1,9 @@
+"""Caveman compress scripts.
+
+This package provides tools to compress natural language markdown files
+into caveman format to save input tokens.
+"""
+
+__all__ = ["cli", "compress", "detect", "validate"]
+
+__version__ = "1.0.0"
diff --git a/.trae/skills/caveman-compress/scripts/__main__.py b/.trae/skills/caveman-compress/scripts/__main__.py
new file mode 100644
index 0000000..4e28416
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+main()
diff --git a/.trae/skills/caveman-compress/scripts/benchmark.py b/.trae/skills/caveman-compress/scripts/benchmark.py
new file mode 100644
index 0000000..eac927d
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/benchmark.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+from pathlib import Path
+import sys
+
+# Support both direct execution and module import
+try:
+ from .validate import validate
+except ImportError:
+ sys.path.insert(0, str(Path(__file__).parent))
+ from validate import validate
+
+try:
+ import tiktoken
+ _enc = tiktoken.get_encoding("o200k_base")
+except ImportError:
+ _enc = None
+
+
+def count_tokens(text):
+ if _enc is None:
+ return len(text.split()) # fallback: word count
+ return len(_enc.encode(text))
+
+
+def benchmark_pair(orig_path: Path, comp_path: Path):
+ orig_text = orig_path.read_text()
+ comp_text = comp_path.read_text()
+
+ orig_tokens = count_tokens(orig_text)
+ comp_tokens = count_tokens(comp_text)
+ saved = 100 * (orig_tokens - comp_tokens) / orig_tokens if orig_tokens > 0 else 0.0
+ result = validate(orig_path, comp_path)
+
+ return (comp_path.name, orig_tokens, comp_tokens, saved, result.is_valid)
+
+
+def print_table(rows):
+ print("\n| File | Original | Compressed | Saved % | Valid |")
+ print("|------|----------|------------|---------|-------|")
+ for r in rows:
+ print(f"| {r[0]} | {r[1]} | {r[2]} | {r[3]:.1f}% | {'✅' if r[4] else '❌'} |")
+
+
+def main():
+ # Direct file pair: python3 benchmark.py original.md compressed.md
+ if len(sys.argv) == 3:
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+ if not orig.exists():
+ print(f"❌ Not found: {orig}")
+ sys.exit(1)
+ if not comp.exists():
+ print(f"❌ Not found: {comp}")
+ sys.exit(1)
+ print_table([benchmark_pair(orig, comp)])
+ return
+
+ # Glob mode: repo_root/tests/caveman-compress/
+ tests_dir = Path(__file__).parent.parent.parent / "tests" / "caveman-compress"
+ if not tests_dir.exists():
+ print(f"❌ Tests dir not found: {tests_dir}")
+ sys.exit(1)
+
+ rows = []
+ for orig in sorted(tests_dir.glob("*.original.md")):
+ comp = orig.with_name(orig.stem.removesuffix(".original") + ".md")
+ if comp.exists():
+ rows.append(benchmark_pair(orig, comp))
+
+ if not rows:
+ print("No compressed file pairs found.")
+ return
+
+ print_table(rows)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.trae/skills/caveman-compress/scripts/cli.py b/.trae/skills/caveman-compress/scripts/cli.py
new file mode 100644
index 0000000..428fd86
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/cli.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""
+Caveman Compress CLI
+
+Usage:
+ caveman <file>
+"""
+
+import sys
+from pathlib import Path
+
+from .compress import compress_file
+from .detect import detect_file_type, should_compress
+
+
+def print_usage():
+ print("Usage: caveman <file>")
+
+
+def main():
+ if len(sys.argv) != 2:
+ print_usage()
+ sys.exit(1)
+
+ filepath = Path(sys.argv[1])
+
+ # Check file exists
+ if not filepath.exists():
+ print(f"❌ File not found: {filepath}")
+ sys.exit(1)
+
+ if not filepath.is_file():
+ print(f"❌ Not a file: {filepath}")
+ sys.exit(1)
+
+ filepath = filepath.resolve()
+
+ # Detect file type
+ file_type = detect_file_type(filepath)
+
+ print(f"Detected: {file_type}")
+
+ # Check if compressible
+ if not should_compress(filepath):
+ print("Skipping: file is not natural language (code/config)")
+ sys.exit(0)
+
+ print("Starting caveman compression...\n")
+
+ try:
+ success = compress_file(filepath)
+
+ if success:
+ print("\nCompression completed successfully")
+ backup_path = filepath.with_name(filepath.stem + ".original.md")
+ print(f"Compressed: {filepath}")
+ print(f"Original: {backup_path}")
+ sys.exit(0)
+ else:
+ print("\n❌ Compression failed after retries")
+ sys.exit(2)
+
+ except KeyboardInterrupt:
+ print("\nInterrupted by user")
+ sys.exit(130)
+
+ except Exception as e:
+ print(f"\n❌ Error: {e}")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.trae/skills/caveman-compress/scripts/compress.py b/.trae/skills/caveman-compress/scripts/compress.py
new file mode 100644
index 0000000..1622a7a
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/compress.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Caveman Memory Compression Orchestrator
+
+Usage:
+ python scripts/compress.py <file>
+"""
+
+import os
+import re
+import subprocess
+from pathlib import Path
+from typing import List
+
+OUTER_FENCE_REGEX = re.compile(
+ r"\A\s*(`{3,}|~{3,})[^\n]*\n(.*)\n\1\s*\Z", re.DOTALL
+)
+
+
+def strip_llm_wrapper(text: str) -> str:
+ """Strip outer ```markdown ... ``` fence when it wraps the entire output."""
+ m = OUTER_FENCE_REGEX.match(text)
+ if m:
+ return m.group(2)
+ return text
+
+from .detect import should_compress
+from .validate import validate
+
+MAX_RETRIES = 2
+
+
+# ---------- Claude Calls ----------
+
+
+def call_claude(prompt: str) -> str:
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
+ if api_key:
+ try:
+ import anthropic
+
+ client = anthropic.Anthropic(api_key=api_key)
+ msg = client.messages.create(
+ model=os.environ.get("CAVEMAN_MODEL", "claude-sonnet-4-5"),
+ max_tokens=8192,
+ messages=[{"role": "user", "content": prompt}],
+ )
+ return strip_llm_wrapper(msg.content[0].text.strip())
+ except ImportError:
+ pass # anthropic not installed, fall back to CLI
+ # Fallback: use claude CLI (handles desktop auth)
+ try:
+ result = subprocess.run(
+ ["claude", "--print"],
+ input=prompt,
+ text=True,
+ capture_output=True,
+ check=True,
+ )
+ return strip_llm_wrapper(result.stdout.strip())
+ except subprocess.CalledProcessError as e:
+ raise RuntimeError(f"Claude call failed:\n{e.stderr}")
+
+
+def build_compress_prompt(original: str) -> str:
+ return f"""
+Compress this markdown into caveman format.
+
+STRICT RULES:
+- Do NOT modify anything inside ``` code blocks
+- Do NOT modify anything inside inline backticks
+- Preserve ALL URLs exactly
+- Preserve ALL headings exactly
+- Preserve file paths and commands
+- Return ONLY the compressed markdown body — do NOT wrap the entire output in a ```markdown fence or any other fence. Inner code blocks from the original stay as-is; do not add a new outer fence around the whole file.
+
+Only compress natural language.
+
+TEXT:
+{original}
+"""
+
+
+def build_fix_prompt(original: str, compressed: str, errors: List[str]) -> str:
+ errors_str = "\n".join(f"- {e}" for e in errors)
+ return f"""You are fixing a caveman-compressed markdown file. Specific validation errors were found.
+
+CRITICAL RULES:
+- DO NOT recompress or rephrase the file
+- ONLY fix the listed errors — leave everything else exactly as-is
+- The ORIGINAL is provided as reference only (to restore missing content)
+- Preserve caveman style in all untouched sections
+
+ERRORS TO FIX:
+{errors_str}
+
+HOW TO FIX:
+- Missing URL: find it in ORIGINAL, restore it exactly where it belongs in COMPRESSED
+- Code block mismatch: find the exact code block in ORIGINAL, restore it in COMPRESSED
+- Heading mismatch: restore the exact heading text from ORIGINAL into COMPRESSED
+- Do not touch any section not mentioned in the errors
+
+ORIGINAL (reference only):
+{original}
+
+COMPRESSED (fix this):
+{compressed}
+
+Return ONLY the fixed compressed file. No explanation.
+"""
+
+
+# ---------- Core Logic ----------
+
+
+def compress_file(filepath: Path) -> bool:
+ # Resolve and validate path
+ filepath = filepath.resolve()
+ MAX_FILE_SIZE = 500_000 # 500KB
+ if not filepath.exists():
+ raise FileNotFoundError(f"File not found: {filepath}")
+ if filepath.stat().st_size > MAX_FILE_SIZE:
+ raise ValueError(f"File too large to compress safely (max 500KB): {filepath}")
+
+ print(f"Processing: {filepath}")
+
+ if not should_compress(filepath):
+ print("Skipping (not natural language)")
+ return False
+
+ original_text = filepath.read_text(errors="ignore")
+ backup_path = filepath.with_name(filepath.stem + ".original.md")
+
+ # Check if backup already exists to prevent accidental overwriting
+ if backup_path.exists():
+ print(f"⚠️ Backup file already exists: {backup_path}")
+ print("The original backup may contain important content.")
+ print("Aborting to prevent data loss. Please remove or rename the backup file if you want to proceed.")
+ return False
+
+ # Step 1: Compress
+ print("Compressing with Claude...")
+ compressed = call_claude(build_compress_prompt(original_text))
+
+ # Save original as backup, write compressed to original path
+ backup_path.write_text(original_text)
+ filepath.write_text(compressed)
+
+ # Step 2: Validate + Retry
+ for attempt in range(MAX_RETRIES):
+ print(f"\nValidation attempt {attempt + 1}")
+
+ result = validate(backup_path, filepath)
+
+ if result.is_valid:
+ print("Validation passed")
+ break
+
+ print("❌ Validation failed:")
+ for err in result.errors:
+ print(f" - {err}")
+
+ if attempt == MAX_RETRIES - 1:
+ # Restore original on failure
+ filepath.write_text(original_text)
+ backup_path.unlink(missing_ok=True)
+ print("❌ Failed after retries — original restored")
+ return False
+
+ print("Fixing with Claude...")
+ compressed = call_claude(
+ build_fix_prompt(original_text, compressed, result.errors)
+ )
+ filepath.write_text(compressed)
+
+ return True
diff --git a/.trae/skills/caveman-compress/scripts/detect.py b/.trae/skills/caveman-compress/scripts/detect.py
new file mode 100644
index 0000000..5f50fd3
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/detect.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""Detect whether a file is natural language (compressible) or code/config (skip)."""
+
+import json
+import re
+from pathlib import Path
+
+# Extensions that are natural language and compressible
+COMPRESSIBLE_EXTENSIONS = {".md", ".txt", ".markdown", ".rst"}
+
+# Extensions that are code/config and should be skipped
+SKIP_EXTENSIONS = {
+ ".py", ".js", ".ts", ".tsx", ".jsx", ".json", ".yaml", ".yml",
+ ".toml", ".env", ".lock", ".css", ".scss", ".html", ".xml",
+ ".sql", ".sh", ".bash", ".zsh", ".go", ".rs", ".java", ".c",
+ ".cpp", ".h", ".hpp", ".rb", ".php", ".swift", ".kt", ".lua",
+ ".dockerfile", ".makefile", ".csv", ".ini", ".cfg",
+}
+
+# Patterns that indicate a line is code
+CODE_PATTERNS = [
+ re.compile(r"^\s*(import |from .+ import |require\(|const |let |var )"),
+ re.compile(r"^\s*(def |class |function |async function |export )"),
+ re.compile(r"^\s*(if\s*\(|for\s*\(|while\s*\(|switch\s*\(|try\s*\{)"),
+ re.compile(r"^\s*[\}\]\);]+\s*$"), # closing braces/brackets
+ re.compile(r"^\s*@\w+"), # decorators/annotations
+ re.compile(r'^\s*"[^"]+"\s*:\s*'), # JSON-like key-value
+ re.compile(r"^\s*\w+\s*=\s*[{\[\(\"']"), # assignment with literal
+]
+
+
+def _is_code_line(line: str) -> bool:
+ """Check if a line looks like code."""
+ return any(p.match(line) for p in CODE_PATTERNS)
+
+
+def _is_json_content(text: str) -> bool:
+ """Check if content is valid JSON."""
+ try:
+ json.loads(text)
+ return True
+ except (json.JSONDecodeError, ValueError):
+ return False
+
+
+def _is_yaml_content(lines: list[str]) -> bool:
+ """Heuristic: check if content looks like YAML."""
+ yaml_indicators = 0
+ for line in lines[:30]:
+ stripped = line.strip()
+ if stripped.startswith("---"):
+ yaml_indicators += 1
+ elif re.match(r"^\w[\w\s]*:\s", stripped):
+ yaml_indicators += 1
+ elif stripped.startswith("- ") and ":" in stripped:
+ yaml_indicators += 1
+ # If most non-empty lines look like YAML
+ non_empty = sum(1 for l in lines[:30] if l.strip())
+ return non_empty > 0 and yaml_indicators / non_empty > 0.6
+
+
+def detect_file_type(filepath: Path) -> str:
+ """Classify a file as 'natural_language', 'code', 'config', or 'unknown'.
+
+ Returns:
+ One of: 'natural_language', 'code', 'config', 'unknown'
+ """
+ ext = filepath.suffix.lower()
+
+ # Extension-based classification
+ if ext in COMPRESSIBLE_EXTENSIONS:
+ return "natural_language"
+ if ext in SKIP_EXTENSIONS:
+ return "code" if ext not in {".json", ".yaml", ".yml", ".toml", ".ini", ".cfg", ".env"} else "config"
+
+ # Extensionless files (like CLAUDE.md, TODO) — check content
+ if not ext:
+ try:
+ text = filepath.read_text(errors="ignore")
+ except (OSError, PermissionError):
+ return "unknown"
+
+ lines = text.splitlines()[:50]
+
+ if _is_json_content(text[:10000]):
+ return "config"
+ if _is_yaml_content(lines):
+ return "config"
+
+ code_lines = sum(1 for l in lines if l.strip() and _is_code_line(l))
+ non_empty = sum(1 for l in lines if l.strip())
+ if non_empty > 0 and code_lines / non_empty > 0.4:
+ return "code"
+
+ return "natural_language"
+
+ return "unknown"
+
+
+def should_compress(filepath: Path) -> bool:
+ """Return True if the file is natural language and should be compressed."""
+ if not filepath.is_file():
+ return False
+ # Skip backup files
+ if filepath.name.endswith(".original.md"):
+ return False
+ return detect_file_type(filepath) == "natural_language"
+
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) < 2:
+ print("Usage: python detect.py [file2] ...")
+ sys.exit(1)
+
+ for path_str in sys.argv[1:]:
+ p = Path(path_str).resolve()
+ file_type = detect_file_type(p)
+ compress = should_compress(p)
+ print(f" {p.name:30s} type={file_type:20s} compress={compress}")
diff --git a/.trae/skills/caveman-compress/scripts/validate.py b/.trae/skills/caveman-compress/scripts/validate.py
new file mode 100644
index 0000000..3c4d4c1
--- /dev/null
+++ b/.trae/skills/caveman-compress/scripts/validate.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+import re
+from pathlib import Path
+
+URL_REGEX = re.compile(r"https?://[^\s)]+")
+FENCE_OPEN_REGEX = re.compile(r"^(\s{0,3})(`{3,}|~{3,})(.*)$")
+HEADING_REGEX = re.compile(r"^(#{1,6})\s+(.*)", re.MULTILINE)
+BULLET_REGEX = re.compile(r"^\s*[-*+]\s+", re.MULTILINE)
+
+# crude but effective path detection
+# Requires either a path prefix (./ ../ / or drive letter) or a slash/backslash within the match
+PATH_REGEX = re.compile(r"(?:\./|\.\./|/|[A-Za-z]:\\)[\w\-/\\\.]+|[\w\-\.]+[/\\][\w\-/\\\.]+")
+
+
+class ValidationResult:
+ def __init__(self):
+ self.is_valid = True
+ self.errors = []
+ self.warnings = []
+
+ def add_error(self, msg):
+ self.is_valid = False
+ self.errors.append(msg)
+
+ def add_warning(self, msg):
+ self.warnings.append(msg)
+
+
+def read_file(path: Path) -> str:
+ return path.read_text(errors="ignore")
+
+
+# ---------- Extractors ----------
+
+
+def extract_headings(text):
+ return [(level, title.strip()) for level, title in HEADING_REGEX.findall(text)]
+
+
+def extract_code_blocks(text):
+ """Line-based fenced code block extractor.
+
+ Handles ``` and ~~~ fences with variable length (CommonMark: closing
+ fence must use same char and be at least as long as opening). Supports
+ nested fences (e.g. an outer 4-backtick block wrapping inner 3-backtick
+ content).
+ """
+ blocks = []
+ lines = text.split("\n")
+ i = 0
+ n = len(lines)
+ while i < n:
+ m = FENCE_OPEN_REGEX.match(lines[i])
+ if not m:
+ i += 1
+ continue
+ fence_char = m.group(2)[0]
+ fence_len = len(m.group(2))
+ open_line = lines[i]
+ block_lines = [open_line]
+ i += 1
+ closed = False
+ while i < n:
+ close_m = FENCE_OPEN_REGEX.match(lines[i])
+ if (
+ close_m
+ and close_m.group(2)[0] == fence_char
+ and len(close_m.group(2)) >= fence_len
+ and close_m.group(3).strip() == ""
+ ):
+ block_lines.append(lines[i])
+ closed = True
+ i += 1
+ break
+ block_lines.append(lines[i])
+ i += 1
+ if closed:
+ blocks.append("\n".join(block_lines))
+ # Unclosed fences are silently skipped — they indicate malformed markdown
+ # and including them would cause false-positive validation failures.
+ return blocks
+
+
+def extract_urls(text):
+ return set(URL_REGEX.findall(text))
+
+
+def extract_paths(text):
+ return set(PATH_REGEX.findall(text))
+
+
+def count_bullets(text):
+ return len(BULLET_REGEX.findall(text))
+
+
+# ---------- Validators ----------
+
+
+def validate_headings(orig, comp, result):
+ h1 = extract_headings(orig)
+ h2 = extract_headings(comp)
+
+ if len(h1) != len(h2):
+ result.add_error(f"Heading count mismatch: {len(h1)} vs {len(h2)}")
+
+ if h1 != h2:
+ result.add_warning("Heading text/order changed")
+
+
+def validate_code_blocks(orig, comp, result):
+ c1 = extract_code_blocks(orig)
+ c2 = extract_code_blocks(comp)
+
+ if c1 != c2:
+ result.add_error("Code blocks not preserved exactly")
+
+
+def validate_urls(orig, comp, result):
+ u1 = extract_urls(orig)
+ u2 = extract_urls(comp)
+
+ if u1 != u2:
+ result.add_error(f"URL mismatch: lost={u1 - u2}, added={u2 - u1}")
+
+
+def validate_paths(orig, comp, result):
+ p1 = extract_paths(orig)
+ p2 = extract_paths(comp)
+
+ if p1 != p2:
+ result.add_warning(f"Path mismatch: lost={p1 - p2}, added={p2 - p1}")
+
+
+def validate_bullets(orig, comp, result):
+ b1 = count_bullets(orig)
+ b2 = count_bullets(comp)
+
+ if b1 == 0:
+ return
+
+ diff = abs(b1 - b2) / b1
+
+ if diff > 0.15:
+ result.add_warning(f"Bullet count changed too much: {b1} -> {b2}")
+
+
+# ---------- Main ----------
+
+
+def validate(original_path: Path, compressed_path: Path) -> ValidationResult:
+ result = ValidationResult()
+
+ orig = read_file(original_path)
+ comp = read_file(compressed_path)
+
+ validate_headings(orig, comp, result)
+ validate_code_blocks(orig, comp, result)
+ validate_urls(orig, comp, result)
+ validate_paths(orig, comp, result)
+ validate_bullets(orig, comp, result)
+
+ return result
+
+
+# ---------- CLI ----------
+
+if __name__ == "__main__":
+ import sys
+
+ if len(sys.argv) != 3:
+ print("Usage: python validate.py ")
+ sys.exit(1)
+
+ orig = Path(sys.argv[1]).resolve()
+ comp = Path(sys.argv[2]).resolve()
+
+ res = validate(orig, comp)
+
+ print(f"\nValid: {res.is_valid}")
+
+ if res.errors:
+ print("\nErrors:")
+ for e in res.errors:
+ print(f" - {e}")
+
+ if res.warnings:
+ print("\nWarnings:")
+ for w in res.warnings:
+ print(f" - {w}")
diff --git a/.trae/skills/caveman-help/SKILL.md b/.trae/skills/caveman-help/SKILL.md
new file mode 100644
index 0000000..078e487
--- /dev/null
+++ b/.trae/skills/caveman-help/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: caveman-help
+description: >
+ Quick-reference card for all caveman modes, skills, and commands.
+ One-shot display, not a persistent mode. Trigger: /caveman-help,
+ "caveman help", "what caveman commands", "how do I use caveman".
+---
+
+# Caveman Help
+
+Display this reference card when invoked. One-shot — do NOT change mode, write flag files, or persist anything. Output in caveman style.
+
+## Modes
+
+| Mode | Trigger | What change |
+|------|---------|-------------|
+| **Lite** | `/caveman lite` | Drop filler. Keep sentence structure. |
+| **Full** | `/caveman` | Drop articles, filler, pleasantries, hedging. Fragments OK. Default. |
+| **Ultra** | `/caveman ultra` | Extreme compression. Bare fragments. Tables over prose. |
+| **Wenyan-Lite** | `/caveman wenyan-lite` | Classical Chinese style, light compression. |
+| **Wenyan-Full** | `/caveman wenyan` | Full 文言文. Maximum classical terseness. |
+| **Wenyan-Ultra** | `/caveman wenyan-ultra` | Extreme. Ancient scholar on a budget. |
+
+Mode stick until changed or session end.
+
+## Skills
+
+| Skill | Trigger | What it do |
+|-------|---------|-----------|
+| **caveman-commit** | `/caveman-commit` | Terse commit messages. Conventional Commits. ≤50 char subject. |
+| **caveman-review** | `/caveman-review` | One-line PR comments: `L42: bug: user null. Add guard.` |
+| **caveman-compress** | `/caveman:compress <file>` | Compress .md files to caveman prose. Saves ~46% input tokens. |
+| **caveman-help** | `/caveman-help` | This card. |
+
+## Deactivate
+
+Say "stop caveman" or "normal mode". Resume anytime with `/caveman`.
+
+## Configure Default Mode
+
+Default mode = `full`. Change it:
+
+**Environment variable** (highest priority):
+```bash
+export CAVEMAN_DEFAULT_MODE=ultra
+```
+
+**Config file** (`~/.config/caveman/config.json`):
+```json
+{ "defaultMode": "lite" }
+```
+
+Set `"off"` to disable auto-activation on session start. User can still activate manually with `/caveman`.
+
+Resolution: env var > config file > `full`.
+
+## More
+
+Full docs: https://github.com/JuliusBrussee/caveman
diff --git a/.trae/skills/caveman-review/SKILL.md b/.trae/skills/caveman-review/SKILL.md
new file mode 100644
index 0000000..48f4adb
--- /dev/null
+++ b/.trae/skills/caveman-review/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: caveman-review
+description: >
+ Ultra-compressed code review comments. Cuts noise from PR feedback while preserving
+ the actionable signal. Each comment is one line: location, problem, fix. Use when user
+ says "review this PR", "code review", "review the diff", "/review", or invokes
+ /caveman-review. Auto-triggers when reviewing pull requests.
+---
+
+Write code review comments terse and actionable. One line per finding. Location, problem, fix. No throat-clearing.
+
+## Rules
+
+**Format:** `L<line>: <problem>. <fix>.` — or `<file>:L<line>: ...` when reviewing multi-file diffs.
+
+**Severity prefix (optional, when mixed):**
+- `🔴 bug:` — broken behavior, will cause incident
+- `🟡 risk:` — works but fragile (race, missing null check, swallowed error)
+- `🔵 nit:` — style, naming, micro-optim. Author can ignore
+- `❓ q:` — genuine question, not a suggestion
+
+**Drop:**
+- "I noticed that...", "It seems like...", "You might want to consider..."
+- "This is just a suggestion but..." — use `nit:` instead
+- "Great work!", "Looks good overall but..." — say it once at the top, not per comment
+- Restating what the line does — the reviewer can read the diff
+- Hedging ("perhaps", "maybe", "I think") — if unsure use `q:`
+
+**Keep:**
+- Exact line numbers
+- Exact symbol/function/variable names in backticks
+- Concrete fix, not "consider refactoring this"
+- The *why* if the fix isn't obvious from the problem statement
+
+## Examples
+
+❌ "I noticed that on line 42 you're not checking if the user object is null before accessing the email property. This could potentially cause a crash if the user is not found in the database. You might want to add a null check here."
+
+✅ `L42: 🔴 bug: user can be null after .find(). Add guard before .email.`
+
+❌ "It looks like this function is doing a lot of things and might benefit from being broken up into smaller functions for readability."
+
+✅ `L88-140: 🔵 nit: 50-line fn does 4 things. Extract validate/normalize/persist.`
+
+❌ "Have you considered what happens if the API returns a 429? I think we should probably handle that case."
+
+✅ `L23: 🟡 risk: no retry on 429. Wrap in withBackoff(3).`
+
+## Auto-Clarity
+
+Drop terse mode for: security findings (CVE-class bugs need full explanation + reference), architectural disagreements (need rationale, not just a one-liner), and onboarding contexts where the author is new and needs the "why". In those cases write a normal paragraph, then resume terse for the rest.
+
+## Boundaries
+
+Reviews only — does not write the code fix, does not approve/request-changes, does not run linters. Output the comment(s) ready to paste into the PR. "stop caveman-review" or "normal mode": revert to verbose review style.
\ No newline at end of file
diff --git a/.trae/skills/caveman/SKILL.md b/.trae/skills/caveman/SKILL.md
new file mode 100644
index 0000000..2ab498b
--- /dev/null
+++ b/.trae/skills/caveman/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: caveman
+description: >
+ Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman
+ while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra,
+ wenyan-lite, wenyan-full, wenyan-ultra.
+ Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens",
+ "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested.
+---
+
+Respond terse like smart caveman. All technical substance stay. Only fluff die.
+
+## Persistence
+
+ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode".
+
+Default: **full**. Switch: `/caveman lite|full|ultra`.
+
+## Rules
+
+Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact.
+
+Pattern: `[thing] [action] [reason]. [next step].`
+
+Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..."
+Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:"
+
+## Intensity
+
+| Level | What change |
+|-------|------------|
+| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight |
+| **full** | Drop articles, fragments OK, short synonyms. Classic caveman |
+| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough |
+| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register |
+| **wenyan-full** | Maximum classical terseness. Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) |
+| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse |
+
+Example — "Why React component re-render?"
+- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`."
+- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`."
+- ultra: "Inline obj prop → new ref → re-render. `useMemo`."
+- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。"
+- wenyan-full: "物出新參照,致重繪。useMemo .Wrap之。"
+- wenyan-ultra: "新參照→重繪。useMemo Wrap。"
+
+Example — "Explain database connection pooling."
+- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead."
+- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead."
+- ultra: "Pool = reuse DB conn. Skip handshake → fast under load."
+- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。"
+- wenyan-ultra: "池reuse conn。skip handshake → fast。"
+
+## Auto-Clarity
+
+Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done.
+
+Example — destructive op:
+> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone.
+> ```sql
+> DROP TABLE users;
+> ```
+> Caveman resume. Verify backup exist first.
+
+## Boundaries
+
+Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end.
\ No newline at end of file
diff --git a/skills-lock.json b/skills-lock.json
new file mode 100644
index 0000000..ad2a06d
--- /dev/null
+++ b/skills-lock.json
@@ -0,0 +1,30 @@
+{
+ "version": 1,
+ "skills": {
+ "caveman": {
+ "source": "JuliusBrussee/caveman",
+ "sourceType": "github",
+ "computedHash": "a818cdc41dcfaa50dd891c5cb5e5705968338de02e7e37949ca56e8c30ad4176"
+ },
+ "caveman-commit": {
+ "source": "JuliusBrussee/caveman",
+ "sourceType": "github",
+ "computedHash": "76994afc39b68f4dbe098df76262b68d9928a23ee0f6aaebb11ba0e0f55bfa13"
+ },
+ "caveman-compress": {
+ "source": "JuliusBrussee/caveman",
+ "sourceType": "github",
+ "computedHash": "ed0f21f26011a8e8faa64cb49ac43b6533c64e0ec626b07cce4d4eca6c40a428"
+ },
+ "caveman-help": {
+ "source": "JuliusBrussee/caveman",
+ "sourceType": "github",
+ "computedHash": "3cd5f7d3f88c8ef7b16a6555dc61f5a11b14151386697609ab6887ab8b5f059d"
+ },
+ "caveman-review": {
+ "source": "JuliusBrussee/caveman",
+ "sourceType": "github",
+ "computedHash": "1dc59e7e896bc9dd449e43116c4c8b2e656b88c3370df91848330c17347af3d0"
+ }
+ }
+}
From 7aab492de68988f64e3b073c0e36f25cd3d98f63 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 11:48:19 -0300
Subject: [PATCH 08/32] Convert chat topics to HeroUI cards with action
dropdown
---
src/screens/ChatScreen.tsx | 368 -------------------------------------
1 file changed, 368 deletions(-)
delete mode 100644 src/screens/ChatScreen.tsx
diff --git a/src/screens/ChatScreen.tsx b/src/screens/ChatScreen.tsx
deleted file mode 100644
index 4e55a3f..0000000
--- a/src/screens/ChatScreen.tsx
+++ /dev/null
@@ -1,368 +0,0 @@
-import {
- ArrowRight,
- Attachment,
- ChatBubble,
- ChatLines,
- CheckCircle,
- EditPencil,
- Refresh,
- SendSolid,
- Settings,
- Trash,
- WarningCircle,
- XmarkCircle
-} from "iconoir-react";
-import { useMemo, useState } from "react";
-
-import { getModelOptions, getReasoningOptions } from "../lib/agentConfig";
-import { isOpenableWorkspacePath } from "../lib/appShell";
-import { DiffPreview } from "../components/DiffPreview";
-import type {
- AutonomyMode,
- ChatContextItem,
- ChatSession,
- ChatSessionSummary,
- ModelProvider,
- WorkspaceEntry
-} from "../types";
-
-interface ChatScreenProps {
- workspaceRootName: string;
- sessions: ChatSessionSummary[];
- activeSession: ChatSession | null;
- activeDraft: string;
- workspaceEntries: WorkspaceEntry[];
- configuredModelProviders: ModelProvider[];
- cavemanReady: boolean;
- cavemanMessage: string;
- cavemanChecking: boolean;
- onCreateSession: () => void;
- onDeleteSession: (sessionId: string) => void;
- onDraftChange: (value: string) => void;
- onRefresh: () => void;
- onRemoveContextItem: (itemId: string) => void;
- onRenameSession: (sessionId: string, title: string) => void;
- onSaveSessionConfig: (payload: {
- sessionId: string;
- selectedModel: ChatSession["selectedModel"];
- selectedReasoning: ChatSession["selectedReasoning"];
- autonomyMode: AutonomyMode;
- contextItems: ChatContextItem[];
- }) => void;
- onSelectSession: (sessionId: string) => void;
- onSend: () => void;
- onStop: () => void;
- onApprove: () => void;
- onAttachFile: (path: string) => void;
- onOpenReview: () => void;
-}
-
-const AUTONOMY_OPTIONS: AutonomyMode[] = ["stepped", "milestone", "god_mode"];
-
-export function ChatScreen({
- workspaceRootName,
- sessions,
- activeSession,
- activeDraft,
- workspaceEntries,
- configuredModelProviders,
- cavemanReady,
- cavemanMessage,
- cavemanChecking,
- onCreateSession,
- onDeleteSession,
- onDraftChange,
- onRefresh,
- onRemoveContextItem,
- onRenameSession,
- onSaveSessionConfig,
- onSelectSession,
- onSend,
- onStop,
- onApprove,
- onAttachFile,
- onOpenReview
-}: ChatScreenProps) {
- const [topicSearch, setTopicSearch] = useState("");
- const [contextSearch, setContextSearch] = useState("");
- const mentionQuery = useMemo(() => {
- const match = activeDraft.match(/(?:^|\s)@([^\s@]*)$/);
- return match?.[1]?.toLowerCase() ?? "";
- }, [activeDraft]);
- const visibleSessions = useMemo(
- () =>
- sessions.filter((session) =>
- `${session.title} ${session.lastMessagePreview}`
- .toLowerCase()
- .includes(topicSearch.trim().toLowerCase())
- ),
- [sessions, topicSearch]
- );
- const attachableFiles = useMemo(
- () =>
- workspaceEntries.filter(
- (entry) =>
- entry.kind === "file" &&
- isOpenableWorkspacePath(entry.path) &&
- `${entry.name} ${entry.path}`
- .toLowerCase()
- .includes((mentionQuery || contextSearch).trim().toLowerCase())
- ),
- [contextSearch, mentionQuery, workspaceEntries]
- );
- const messageCount = activeSession?.messages.length ?? 0;
- const canSend = Boolean(activeSession && activeDraft.trim() && cavemanReady && !activeSession.runtime.isBusy);
-
- return (
-
-
-
-
-
-
Topics
-
{workspaceRootName}
-
-
-
- New
-
-
-
- setTopicSearch(event.target.value)}
- placeholder="Search topics"
- value={topicSearch}
- />
-
-
- {visibleSessions.map((session) => (
-
onSelectSession(session.id)}
- type="button"
- >
-
-
-
{session.title}
-
{session.lastMessagePreview || "No messages yet."}
-
-
- {session.status.replace("_", " ")}
-
-
-
- ))}
-
-
- {activeSession ? (
-
- {
- const nextTitle = window.prompt("Rename topic", activeSession.title)?.trim();
- if (nextTitle) {
- onRenameSession(activeSession.id, nextTitle);
- }
- }}
- type="button"
- >
-
- Rename
-
- onDeleteSession(activeSession.id)} type="button">
-
- Delete
-
-
- ) : null}
-
-
-
-
-
-
Agent Chat
-
{activeSession?.title ?? "No topic selected"}
-
-
-
-
- {!cavemanReady ? (
-
-
-
{cavemanChecking ? "Verifying Caveman skill..." : cavemanMessage}
-
- ) : null}
-
-
- {activeSession ? (
-
- {activeSession.messages.map((message) => (
-
- {message.role}
- {message.content}
-
- ))}
- {messageCount === 0 ? (
-
- Start a topic. PRD, SPEC, supporting docs, and the workspace tree are already attached to the first turn.
-
- ) : null}
-
- ) : (
-
- Create a topic to start the agent chat workspace.
-
- )}
-
-
-
- onDraftChange(event.target.value)}
- placeholder="Ask the agent anything about this topic. Type @ to attach a workspace file."
- value={activeDraft}
- />
- {mentionQuery.length > 0 ? (
-
- {attachableFiles.slice(0, 8).map((entry) => (
-
onAttachFile(entry.path)} type="button">
-
- {entry.path}
-
- ))}
-
- ) : null}
-
-
-
- {activeSession?.runtime.executionSummary ?? "Ready for the next prompt."}
-
-
- {activeSession?.runtime.awaitingApproval ? (
-
-
- Approve
-
- ) : null}
- {activeSession?.runtime.isBusy ? (
-
-
- Stop
-
- ) : null}
-
-
- Send
-
-
-
-
-
-
-
-
-
-
Context & Artifacts
-
- {activeSession ? (
- <>
-
- onSaveSessionConfig({ sessionId: activeSession.id, selectedModel, selectedReasoning: activeSession.selectedReasoning, autonomyMode: activeSession.autonomyMode, contextItems: activeSession.contextItems })} />
- onSaveSessionConfig({ sessionId: activeSession.id, selectedModel: activeSession.selectedModel, selectedReasoning, autonomyMode: activeSession.autonomyMode, contextItems: activeSession.contextItems })} />
- ({ value, label: value.replace("_", " ") }))} value={activeSession.autonomyMode} onChange={(autonomyMode) => onSaveSessionConfig({ sessionId: activeSession.id, selectedModel: activeSession.selectedModel, selectedReasoning: activeSession.selectedReasoning, autonomyMode, contextItems: activeSession.contextItems })} />
-
-
-
-
Attached Context
-
- {activeSession.contextItems.map((item) => (
- onRemoveContextItem(item.id)} type="button">
- {item.label}
- x
-
- ))}
-
-
-
-
-
setContextSearch(event.target.value)} placeholder="Attach workspace files" value={contextSearch} />
-
- {attachableFiles.slice(0, 18).map((entry) => (
-
onAttachFile(entry.path)} type="button">
-
- {entry.path}
-
- ))}
-
-
-
-
-
-
- {activeSession.runtime.terminalOutput.length === 0 ? (
-
Terminal output will appear here for the active topic.
- ) : (
- activeSession.runtime.terminalOutput.map((line, index) =>
{line}
)
- )}
-
- >
- ) : null}
-
-
-
- );
-}
-
-function SelectField({
- label,
- options,
- value,
- onChange
-}: {
- label: string;
- options: Array<{ value: Value; label: string }>;
- value: Value;
- onChange: (value: Value) => void;
-}) {
- return (
-
- {label}
- onChange(event.target.value as Value)} value={value}>
- {options.map((option) => (
-
- {option.label}
-
- ))}
-
-
- );
-}
-
-const PANEL_CLASS = "flex min-h-0 flex-col gap-4 overflow-hidden rounded-[1.5rem] border border-[var(--border-strong)] bg-[var(--bg-panel)] p-5 shadow-[var(--shadow)] backdrop-blur-[30px]";
-const INPUT_CLASS = "w-full rounded-[1rem] border border-[var(--border-soft)] bg-black/15 px-4 py-3 text-[15px] text-[var(--text-main)] outline-none transition placeholder:text-[var(--text-muted)] focus:border-[var(--accent)]";
-const PRIMARY_BUTTON_CLASS = "inline-flex items-center justify-center gap-2 rounded-[1rem] border-0 bg-[linear-gradient(135deg,var(--accent),#ff79c6)] px-4 py-3 font-semibold text-[#15131c] transition hover:-translate-y-0.5 hover:opacity-95";
-const SECONDARY_BUTTON_CLASS = "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-4 py-3 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
-const DANGER_BUTTON_CLASS = "inline-flex items-center justify-center gap-2 rounded-[1rem] border border-[rgba(255,85,85,0.32)] bg-[rgba(255,85,85,0.16)] px-4 py-3 font-medium text-[var(--danger)] transition hover:-translate-y-0.5";
-const TOPIC_CARD_CLASS = "w-full rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-surface)]/70 px-4 py-4 transition hover:-translate-y-0.5 hover:bg-[var(--bg-surface)]/90";
-const CONTEXT_CHIP_CLASS = "inline-flex items-center gap-2 rounded-full border border-[var(--border-soft)] bg-white/6 px-3 py-2 text-xs font-medium text-[var(--text-main)]";
-const LIST_ITEM_CLASS = "flex w-full items-center gap-2 rounded-[0.9rem] px-3 py-2 text-left text-sm text-[var(--text-main)] transition hover:bg-white/8";
From 2b3d0043f22533bbe254593bafe9344a6fd0973c Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 17:11:53 -0300
Subject: [PATCH 09/32] Ignore .specforge in Tailwind and Vite watch
---
src/screens/ChatScreen.tsx | 639 +++++++++++++++++++++++++++++++++++++
src/styles.css | 1 +
vite.config.ts | 2 +-
3 files changed, 641 insertions(+), 1 deletion(-)
create mode 100644 src/screens/ChatScreen.tsx
diff --git a/src/screens/ChatScreen.tsx b/src/screens/ChatScreen.tsx
new file mode 100644
index 0000000..b8eab03
--- /dev/null
+++ b/src/screens/ChatScreen.tsx
@@ -0,0 +1,639 @@
+import {
+ Button,
+ Card,
+ DropdownItem,
+ DropdownMenu,
+ DropdownPopover,
+ DropdownRoot,
+ DropdownTrigger,
+ Input,
+ Label,
+ ListBox,
+ Select,
+ TextArea
+} from "@heroui/react";
+import {
+ ArrowRight,
+ Attachment,
+ ChatBubble,
+ ChatLines,
+ CheckCircle,
+ EditPencil,
+ Refresh,
+ SendSolid,
+ Settings,
+ ThreePointsCircle,
+ Trash,
+ WarningCircle,
+ XmarkCircle
+} from "iconoir-react";
+import { useCallback, useMemo, useState, type Key } from "react";
+
+import { DiffPreview } from "../components/DiffPreview";
+import {
+ FIELD_LABEL_CLASS,
+ INPUT_CLASS,
+ LISTBOX_ITEM_CLASS,
+ PRIMARY_BUTTON_CLASS,
+ SECONDARY_BUTTON_CLASS,
+ SELECT_TRIGGER_CLASS,
+ SETTINGS_PANEL_CLASS,
+ TEXTAREA_CLASS
+} from "../components/SettingsPrimitives";
+import { getModelOptions, getReasoningOptions } from "../lib/agentConfig";
+import { isOpenableWorkspacePath } from "../lib/appShell";
+import type {
+ AutonomyMode,
+ ChatContextItem,
+ ChatSession,
+ ChatSessionSummary,
+ ModelProvider,
+ WorkspaceEntry
+} from "../types";
+
+interface ChatScreenProps {
+ workspaceRootName: string;
+ sessions: ChatSessionSummary[];
+ activeSession: ChatSession | null;
+ activeDraft: string;
+ workspaceEntries: WorkspaceEntry[];
+ configuredModelProviders: ModelProvider[];
+ cavemanReady: boolean;
+ cavemanMessage: string;
+ cavemanChecking: boolean;
+ onCreateSession: () => void;
+ onDeleteSession: (sessionId: string) => void;
+ onDraftChange: (value: string) => void;
+ onRefresh: () => void;
+ onRemoveContextItem: (itemId: string) => void;
+ onRenameSession: (sessionId: string, title: string) => void;
+ onSaveSessionConfig: (payload: {
+ sessionId: string;
+ selectedModel: ChatSession["selectedModel"];
+ selectedReasoning: ChatSession["selectedReasoning"];
+ autonomyMode: AutonomyMode;
+ contextItems: ChatContextItem[];
+ }) => void;
+ onSelectSession: (sessionId: string) => void;
+ onSend: () => void;
+ onStop: () => void;
+ onApprove: () => void;
+ onAttachFile: (path: string) => void;
+ onOpenReview: () => void;
+}
+
+const AUTONOMY_OPTIONS: AutonomyMode[] = ["stepped", "milestone", "god_mode"];
+
+const PANEL_CLASS = `${SETTINGS_PANEL_CLASS} rounded-[1.5rem]`;
+const PANEL_CONTENT_CLASS = "flex min-h-0 flex-1 flex-col gap-4 px-5 py-5";
+const EYEBROW_CLASS =
+ "m-0 text-[11px] font-extrabold uppercase tracking-[0.12em] text-[var(--accent-2)]";
+const PANEL_TITLE_CLASS = "m-0 text-lg font-semibold text-[var(--text-main)]";
+const ICON_BUTTON_CLASS =
+ "inline-flex min-h-[3rem] min-w-[3rem] items-center justify-center rounded-[1rem] border border-[var(--border-soft)] bg-white/5 px-0 font-medium text-[var(--text-main)] transition hover:-translate-y-0.5 hover:bg-white/8";
+const TOPIC_CARD_CLASS =
+ "border border-[var(--border-soft)] bg-[var(--bg-surface)] shadow-none transition";
+const TOPIC_CARD_ACTIVE_CLASS = "border-[var(--accent)] bg-white/10";
+const TOPIC_SELECT_BUTTON_CLASS =
+ "flex min-w-0 items-start justify-start rounded-[0.95rem] px-3 py-3 text-left transition hover:bg-white/[0.06]";
+const TOPIC_MENU_BUTTON_CLASS =
+ "inline-flex min-h-10 min-w-10 items-center justify-center rounded-[0.95rem] border border-[var(--border-soft)] bg-[rgba(255,184,108,0.08)] px-0 text-[var(--text-main)] transition hover:bg-[rgba(255,184,108,0.14)]";
+const TOPIC_MENU_POPOVER_CLASS =
+ "min-w-56 rounded-[1rem] border border-[var(--border-soft)] bg-[var(--bg-panel-strong)] p-1 shadow-[var(--shadow)] backdrop-blur-xl";
+const TOPIC_MENU_ITEM_CLASS =
+ "cursor-pointer rounded-[0.9rem] px-3 py-3 text-[var(--text-main)] outline-none transition data-[focused=true]:bg-white/8";
+const TOPIC_MENU_DANGER_ITEM_CLASS =
+ "cursor-pointer rounded-[0.9rem] px-3 py-3 text-[var(--danger)] outline-none transition data-[focused=true]:bg-[rgba(255,85,85,0.14)]";
+const STATUS_BADGE_CLASS =
+ "shrink-0 rounded-full border border-[var(--border-soft)] px-2 py-1 text-[10px] font-semibold uppercase tracking-[0.08em] text-[var(--text-subtle)]";
+const WARNING_CARD_CLASS =
+ "border border-[rgba(255,184,108,0.25)] bg-[rgba(255,184,108,0.12)] shadow-none";
+const EMPTY_STATE_CARD_CLASS =
+ "border border-dashed border-[var(--border-soft)] bg-[var(--bg-surface)] shadow-none";
+const ATTACH_CARD_CLASS =
+ "border border-[var(--border-soft)] bg-[var(--bg-surface)] shadow-none";
+const LIST_ITEM_BUTTON_CLASS =
+ "flex w-full items-center justify-start gap-2 rounded-[0.9rem] px-3 py-2 text-left text-sm text-[var(--text-main)] transition hover:bg-white/8";
+const CONTEXT_CHIP_CLASS =
+ "inline-flex items-center gap-2 rounded-full border border-[var(--border-soft)] bg-white/[0.06] px-3 py-2 text-xs font-medium text-[var(--text-main)] transition hover:bg-white/[0.1]";
+const TERMINAL_CARD_CLASS =
+ "min-h-0 flex-1 border border-[var(--border-soft)] bg-black/20 shadow-none";
+
+export function ChatScreen({
+ workspaceRootName,
+ sessions,
+ activeSession,
+ activeDraft,
+ workspaceEntries,
+ configuredModelProviders,
+ cavemanReady,
+ cavemanMessage,
+ cavemanChecking,
+ onCreateSession,
+ onDeleteSession,
+ onDraftChange,
+ onRefresh,
+ onRemoveContextItem,
+ onRenameSession,
+ onSaveSessionConfig,
+ onSelectSession,
+ onSend,
+ onStop,
+ onApprove,
+ onAttachFile,
+ onOpenReview
+}: ChatScreenProps) {
+ const [topicSearch, setTopicSearch] = useState("");
+ const [contextSearch, setContextSearch] = useState("");
+
+ const mentionQuery = useMemo(() => {
+ const match = activeDraft.match(/(?:^|\s)@([^\s@]*)$/);
+ return match?.[1]?.toLowerCase() ?? "";
+ }, [activeDraft]);
+
+ const visibleSessions = useMemo(
+ () =>
+ sessions.filter((session) =>
+ `${session.title} ${session.lastMessagePreview}`
+ .toLowerCase()
+ .includes(topicSearch.trim().toLowerCase())
+ ),
+ [sessions, topicSearch]
+ );
+
+ const attachableFiles = useMemo(
+ () =>
+ workspaceEntries.filter(
+ (entry) =>
+ entry.kind === "file" &&
+ isOpenableWorkspacePath(entry.path) &&
+ `${entry.name} ${entry.path}`
+ .toLowerCase()
+ .includes((mentionQuery || contextSearch).trim().toLowerCase())
+ ),
+ [contextSearch, mentionQuery, workspaceEntries]
+ );
+
+ const canSend = Boolean(
+ activeSession &&
+ activeDraft.trim() &&
+ cavemanReady &&
+ !activeSession.runtime.isBusy
+ );
+
+ const handleRenameRequest = useCallback(
+ (session: ChatSessionSummary) => {
+ const nextTitle = window.prompt("Rename topic", session.title)?.trim();
+
+ if (nextTitle) {
+ onRenameSession(session.id, nextTitle);
+ }
+ },
+ [onRenameSession]
+ );
+
+ return (
+
+
+
+
+
+
+
Topics
+
{workspaceRootName}
+
+
+
+ New
+
+
+
+ setTopicSearch(event.target.value)}
+ placeholder="Search topics"
+ value={topicSearch}
+ />
+
+
+ {visibleSessions.map((session) => {
+ const isActive = session.id === activeSession?.id;
+
+ return (
+
+
+ onSelectSession(session.id)}
+ >
+
+
+
+ {session.title}
+
+
+ {session.status.replace("_", " ")}
+
+
+
+ {session.lastMessagePreview || "No messages yet."}
+
+
+
+
+
+
+
+
+
+
+
+ {
+ if (key === "rename") {
+ handleRenameRequest(session);
+ return;
+ }
+
+ if (key === "delete") {
+ onDeleteSession(session.id);
+ }
+ }}
+ >
+
+
+
+ Rename
+
+
+
+
+
+ Delete Conversation
+
+
+
+
+
+
+
+ );
+ })}
+
+ {visibleSessions.length === 0 ? (
+
+
+ No topics match your search.
+
+
+ ) : null}
+
+
+
+
+
+
+
+
+
Agent Chat
+
+ {activeSession?.title ?? "No topic selected"}
+
+
+
+
+
+ {!cavemanReady ? (
+
+
+
+ {cavemanChecking ? "Verifying Caveman skill..." : cavemanMessage}
+
+
+ ) : null}
+
+
+ {activeSession ? (
+
+ {activeSession.messages.map((message) => (
+
+
+
+ {message.role}
+
+
+ {message.content}
+
+
+
+ ))}
+
+ {activeSession.messages.length === 0 ? (
+
+
+ Start a topic. PRD, SPEC, supporting docs, and the workspace tree are
+ already attached to the first turn.
+
+
+ ) : null}
+
+ ) : (
+
+
+ Create a topic to start the agent chat workspace.
+
+
+ )}
+
+
+
+ onDraftChange(event.target.value)}
+ placeholder="Ask the agent anything about this topic. Type @ to attach a workspace file."
+ value={activeDraft}
+ />
+
+ {mentionQuery.length > 0 ? (
+
+
+ {attachableFiles.length > 0 ? (
+ attachableFiles.slice(0, 8).map((entry) => (
+ onAttachFile(entry.path)}
+ >
+
+ {entry.path}
+
+ ))
+ ) : (
+
+ No matching files.
+
+ )}
+
+
+ ) : null}
+
+
+
+
+ {activeSession?.runtime.executionSummary ?? "Ready for the next prompt."}
+
+
+ {activeSession?.runtime.awaitingApproval ? (
+
+
+ Approve
+
+ ) : null}
+ {activeSession?.runtime.isBusy ? (
+
+
+ Stop
+
+ ) : null}
+
+
+ Send
+
+
+
+
+
+
+
+
+
+
+
+
Context & Artifacts
+
+
+ {activeSession ? (
+ <>
+
+
+ onSaveSessionConfig({
+ sessionId: activeSession.id,
+ selectedModel,
+ selectedReasoning: activeSession.selectedReasoning,
+ autonomyMode: activeSession.autonomyMode,
+ contextItems: activeSession.contextItems
+ })
+ }
+ options={getModelOptions(
+ configuredModelProviders.length === 1
+ ? configuredModelProviders[0]
+ : undefined
+ )}
+ value={activeSession.selectedModel}
+ />
+
+ onSaveSessionConfig({
+ sessionId: activeSession.id,
+ selectedModel: activeSession.selectedModel,
+ selectedReasoning,
+ autonomyMode: activeSession.autonomyMode,
+ contextItems: activeSession.contextItems
+ })
+ }
+ options={getReasoningOptions(activeSession.selectedModel)}
+ value={activeSession.selectedReasoning}
+ />
+
+ onSaveSessionConfig({
+ sessionId: activeSession.id,
+ selectedModel: activeSession.selectedModel,
+ selectedReasoning: activeSession.selectedReasoning,
+ autonomyMode,
+ contextItems: activeSession.contextItems
+ })
+ }
+ options={AUTONOMY_OPTIONS.map((value) => ({
+ value,
+ label: value.replace("_", " ")
+ }))}
+ value={activeSession.autonomyMode}
+ />
+
+
+
+
+ Attached Context
+
+
+ {activeSession.contextItems.map((item) => (
+ onRemoveContextItem(item.id)}
+ >
+ {item.label}
+ x
+
+ ))}
+
+
+
+
+
+ Attach Files
+
+
setContextSearch(event.target.value)}
+ placeholder="Attach workspace files"
+ value={contextSearch}
+ />
+
+
+ {attachableFiles.length > 0 ? (
+ attachableFiles.slice(0, 18).map((entry) => (
+ onAttachFile(entry.path)}
+ >
+
+ {entry.path}
+
+ ))
+ ) : (
+
+ No matching files.
+
+ )}
+
+
+
+
+
+
+
+
+ {activeSession.runtime.terminalOutput.length === 0 ? (
+
+ Terminal output will appear here for the active topic.
+
+ ) : (
+ activeSession.runtime.terminalOutput.map((line, index) => (
+ {line}
+ ))
+ )}
+
+
+ >
+ ) : (
+
+
+ Select a topic to inspect context, diff, and terminal output.
+
+
+ )}
+
+
+
+
+ );
+}
+
+function SelectField({
+ label,
+ options,
+ value,
+ onChange
+}: {
+ label: string;
+ options: Array<{ value: Value; label: string; hint?: string }>;
+ value: Value;
+ onChange: (value: Value) => void;
+}) {
+ const handleSelectionChange = useCallback(
+ (key: Key | null) => {
+ if (key !== null) {
+ onChange(String(key) as Value);
+ }
+ },
+ [onChange]
+ );
+
+ return (
+
+ {label}
+
+
+
+
+
+
+ {options.map((option) => (
+
+
+ {option.label}
+ {option.hint ? (
+
+ {option.hint}
+
+ ) : null}
+
+
+ ))}
+
+
+
+ );
+}
diff --git a/src/styles.css b/src/styles.css
index 76022c4..217df52 100644
--- a/src/styles.css
+++ b/src/styles.css
@@ -1,5 +1,6 @@
@import "tailwindcss";
@import "@heroui/styles";
+@source not "../.specforge";
:root {
color-scheme: dark;
diff --git a/vite.config.ts b/vite.config.ts
index fdfb6a3..8220053 100644
--- a/vite.config.ts
+++ b/vite.config.ts
@@ -21,7 +21,7 @@ export default defineConfig({
}
: undefined,
watch: {
- ignored: ["**/src-tauri/**"]
+ ignored: ["**/src-tauri/**", "**/.specforge/**"]
}
},
envPrefix: ["VITE_", "TAURI_ENV_*"],
From dd08d73172b8f6c9b84741c29ab3f2fd0501e960 Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 18:17:20 -0300
Subject: [PATCH 10/32] Make Caveman built in and fix project settings
persistence
---
docs/PRD.md | 8 +-
docs/SPEC.md | 13 +-
src-tauri/src/chat.rs | 100 +------------
src-tauri/src/lib.rs | 8 +-
src-tauri/src/models.rs | 7 -
src/App.tsx | 194 ++++++++++++++------------
src/components/ControlColumn.tsx | 30 ++--
src/components/SettingsPrimitives.tsx | 12 +-
src/lib/runtime.ts | 9 --
src/screens/ChatScreen.tsx | 13 +-
src/store/useChatStore.ts | 4 +-
src/styles.css | 5 +-
src/types.ts | 5 -
13 files changed, 153 insertions(+), 255 deletions(-)
diff --git a/docs/PRD.md b/docs/PRD.md
index f7d7643..55b727c 100644
--- a/docs/PRD.md
+++ b/docs/PRD.md
@@ -59,10 +59,10 @@ The product combines four responsibilities in one desktop shell:
### 4.4. Caveman Requirement
-* **Always-on skill:** Chat must always run with the Caveman skill active.
-* **Auto-verify on chat entry:** On first entry into `/chat`, SpecForge must verify the skill and install it if missing.
-* **Required install command:** The default install path must use `npx skills add JuliusBrussee/caveman`.
-* **Blocked sends on failure:** If Caveman installation or verification fails, message sending must remain disabled and the UI must show a recoverable banner.
+* **Always-on mode:** Chat must always apply Caveman-style response guidance automatically for every topic.
+* **No chat-entry verification:** Entering `/chat` must not trigger a blocking install or verification step.
+* **Built-in prompt behavior:** The Caveman behavior must be injected by SpecForge's own system prompt so users do not need to spend turn tokens enabling it.
+* **Never gate navigation or settings:** Caveman activation must not stop topic switching, route changes, or model/autonomy edits.
### 4.5. Review And Settings
diff --git a/docs/SPEC.md b/docs/SPEC.md
index da23699..b70a571 100644
--- a/docs/SPEC.md
+++ b/docs/SPEC.md
@@ -111,23 +111,16 @@ The desktop runtime currently exposes:
* `send_chat_message`
* `approve_chat_session`
* `stop_chat_session`
-* `ensure_caveman_skill`
Chat runtime updates are streamed through a typed `chat-session-event` payload carrying the session id plus the current session snapshot or summary update.
## 6. Caveman Integration
-Entering `/chat` triggers backend verification of the Caveman skill. If it is missing, the backend attempts installation with:
+SpecForge now treats Caveman as a built-in chat response mode instead of a runtime-installed dependency.
-* `npx skills add JuliusBrussee/caveman`
+Each outgoing chat turn prepends a compact Caveman-style instruction before the normal SpecForge system prompt, so the behavior stays active without making the user spend tokens enabling it manually.
-If verification or installation fails:
-
-* the frontend stores a failed Caveman state in `useChatStore`
-* the composer send action remains disabled
-* the user sees a blocking but recoverable banner in chat
-
-Each outgoing chat turn prepends a Caveman activation preamble before the normal SpecForge system prompt so the skill is active on every turn, not merely installed on disk.
+There is no chat-entry verification or installation path tied to navigation, and Caveman state must never block topic changes, route changes, or session configuration edits.
## 7. Review Workspace
diff --git a/src-tauri/src/chat.rs b/src-tauri/src/chat.rs
index 8a4b927..9c637e1 100644
--- a/src-tauri/src/chat.rs
+++ b/src-tauri/src/chat.rs
@@ -7,10 +7,10 @@ use crate::{
},
git::git_get_diff_for_root,
models::{
- CavemanStatusPayload, ChatContextItem, ChatEventPayload, ChatMessage, ChatRuntimeState,
- ChatSessionIndexPayload, ChatSessionSnapshot, ChatSessionSummary, ProjectSettings,
+ ChatContextItem, ChatEventPayload, ChatMessage, ChatRuntimeState, ChatSessionIndexPayload,
+ ChatSessionSnapshot, ChatSessionSummary, ProjectSettings,
},
- paths::{resolve_override_path, resolve_relative_path_under_root},
+ paths::resolve_relative_path_under_root,
project::{build_default_project_settings, load_project_settings_from_workspace_root},
state::{ChatExecutionRuntime, SharedState, WorkspaceContext},
};
@@ -30,9 +30,8 @@ use tauri::{AppHandle, Emitter, State};
const SESSION_DIRECTORY_RELATIVE_PATH: &str = ".specforge/sessions";
const SESSION_INDEX_FILE_NAME: &str = "index.json";
-const CAVEMAN_REPO: &str = "JuliusBrussee/caveman";
const CAVEMAN_PREAMBLE: &str =
- "/caveman\nUse the Caveman skill. Be direct and minimal in prose while keeping code and diffs fully normal.";
+ "Default response style: caveman. Keep prose terse and direct while leaving code blocks, commands, and diffs fully normal.";
static SESSION_COUNTER: AtomicU64 = AtomicU64::new(0);
@@ -197,29 +196,6 @@ pub(crate) fn stop_chat_session(
Ok(())
}
-#[tauri::command]
-pub(crate) fn ensure_caveman_skill() -> Result {
- if is_caveman_installed() {
- return Ok(CavemanStatusPayload {
- ready: true,
- detail: String::from("Caveman is already installed for this machine."),
- });
- }
-
- install_caveman_skill()?;
-
- if !is_caveman_installed() {
- return Err(String::from(
- "Caveman installation completed, but the skill directory could not be verified.",
- ));
- }
-
- Ok(CavemanStatusPayload {
- ready: true,
- detail: String::from("Caveman was installed and verified successfully."),
- })
-}
-
#[tauri::command]
pub(crate) fn send_chat_message(
app: AppHandle,
@@ -235,12 +211,6 @@ pub(crate) fn send_chat_message(
return Err(String::from("A message is required before sending."));
}
- if !is_caveman_installed() {
- return Err(String::from(
- "Caveman is not installed yet. Verify the skill before sending a chat turn.",
- ));
- }
-
let workspace = active_workspace_context(&state)?;
let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
@@ -1241,68 +1211,6 @@ fn create_chat_entity_id(prefix: &str) -> String {
format!("{prefix}-{millis:x}-{counter:x}")
}
-fn is_caveman_installed() -> bool {
- caveman_install_paths().iter().any(|path| path.exists())
-}
-
-fn caveman_install_paths() -> Vec {
- let Some(home_directory) = home_directory() else {
- return Vec::new();
- };
-
- vec![
- home_directory.join(".codex").join("skills").join("caveman"),
- home_directory.join(".claude").join("skills").join("caveman"),
- home_directory.join(".opencode").join("skill").join("caveman"),
- home_directory.join(".opencode").join("skills").join("caveman"),
- ]
-}
-
-fn install_caveman_skill() -> Result<(), String> {
- let npm_binary = resolve_npx_binary()?;
- let candidates = [
- vec!["skills", "add", CAVEMAN_REPO, "-y"],
- vec!["add-skill", CAVEMAN_REPO, "-g", "-a", "codex", "-a", "claude-code", "-y"],
- ];
-
- let mut last_error = None;
-
- for arguments in candidates {
- let output = Command::new(&npm_binary)
- .args(&arguments)
- .current_dir(resolve_override_path("."))
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .output();
-
- match output {
- Ok(output) if output.status.success() => return Ok(()),
- Ok(output) => {
- last_error = Some(format_process_failure("npx", &output));
- }
- Err(error) => {
- last_error = Some(format!("Unable to run npx for Caveman installation: {error}"));
- }
- }
- }
-
- Err(last_error.unwrap_or_else(|| {
- String::from("Unable to install Caveman because the skills CLI returned no output.")
- }))
-}
-
-fn resolve_npx_binary() -> Result {
- which::which("npx")
- .or_else(|_| which::which("npx.cmd"))
- .map_err(|_| String::from("npx was not found on PATH, so Caveman cannot be installed."))
-}
-
-fn home_directory() -> Option {
- std::env::var_os("USERPROFILE")
- .map(PathBuf::from)
- .or_else(|| std::env::var_os("HOME").map(PathBuf::from))
-}
-
#[derive(Clone, Copy)]
enum ChatExecutionPhase {
Proposal,
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 2aa638e..57bfecf 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -13,9 +13,8 @@ mod workspace;
use agent::{approve_action, kill_agent_process, spawn_cli_agent};
use chat::{
- approve_chat_session, create_chat_session, delete_chat_session, ensure_caveman_skill,
- load_chat_session, rename_chat_session, save_chat_session, send_chat_message,
- stop_chat_session,
+ approve_chat_session, create_chat_session, delete_chat_session, load_chat_session,
+ rename_chat_session, save_chat_session, send_chat_message, stop_chat_session,
};
use documents::{parse_document, pick_document};
use environment::run_environment_scan;
@@ -51,8 +50,7 @@ pub fn run() {
delete_chat_session,
send_chat_message,
approve_chat_session,
- stop_chat_session,
- ensure_caveman_skill
+ stop_chat_session
])
.run(tauri::generate_context!())
.expect("error while running tauri application");
diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs
index 2c4ec03..de20c43 100644
--- a/src-tauri/src/models.rs
+++ b/src-tauri/src/models.rs
@@ -169,13 +169,6 @@ pub(crate) struct ChatSessionIndexPayload {
pub(crate) last_active_session_id: Option,
}
-#[derive(Clone, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub(crate) struct CavemanStatusPayload {
- pub(crate) ready: bool,
- pub(crate) detail: String,
-}
-
#[derive(Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ChatEventPayload {
diff --git a/src/App.tsx b/src/App.tsx
index 54921a0..34316b3 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -14,6 +14,7 @@ import {
useLocation,
useNavigate
} from "react-router-dom";
+import { useShallow } from "zustand/react/shallow";
import { AppRail } from "./components/AppRail";
import {
@@ -31,6 +32,7 @@ import {
getReasoningLabel
} from "./lib/agentConfig";
import {
+ buildCurrentProjectSettings,
buildConfigPathDisplay,
buildWorkspaceNotice,
waitForNextPaint
@@ -42,7 +44,6 @@ import {
createChatSession,
deleteChatSession,
emergencyStop,
- ensureCavemanSkill,
generatePrdDocument,
generateSpecDocument,
getGitDiff,
@@ -80,7 +81,6 @@ import {
} from "./hooks/useAppLifecycle";
import {
useAgentStoreSlice,
- useChatStoreSlice,
useProjectStoreSlice,
useSettingsStoreSlice
} from "./hooks/useAppStoreSlices";
@@ -96,6 +96,7 @@ import { PrdScreen } from "./screens/PrdScreen";
import { SettingsScreen } from "./screens/SettingsScreen";
import { useAgentStore } from "./store/useAgentStore";
import { useChatStore } from "./store/useChatStore";
+import { useProjectStore } from "./store/useProjectStore";
import type {
ChatContextItem,
ChatSession,
@@ -111,14 +112,11 @@ function App() {
const desktopRuntime = isTauriRuntime();
const agentState = useAgentStoreSlice();
- const chatState = useChatStoreSlice();
const projectState = useProjectStoreSlice();
const settingsState = useSettingsStoreSlice();
const {
sessions: chatSessions,
activeSessionId,
- loadedSessions,
- drafts: chatDrafts,
cavemanReady,
cavemanMessage,
cavemanChecking,
@@ -130,7 +128,23 @@ function App() {
setSessionConfig,
deleteSession: deleteChatSessionState,
setCavemanStatus
- } = chatState;
+ } = useChatStore(
+ useShallow((state) => ({
+ sessions: state.sessions,
+ activeSessionId: state.activeSessionId,
+ cavemanReady: state.cavemanReady,
+ cavemanMessage: state.cavemanMessage,
+ cavemanChecking: state.cavemanChecking,
+ setSessions: state.setSessions,
+ setActiveSessionId: state.setActiveSessionId,
+ upsertSession: state.upsertSession,
+ setDraft: state.setDraft,
+ setContextItems: state.setContextItems,
+ setSessionConfig: state.setSessionConfig,
+ deleteSession: state.deleteSession,
+ setCavemanStatus: state.setCavemanStatus
+ }))
+ );
const [commandSearch, setCommandSearch] = useState("");
const [isImporting, setIsImporting] = useState(false);
@@ -166,12 +180,25 @@ function App() {
const hasScannedEnvironmentRef = useRef(false);
const projectSaveTimerRef = useRef(null);
const pendingProjectReloadRef = useRef(false);
+ const latestPathnameRef = useRef(location.pathname);
- const activeChatSession = useMemo(
- () => (activeSessionId ? loadedSessions[activeSessionId] ?? null : null),
- [activeSessionId, loadedSessions]
+ useEffect(() => {
+ latestPathnameRef.current = location.pathname;
+ }, [location.pathname]);
+
+ const activeChatSession = useChatStore(
+ useCallback(
+ (state) =>
+ state.activeSessionId ? state.loadedSessions[state.activeSessionId] ?? null : null,
+ []
+ )
+ );
+ const activeChatDraft = useChatStore(
+ useCallback(
+ (state) => (state.activeSessionId ? state.drafts[state.activeSessionId] ?? "" : ""),
+ []
+ )
);
- const activeChatDraft = activeSessionId ? chatDrafts[activeSessionId] ?? "" : "";
const reviewVisibleDiff = activeChatSession
? activeChatSession.runtime.pendingDiff ?? "No diff captured for the active chat topic yet."
: agentState.pendingDiff ?? latestDiff;
@@ -242,6 +269,29 @@ function App() {
const applyProjectContext = useCallback(
(context: ProjectContext, options?: { navigateToChat?: boolean }) => {
+ const normalizedCurrentProjectPath = projectRootPath
+ .replace(/\\/g, "/")
+ .replace(/\/+$/, "")
+ .toLowerCase();
+ const normalizedNextProjectPath = context.rootPath
+ .replace(/\\/g, "/")
+ .replace(/\/+$/, "")
+ .toLowerCase();
+ const isSameProject =
+ normalizedCurrentProjectPath.length > 0 &&
+ normalizedCurrentProjectPath === normalizedNextProjectPath;
+ const nextPrdSourcePath =
+ context.prdDocument?.sourcePath ?? context.settings.prdPath;
+ const nextSpecSourcePath =
+ context.specDocument?.sourcePath ?? context.settings.specPath;
+ const preserveEditingPrd =
+ isSameProject &&
+ projectState.prdPaneMode === "edit" &&
+ projectState.prdPath === nextPrdSourcePath;
+ const preserveEditingSpec =
+ isSameProject &&
+ projectState.specPaneMode === "edit" &&
+ projectState.specPath === nextSpecSourcePath;
const settingsPathDisplay = buildConfigPathDisplay(
context.settingsPath,
context.rootName
@@ -258,7 +308,9 @@ function App() {
])
);
- projectState.resetWorkspaceContext();
+ if (!isSameProject) {
+ projectState.resetWorkspaceContext();
+ }
setProjectRootName(context.rootName);
setProjectRootPath(context.rootPath);
setProjectConfigPath(context.settingsPath);
@@ -275,8 +327,8 @@ function App() {
setChatSessions(context.chatSessions);
setActiveSessionId(context.lastActiveSessionId ?? context.chatSessions[0]?.id ?? null);
setCavemanStatus({
- ready: false,
- message: "Caveman has not been verified for this project yet."
+ ready: true,
+ message: "Caveman mode is built into every topic."
});
setProjectStatusMessage(
context.hasSavedSettings
@@ -287,23 +339,35 @@ function App() {
setWorkspaceNotice(buildWorkspaceNotice(context));
startTransition(() => {
- projectState.setPrdContent(
- context.prdDocument?.content ?? "",
- context.prdDocument?.sourcePath ?? context.settings.prdPath
- );
- projectState.setSpecContent(
- context.specDocument?.content ?? "",
- context.specDocument?.sourcePath ?? context.settings.specPath
- );
- projectState.setPrdPaneMode("preview");
- projectState.setSpecPaneMode("preview");
+ if (!preserveEditingPrd) {
+ projectState.setPrdContent(
+ context.prdDocument?.content ?? "",
+ nextPrdSourcePath
+ );
+ projectState.setPrdPaneMode("preview");
+ }
+
+ if (!preserveEditingSpec) {
+ projectState.setSpecContent(
+ context.specDocument?.content ?? "",
+ nextSpecSourcePath
+ );
+ projectState.setSpecPaneMode("preview");
+ }
});
- if (options?.navigateToChat) {
+ if (options?.navigateToChat && latestPathnameRef.current === "/") {
navigate("/chat");
}
},
- [navigate, projectState, setActiveSessionId, setCavemanStatus, setChatSessions, settingsState]
+ [
+ navigate,
+ projectState,
+ setActiveSessionId,
+ setCavemanStatus,
+ setChatSessions,
+ settingsState
+ ]
);
const saveCurrentProjectSettings = useCallback(
@@ -329,9 +393,19 @@ function App() {
setIsProjectSaving(true);
try {
+ const latestProjectState = useProjectStore.getState();
+ const currentProjectSettings = buildCurrentProjectSettings({
+ configuredPrdPath: latestProjectState.configuredPrdPath,
+ configuredSpecPath: latestProjectState.configuredSpecPath,
+ prdPromptTemplate: latestProjectState.prdPromptTemplate,
+ selectedModel: latestProjectState.selectedModel,
+ selectedReasoning: latestProjectState.selectedReasoning,
+ specPromptTemplate: latestProjectState.specPromptTemplate,
+ supportingDocumentPaths: latestProjectState.supportingDocumentPaths
+ });
const savedSettings = await saveProjectSettings({
folderPath: projectRootPath,
- settings: derivedState.currentProjectSettings
+ settings: currentProjectSettings
});
projectState.setProjectSettings(savedSettings);
@@ -357,7 +431,6 @@ function App() {
[
applyProjectContext,
derivedState.configPathDisplay,
- derivedState.currentProjectSettings,
desktopRuntime,
projectRootName,
projectRootPath,
@@ -933,7 +1006,7 @@ function App() {
);
const handleSendChatMessage = useCallback(async () => {
- if (!activeChatSession || !activeChatDraft.trim() || !cavemanReady) {
+ if (!activeChatSession || !activeChatDraft.trim()) {
return;
}
@@ -950,7 +1023,7 @@ function App() {
error instanceof Error ? error.message : "Unable to send the current chat message."
);
}
- }, [activeChatDraft, activeChatSession, cavemanReady, setChatDraft, settingsState]);
+ }, [activeChatDraft, activeChatSession, setChatDraft, settingsState]);
const handleApproveChatSession = useCallback(async () => {
if (!activeChatSession) {
@@ -1097,11 +1170,7 @@ function App() {
setLastProjectPath: settingsState.setLastProjectPath
});
useEffect(() => {
- if (
- !desktopRuntime ||
- !activeSessionId ||
- loadedSessions[activeSessionId]
- ) {
+ if (!desktopRuntime || !activeSessionId || activeChatSession) {
return;
}
@@ -1128,7 +1197,7 @@ function App() {
return () => {
isDisposed = true;
};
- }, [activeSessionId, desktopRuntime, loadedSessions, upsertSession]);
+ }, [activeChatSession, activeSessionId, desktopRuntime, upsertSession]);
useEffect(() => {
if (
@@ -1173,61 +1242,6 @@ function App() {
upsertSession
]);
- useEffect(() => {
- if (
- !desktopRuntime ||
- !hasSavedProjectSettings ||
- !isChatRoute ||
- cavemanReady ||
- cavemanChecking
- ) {
- return;
- }
-
- let isDisposed = false;
- setCavemanStatus({
- ready: false,
- message: "Verifying Caveman skill...",
- checking: true
- });
-
- void ensureCavemanSkill()
- .then((status) => {
- if (isDisposed) {
- return;
- }
-
- setCavemanStatus({
- ready: status.ready,
- message: status.detail
- });
- })
- .catch((error) => {
- if (isDisposed) {
- return;
- }
-
- setCavemanStatus({
- ready: false,
- message:
- error instanceof Error
- ? error.message
- : "Unable to verify the Caveman skill for this workspace."
- });
- });
-
- return () => {
- isDisposed = true;
- };
- }, [
- cavemanChecking,
- cavemanReady,
- desktopRuntime,
- hasSavedProjectSettings,
- isChatRoute,
- setCavemanStatus
- ]);
-
useEffect(() => {
let unlisten: (() => void) | undefined;
let isDisposed = false;
diff --git a/src/components/ControlColumn.tsx b/src/components/ControlColumn.tsx
index c9a6057..e19170f 100644
--- a/src/components/ControlColumn.tsx
+++ b/src/components/ControlColumn.tsx
@@ -14,7 +14,6 @@ import {
useEffect,
useMemo,
useState,
- type Key,
type ReactNode
} from "react";
@@ -191,9 +190,9 @@ function ModelSelectField({
}, [activeProviderTab, hasProviderTabs, singleConfiguredProvider]);
const handleSelectionChange = useCallback(
- (key: Key | null) => {
- if (key !== null) {
- onSelectionChange(String(key) as ModelId);
+ (value: string | number | null) => {
+ if (value !== null) {
+ onSelectionChange(String(value) as ModelId);
}
},
[onSelectionChange]
@@ -202,8 +201,8 @@ function ModelSelectField({
return (
{label}
@@ -219,7 +218,14 @@ function ModelSelectField({
provider === activeProviderTab ? MODEL_PROVIDER_TAB_ACTIVE_CLASS : MODEL_PROVIDER_TAB_CLASS
}
key={provider}
- onClick={() => setActiveProviderTab(provider)}
+ onClick={(event) => {
+ event.preventDefault();
+ event.stopPropagation();
+ setActiveProviderTab(provider);
+ }}
+ onMouseDown={(event) => {
+ event.preventDefault();
+ }}
type="button"
>
{getProviderLabel(provider)}
@@ -289,9 +295,9 @@ function ControlSelectField({
onSelectionChange
}: ControlSelectFieldProps) {
const handleSelectionChange = useCallback(
- (key: Key | null) => {
- if (key !== null) {
- onSelectionChange(String(key) as Value);
+ (value: string | number | null) => {
+ if (value !== null) {
+ onSelectionChange(String(value) as Value);
}
},
[onSelectionChange]
@@ -300,8 +306,8 @@ function ControlSelectField({
return (
{label}
diff --git a/src/components/SettingsPrimitives.tsx b/src/components/SettingsPrimitives.tsx
index ce1d8a6..11963ea 100644
--- a/src/components/SettingsPrimitives.tsx
+++ b/src/components/SettingsPrimitives.tsx
@@ -3,7 +3,7 @@ import {
ListBox,
Select
} from "@heroui/react";
-import { useCallback, type Key, type ReactNode } from "react";
+import { useCallback, type ReactNode } from "react";
import type { SelectOption } from "../lib/agentConfig";
@@ -34,9 +34,9 @@ export function SettingsSelectField({
onSelectionChange
}: SettingsSelectFieldProps) {
const handleSelectionChange = useCallback(
- (key: Key | null) => {
- if (key !== null) {
- onSelectionChange(String(key) as Value);
+ (value: string | number | null) => {
+ if (value !== null) {
+ onSelectionChange(String(value) as Value);
}
},
[onSelectionChange]
@@ -45,8 +45,8 @@ export function SettingsSelectField({
return (
{label}
diff --git a/src/lib/runtime.ts b/src/lib/runtime.ts
index 9f02a84..4fea792 100644
--- a/src/lib/runtime.ts
+++ b/src/lib/runtime.ts
@@ -3,7 +3,6 @@ import { listen, type UnlistenFn } from "@tauri-apps/api/event";
import type {
AgentEventPayload,
- CavemanStatus,
ChatContextItem,
ChatEventPayload,
ChatSession,
@@ -332,14 +331,6 @@ export async function stopChatSession(sessionId: string): Promise {
await invoke("stop_chat_session", { sessionId });
}
-export async function ensureCavemanSkill(): Promise {
- if (!isTauriRuntime()) {
- throw new Error("Chat sessions require the desktop runtime.");
- }
-
- return invoke("ensure_caveman_skill");
-}
-
export async function subscribeToAgentEvents(handlers: {
onLine: (line: string) => void;
onState: (payload: AgentEventPayload) => void;
diff --git a/src/screens/ChatScreen.tsx b/src/screens/ChatScreen.tsx
index b8eab03..59ccc15 100644
--- a/src/screens/ChatScreen.tsx
+++ b/src/screens/ChatScreen.tsx
@@ -27,7 +27,7 @@ import {
WarningCircle,
XmarkCircle
} from "iconoir-react";
-import { useCallback, useMemo, useState, type Key } from "react";
+import { useCallback, useMemo, useState } from "react";
import { DiffPreview } from "../components/DiffPreview";
import {
@@ -177,7 +177,6 @@ export function ChatScreen({
const canSend = Boolean(
activeSession &&
activeDraft.trim() &&
- cavemanReady &&
!activeSession.runtime.isBusy
);
@@ -594,9 +593,9 @@ function SelectField({
onChange: (value: Value) => void;
}) {
const handleSelectionChange = useCallback(
- (key: Key | null) => {
- if (key !== null) {
- onChange(String(key) as Value);
+ (value: string | number | null) => {
+ if (value !== null) {
+ onChange(String(value) as Value);
}
},
[onChange]
@@ -605,8 +604,8 @@ function SelectField({
return (
{label}
diff --git a/src/store/useChatStore.ts b/src/store/useChatStore.ts
index 2873ee6..8fff149 100644
--- a/src/store/useChatStore.ts
+++ b/src/store/useChatStore.ts
@@ -90,8 +90,8 @@ export const useChatStore = create((set) => ({
activeSessionId: null,
loadedSessions: {},
drafts: {},
- cavemanReady: false,
- cavemanMessage: "Caveman has not been verified yet.",
+ cavemanReady: true,
+ cavemanMessage: "Caveman mode is built into every topic.",
cavemanChecking: false,
setSessions: (sessions) =>
set((state) => {
diff --git a/src/styles.css b/src/styles.css
index 217df52..e91b4bb 100644
--- a/src/styles.css
+++ b/src/styles.css
@@ -1,6 +1,7 @@
-@import "tailwindcss";
+@import "tailwindcss" source(none);
@import "@heroui/styles";
-@source not "../.specforge";
+@source "../index.html";
+@source "../src";
:root {
color-scheme: dark;
diff --git a/src/types.ts b/src/types.ts
index 94b138b..f1e2ed0 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -192,8 +192,3 @@ export interface ChatEventPayload {
runtime: ChatRuntimeState | null;
summary: ChatSessionSummary | null;
}
-
-export interface CavemanStatus {
- ready: boolean;
- detail: string;
-}
From 8793145892d3da39b75ae9a20d0a2cff2d3e42ad Mon Sep 17 00:00:00 2001
From: matheusBBarni
Date: Sun, 12 Apr 2026 18:53:20 -0300
Subject: [PATCH 11/32] Fix model selection and workspace tree state resets
---
src-tauri/src/chat.rs | 11 +++++---
src-tauri/src/project.rs | 33 ++++++++++++++++-------
src/components/ControlColumn.tsx | 21 ++++++++-------
src/components/InspectorColumn.tsx | 38 ++++++++++++++++++++++++---
src/components/SettingsPrimitives.tsx | 12 ++++-----
src/hooks/useAppView.ts | 4 ++-
src/screens/ChatScreen.tsx | 12 ++++-----
7 files changed, 92 insertions(+), 39 deletions(-)
diff --git a/src-tauri/src/chat.rs b/src-tauri/src/chat.rs
index 9c637e1..8983798 100644
--- a/src-tauri/src/chat.rs
+++ b/src-tauri/src/chat.rs
@@ -11,7 +11,10 @@ use crate::{
ChatSessionSnapshot, ChatSessionSummary, ProjectSettings,
},
paths::resolve_relative_path_under_root,
- project::{build_default_project_settings, load_project_settings_from_workspace_root},
+ project::{
+ build_default_project_settings, load_project_settings_from_workspace_root,
+ normalize_project_model, normalize_project_reasoning,
+ },
state::{ChatExecutionRuntime, SharedState, WorkspaceContext},
};
use std::{
@@ -96,8 +99,10 @@ pub(crate) fn save_chat_session(
) -> Result {
let workspace = active_workspace_context(&state)?;
let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
- snapshot.selected_model = selected_model.trim().to_string();
- snapshot.selected_reasoning = selected_reasoning.trim().to_string();
+ snapshot.selected_model =
+ normalize_project_model(&selected_model, &snapshot.selected_model)?;
+ snapshot.selected_reasoning =
+ normalize_project_reasoning(&selected_reasoning, &snapshot.selected_reasoning)?;
snapshot.autonomy_mode = normalize_autonomy_mode(&autonomy_mode);
snapshot.context_items = normalize_context_items(context_items);
snapshot.updated_at = current_timestamp();
diff --git a/src-tauri/src/project.rs b/src-tauri/src/project.rs
index ffe1d40..dfdf6d2 100644
--- a/src-tauri/src/project.rs
+++ b/src-tauri/src/project.rs
@@ -161,9 +161,9 @@ pub(crate) fn normalize_project_settings(
};
let selected_model =
- normalize_project_model(&provided.selected_model, &defaults.selected_model);
+ normalize_project_model(&provided.selected_model, &defaults.selected_model)?;
let selected_reasoning =
- normalize_project_reasoning(&provided.selected_reasoning, &defaults.selected_reasoning);
+ normalize_project_reasoning(&provided.selected_reasoning, &defaults.selected_reasoning)?;
let normalized_prd_path =
normalize_project_path_or_default(workspace_root, &provided.prd_path, &defaults.prd_path)?;
let normalized_spec_path = normalize_project_path_or_default(
@@ -265,7 +265,7 @@ pub(crate) fn load_project_settings_from_workspace_root(
read_project_settings(&settings_path, workspace_root, defaults)
}
-fn normalize_project_model(value: &str, fallback: &str) -> String {
+pub(crate) fn normalize_project_model(value: &str, fallback: &str) -> Result {
const VALID_MODELS: &[&str] = &[
"gpt-5.4",
"gpt-5.4-mini",
@@ -280,17 +280,30 @@ fn normalize_project_model(value: &str, fallback: &str) -> String {
"claude-3-5-haiku-20241022",
"claude-3-haiku-20240307",
];
+ let trimmed_value = value.trim();
- if VALID_MODELS.contains(&value.trim()) {
- return value.trim().to_string();
+ if trimmed_value.is_empty() {
+ return Ok(fallback.to_string());
+ }
+
+ if VALID_MODELS.contains(&trimmed_value) {
+ return Ok(trimmed_value.to_string());
}
- fallback.to_string()
+ Err(format!("Unsupported model `{trimmed_value}` in project settings."))
}
-fn normalize_project_reasoning(value: &str, fallback: &str) -> String {
- match value.trim() {
- "low" | "medium" | "high" | "max" => value.trim().to_string(),
- _ => fallback.to_string(),
+pub(crate) fn normalize_project_reasoning(value: &str, fallback: &str) -> Result {
+ let trimmed_value = value.trim();
+
+ if trimmed_value.is_empty() {
+ return Ok(fallback.to_string());
+ }
+
+ match trimmed_value {
+ "low" | "medium" | "high" | "max" => Ok(trimmed_value.to_string()),
+ _ => Err(format!(
+ "Unsupported reasoning profile `{trimmed_value}` in project settings."
+ )),
}
}
diff --git a/src/components/ControlColumn.tsx b/src/components/ControlColumn.tsx
index e19170f..3249f91 100644
--- a/src/components/ControlColumn.tsx
+++ b/src/components/ControlColumn.tsx
@@ -14,6 +14,7 @@ import {
useEffect,
useMemo,
useState,
+ type Key,
type ReactNode
} from "react";
@@ -190,9 +191,9 @@ function ModelSelectField({
}, [activeProviderTab, hasProviderTabs, singleConfiguredProvider]);
const handleSelectionChange = useCallback(
- (value: string | number | null) => {
- if (value !== null) {
- onSelectionChange(String(value) as ModelId);
+ (key: Key | null) => {
+ if (key !== null) {
+ onSelectionChange(String(key) as ModelId);
}
},
[onSelectionChange]
@@ -201,8 +202,8 @@ function ModelSelectField({
return (
{label}
@@ -295,9 +296,9 @@ function ControlSelectField({
onSelectionChange
}: ControlSelectFieldProps) {
const handleSelectionChange = useCallback(
- (value: string | number | null) => {
- if (value !== null) {
- onSelectionChange(String(value) as Value);
+ (key: Key | null) => {
+ if (key !== null) {
+ onSelectionChange(String(key) as Value);
}
},
[onSelectionChange]
@@ -306,8 +307,8 @@ function ControlSelectField({
return (
{label}
diff --git a/src/components/InspectorColumn.tsx b/src/components/InspectorColumn.tsx
index a69de88..7ec0005 100644
--- a/src/components/InspectorColumn.tsx
+++ b/src/components/InspectorColumn.tsx
@@ -4,7 +4,16 @@ import {
Folder,
Page
} from "iconoir-react";
-import { memo, useCallback, useEffect, useMemo, useState, type ChangeEvent, type RefObject } from "react";
+import {
+ memo,
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+ type ChangeEvent,
+ type RefObject
+} from "react";
import type { WorkspaceEntry } from "../types";
@@ -19,6 +28,7 @@ interface InspectorColumnProps {
hasWorkspaceEntries: boolean;
emptyStateMessage: string;
workspaceRootName: string;
+ workspaceRootPath: string;
workspaceNotice: string;
folderInputRef: RefObject;
onOpenFolder: () => void;
@@ -36,6 +46,7 @@ export const InspectorColumn = memo(function InspectorColumn({
hasWorkspaceEntries,
emptyStateMessage,
workspaceRootName,
+ workspaceRootPath,
workspaceNotice,
folderInputRef,
onOpenFolder,
@@ -49,10 +60,31 @@ export const InspectorColumn = memo(function InspectorColumn({
);
const [collapsedFolders, setCollapsedFolders] = useState(() => directoryPaths);
const collapsedFoldersLookup = useMemo(() => new Set(collapsedFolders), [collapsedFolders]);
+ const previousWorkspaceRootPathRef = useRef(workspaceRootPath);
useEffect(() => {
- setCollapsedFolders(directoryPaths);
- }, [directoryPaths, workspaceRootName]);
+ setCollapsedFolders((currentValue) => {
+ if (previousWorkspaceRootPathRef.current !== workspaceRootPath) {
+ previousWorkspaceRootPathRef.current = workspaceRootPath;
+ return directoryPaths;
+ }
+
+ const currentLookup = new Set(currentValue);
+ const nextValue = [...currentValue];
+ let hasNewDirectory = false;
+
+ for (const path of directoryPaths) {
+ if (currentLookup.has(path)) {
+ continue;
+ }
+
+ nextValue.push(path);
+ hasNewDirectory = true;
+ }
+
+ return hasNewDirectory ? nextValue : currentValue;
+ });
+ }, [directoryPaths, workspaceRootPath]);
const toggleFolder = useCallback((path: string) => {
setCollapsedFolders((currentValue) =>
diff --git a/src/components/SettingsPrimitives.tsx b/src/components/SettingsPrimitives.tsx
index 11963ea..ce1d8a6 100644
--- a/src/components/SettingsPrimitives.tsx
+++ b/src/components/SettingsPrimitives.tsx
@@ -3,7 +3,7 @@ import {
ListBox,
Select
} from "@heroui/react";
-import { useCallback, type ReactNode } from "react";
+import { useCallback, type Key, type ReactNode } from "react";
import type { SelectOption } from "../lib/agentConfig";
@@ -34,9 +34,9 @@ export function SettingsSelectField({
onSelectionChange
}: SettingsSelectFieldProps) {
const handleSelectionChange = useCallback(
- (value: string | number | null) => {
- if (value !== null) {
- onSelectionChange(String(value) as Value);
+ (key: Key | null) => {
+ if (key !== null) {
+ onSelectionChange(String(key) as Value);
}
},
[onSelectionChange]
@@ -45,8 +45,8 @@ export function SettingsSelectField({
return (
{label}
diff --git a/src/hooks/useAppView.ts b/src/hooks/useAppView.ts
index 2881d1e..5941a12 100644
--- a/src/hooks/useAppView.ts
+++ b/src/hooks/useAppView.ts
@@ -676,13 +676,15 @@ export function useAppScreenProps({
onOpenFolder: handlePickProjectFolder,
workspaceEntries: derivedState.filteredWorkspaceEntries,
workspaceNotice,
- workspaceRootName: projectRootName
+ workspaceRootName: projectRootName,
+ workspaceRootPath: projectRootPath
}),
[
derivedState,
folderInputRef,
handlePickProjectFolder,
projectRootName,
+ projectRootPath,
settingsState.workspaceEntries.length,
uiHandlers,
workspaceNotice
diff --git a/src/screens/ChatScreen.tsx b/src/screens/ChatScreen.tsx
index 59ccc15..9aef58b 100644
--- a/src/screens/ChatScreen.tsx
+++ b/src/screens/ChatScreen.tsx
@@ -27,7 +27,7 @@ import {
WarningCircle,
XmarkCircle
} from "iconoir-react";
-import { useCallback, useMemo, useState } from "react";
+import { useCallback, useMemo, useState, type Key } from "react";
import { DiffPreview } from "../components/DiffPreview";
import {
@@ -593,9 +593,9 @@ function SelectField({
onChange: (value: Value) => void;
}) {
const handleSelectionChange = useCallback(
- (value: string | number | null) => {
- if (value !== null) {
- onChange(String(value) as Value);
+ (key: Key | null) => {
+ if (key !== null) {
+ onChange(String(key) as Value);
}
},
[onChange]
@@ -604,8 +604,8 @@ function SelectField({
return (
{label}
From 58fe5b517be3bd0a987ba3cbd28649efd6f6f949 Mon Sep 17 00:00:00 2001
From: MatheusBBarni <29718530+MatheusBBarni@users.noreply.github.com>
Date: Tue, 14 Apr 2026 10:28:51 +0000
Subject: [PATCH 12/32] refactor: comprehensive project improvements
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- Security: Enable CSP, add prompt boundary markers
- Testing: Add vitest with 83 tests, Biome linter
- Architecture: Extract App.tsx into focused hooks (1423→821 lines),
split useAppView.ts into 4 files, split chat.rs into 5 submodules
- Rust: Replace string enums with SessionStatus/AutonomyMode/MessageRole,
deduplicate approval gate, batch disk writes, reduce cloning
- Performance: React.lazy code splitting, React.memo on screens,
optimize terminal buffer
- Accessibility: aria-labels on all interactive controls
- Config: private:true, explicit tailwindcss, meta description
- Docs: Update SPEC.md and AGENTS.md
---
AGENTS.md | 9 +-
biome.json | 50 +
bun.lock | 204 ++++
docs/SPEC.md | 1 +
index.html | 1 +
package.json | 14 +-
src-tauri/src/chat.rs | 1312 ----------------------
src-tauri/src/chat/commands.rs | 242 ++++
src-tauri/src/chat/execution.rs | 724 ++++++++++++
src-tauri/src/chat/helpers.rs | 168 +++
src-tauri/src/chat/mod.rs | 11 +
src-tauri/src/chat/persistence.rs | 125 +++
src-tauri/src/chat/prompt.rs | 126 +++
src-tauri/src/generation.rs | 4 +-
src-tauri/src/models.rs | 61 +-
src-tauri/tauri.conf.json | 2 +-
src/App.tsx | 900 +++------------
src/components/ControlColumn.tsx | 8 +-
src/components/DocumentPane.tsx | 5 +-
src/components/ExecutionPanel.tsx | 6 +-
src/components/InspectorColumn.tsx | 8 +-
src/components/MainWorkspace.tsx | 14 +-
src/components/ProjectAiSettingsCard.tsx | 4 +-
src/components/SettingsPrimitives.tsx | 2 +-
src/components/SettingsView.tsx | 15 +-
src/hooks/useAppLifecycle.ts | 4 +-
src/hooks/useAppScreenProps.ts | 314 ++++++
src/hooks/useAppUiHandlers.ts | 205 ++++
src/hooks/useAppView.ts | 631 +----------
src/hooks/useChatHandlers.ts | 277 +++++
src/hooks/useDocumentHandlers.ts | 313 ++++++
src/hooks/useProjectHandlers.ts | 324 ++++++
src/hooks/useProjectSettingsHandlers.ts | 110 ++
src/lib/agentConfig.test.ts | 191 ++++
src/lib/appState.test.ts | 221 ++++
src/lib/appState.ts | 16 +-
src/lib/projectConfig.test.ts | 211 ++++
src/lib/projectConfig.ts | 2 +-
src/lib/runtime.ts | 6 +-
src/screens/ChatScreen.tsx | 17 +-
src/screens/ConfigurationScreen.tsx | 5 +-
src/screens/PrdScreen.tsx | 6 +-
src/screens/SettingsScreen.tsx | 9 +-
src/store/useAgentStore.ts | 11 +-
src/store/useSettingsStore.ts | 69 +-
src/test/setup.ts | 1 +
tsconfig.json | 2 +-
vitest.config.ts | 11 +
48 files changed, 4164 insertions(+), 2808 deletions(-)
create mode 100644 biome.json
delete mode 100644 src-tauri/src/chat.rs
create mode 100644 src-tauri/src/chat/commands.rs
create mode 100644 src-tauri/src/chat/execution.rs
create mode 100644 src-tauri/src/chat/helpers.rs
create mode 100644 src-tauri/src/chat/mod.rs
create mode 100644 src-tauri/src/chat/persistence.rs
create mode 100644 src-tauri/src/chat/prompt.rs
create mode 100644 src/hooks/useAppScreenProps.ts
create mode 100644 src/hooks/useAppUiHandlers.ts
create mode 100644 src/hooks/useChatHandlers.ts
create mode 100644 src/hooks/useDocumentHandlers.ts
create mode 100644 src/hooks/useProjectHandlers.ts
create mode 100644 src/hooks/useProjectSettingsHandlers.ts
create mode 100644 src/lib/agentConfig.test.ts
create mode 100644 src/lib/appState.test.ts
create mode 100644 src/lib/projectConfig.test.ts
create mode 100644 src/test/setup.ts
create mode 100644 vitest.config.ts
diff --git a/AGENTS.md b/AGENTS.md
index 03cc9bd..4e3659e 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -3,6 +3,9 @@
## Commands
- `bun run dev` starts the Vite web shell on port 5173 for local UI work.
- `bun run build` runs `tsc && vite build` for the frontend bundle.
+- `bun run test` runs the Vitest test suite (unit and component tests).
+- `bun run lint` runs the Biome linter/formatter check against `src/`.
+- `bun run lint:fix` auto-fixes lint and format issues in `src/`.
- `bun run tauri dev` starts the desktop shell against the local Vite server.
- `bun run tauri build` packages the desktop app.
- `cargo check --manifest-path .\src-tauri\Cargo.toml` validates the Rust command layer.
@@ -19,7 +22,7 @@
- MUST keep `docs/PRD.md` and `docs/SPEC.md` aligned with shipped behavior when you change the review flow, model options, import flow, or autonomy modes.
- MUST run `cargo check --manifest-path .\src-tauri\Cargo.toml` after changing Rust commands or shared payload types.
- MUST run `bun run build` after changing routes, stores, document loading, or shared UI contracts. If Bun reports broken shims first, repair them with `bun install --force`.
-- MUST extract new frontend behavior out of `src/App.tsx` when possible; it is already the main orchestration shell.
+- MUST extract new frontend behavior out of `src/App.tsx` when possible; it is being refactored from a monolithic file into smaller components and route shells.
- MUST use context7 mcp server for all documentation lookups.
## Ask First
@@ -34,12 +37,12 @@
- NEVER commit secrets, auth tokens, or machine-local binary paths.
## Landmines
-- `src/App.tsx` is 1,071 lines and `src/styles.css` is 1,047 lines. Prefer targeted extractions over widening either file.
+- `src/App.tsx` is being refactored from a large monolithic file. Prefer targeted extractions into `src/components` and `src/lib` over widening it further. `src/styles.css` is similarly large; prefer Tailwind utilities over adding more custom CSS.
- `src-tauri/src/lib.rs` mixes environment scanning, workspace walking, diffing, document parsing, and simulated agent execution. Small changes are safer than broad rewrites.
- `git_get_diff()` returns a sample diff when the working tree is clean, and `FALLBACK_WORKSPACE` advertises files that may not exist yet. Keep demo behavior separate from real execution logic.
- `scan_workspace_folder()` and `filterWorkspaceFiles()` intentionally respect `.gitignore`; preserve that behavior when changing workspace discovery.
- `docs/SPEC.md` is partially aspirational today and references tooling that is not in `package.json` (`react-markdown`, `react-syntax-highlighter`, `tauri-plugin-store`). Update the docs when you normalize or implement those gaps.
-- There is no committed frontend formatter, linter, or automated test suite yet. MUST ask before introducing one mid-task.
+- Biome is configured as the project linter/formatter (`bun run lint`). Vitest is configured for testing (`bun run test`). A CI workflow validates typecheck, lint, and tests on push/PR.
## Patterns
- Put reusable frontend behavior in `src/lib`, long-lived client state in `src/store`, and view composition in `src/components` or route shells.
diff --git a/biome.json b/biome.json
new file mode 100644
index 0000000..d6496eb
--- /dev/null
+++ b/biome.json
@@ -0,0 +1,50 @@
+{
+ "$schema": "https://biomejs.dev/schemas/2.4.11/schema.json",
+ "assist": { "actions": { "source": { "organizeImports": "on" } } },
+ "linter": {
+ "enabled": true,
+ "rules": {
+ "recommended": true,
+ "a11y": {
+ "noLabelWithoutControl": "warn"
+ },
+ "complexity": {
+ "noExcessiveCognitiveComplexity": "warn"
+ },
+ "correctness": {
+ "noUnusedImports": "warn",
+ "noUnusedVariables": "warn",
+ "noUnusedFunctionParameters": "warn",
+ "useExhaustiveDependencies": "warn"
+ },
+ "style": {
+ "noNonNullAssertion": "off",
+ "useImportType": "off"
+ },
+ "suspicious": {
+ "noExplicitAny": "warn",
+ "noArrayIndexKey": "warn"
+ }
+ }
+ },
+ "formatter": {
+ "enabled": false
+ },
+ "css": {
+ "linter": {
+ "enabled": false
+ },
+ "parser": {
+ "cssModules": false
+ }
+ },
+ "files": {
+ "includes": [
+ "**",
+ "!**/dist/**",
+ "!**/node_modules/**",
+ "!**/src-tauri/target/**",
+ "!**/*.css"
+ ]
+ }
+}
diff --git a/bun.lock b/bun.lock
index df4969f..f2dd43a 100644
--- a/bun.lock
+++ b/bun.lock
@@ -16,18 +16,28 @@
"zustand": "^5.0.8",
},
"devDependencies": {
+ "@biomejs/biome": "^2.0.0",
"@tailwindcss/vite": "^4.1.0",
"@tauri-apps/cli": "^2.8.0",
+ "@testing-library/jest-dom": "^6.6.3",
+ "@testing-library/react": "^16.3.0",
"@types/node": "^24.0.0",
"@types/react": "^19.0.0",
"@types/react-dom": "^19.0.0",
"@vitejs/plugin-react": "^5.0.0",
+ "jsdom": "^26.1.0",
+ "tailwindcss": "^4.1.0",
"typescript": "^5.9.0",
"vite": "^7.0.0",
+ "vitest": "^3.2.1",
},
},
},
"packages": {
+ "@adobe/css-tools": ["@adobe/css-tools@4.4.4", "", {}, "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg=="],
+
+ "@asamuzakjp/css-color": ["@asamuzakjp/css-color@3.2.0", "", { "dependencies": { "@csstools/css-calc": "^2.1.3", "@csstools/css-color-parser": "^3.0.9", "@csstools/css-parser-algorithms": "^3.0.4", "@csstools/css-tokenizer": "^3.0.3", "lru-cache": "^10.4.3" } }, "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw=="],
+
"@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="],
"@babel/compat-data": ["@babel/compat-data@7.29.0", "", {}, "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg=="],
@@ -60,12 +70,42 @@
"@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="],
+ "@babel/runtime": ["@babel/runtime@7.29.2", "", {}, "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g=="],
+
"@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="],
"@babel/traverse": ["@babel/traverse@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/types": "^7.29.0", "debug": "^4.3.1" } }, "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA=="],
"@babel/types": ["@babel/types@7.29.0", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A=="],
+ "@biomejs/biome": ["@biomejs/biome@2.4.11", "", { "optionalDependencies": { "@biomejs/cli-darwin-arm64": "2.4.11", "@biomejs/cli-darwin-x64": "2.4.11", "@biomejs/cli-linux-arm64": "2.4.11", "@biomejs/cli-linux-arm64-musl": "2.4.11", "@biomejs/cli-linux-x64": "2.4.11", "@biomejs/cli-linux-x64-musl": "2.4.11", "@biomejs/cli-win32-arm64": "2.4.11", "@biomejs/cli-win32-x64": "2.4.11" }, "bin": { "biome": "bin/biome" } }, "sha512-nWxHX8tf3Opb/qRgZpBbsTOqOodkbrkJ7S+JxJAruxOReaDPPmPuLBAGQ8vigyUgo0QBB+oQltNEAvalLcjggA=="],
+
+ "@biomejs/cli-darwin-arm64": ["@biomejs/cli-darwin-arm64@2.4.11", "", { "os": "darwin", "cpu": "arm64" }, "sha512-wOt+ed+L2dgZanWyL6i29qlXMc088N11optzpo10peayObBaAshbTcxKUchzEMp9QSY8rh5h6VfAFE3WTS1rqg=="],
+
+ "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@2.4.11", "", { "os": "darwin", "cpu": "x64" }, "sha512-gZ6zR8XmZlExfi/Pz/PffmdpWOQ8Qhy7oBztgkR8/ylSRyLwfRPSadmiVCV8WQ8PoJ2MWUy2fgID9zmtgUUJmw=="],
+
+ "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@2.4.11", "", { "os": "linux", "cpu": "arm64" }, "sha512-avdJaEElXrKceK0va9FkJ4P5ci3N01TGkc6ni3P8l3BElqbOz42Wg2IyX3gbh0ZLEd4HVKEIrmuVu/AMuSeFFA=="],
+
+ "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@2.4.11", "", { "os": "linux", "cpu": "arm64" }, "sha512-+Sbo1OAmlegtdwqFE8iOxFIWLh1B3OEgsuZfBpyyN/kWuqZ8dx9ZEes6zVnDMo+zRHF2wLynRVhoQmV7ohxl2Q=="],
+
+ "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@2.4.11", "", { "os": "linux", "cpu": "x64" }, "sha512-TagWV0iomp5LnEnxWFg4nQO+e52Fow349vaX0Q/PIcX6Zhk4GGBgp3qqZ8PVkpC+cuehRctMf3+6+FgQ8jCEFQ=="],
+
+ "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@2.4.11", "", { "os": "linux", "cpu": "x64" }, "sha512-bexd2IklK7ZgPhrz6jXzpIL6dEAH9MlJU1xGTrypx+FICxrXUp4CqtwfiuoDKse+UlgAlWtzML3jrMqeEAHEhA=="],
+
+ "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@2.4.11", "", { "os": "win32", "cpu": "arm64" }, "sha512-RJhaTnY8byzxDt4bDVb7AFPHkPcjOPK3xBip4ZRTrN3TEfyhjLRm3r3mqknqydgVTB74XG8l4jMLwEACEeihVg=="],
+
+ "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@2.4.11", "", { "os": "win32", "cpu": "x64" }, "sha512-A8D3JM/00C2KQgUV3oj8Ba15EHEYwebAGCy5Sf9GAjr5Y3+kJIYOiESoqRDeuRZueuMdCsbLZIUqmPhpYXJE9A=="],
+
+ "@csstools/color-helpers": ["@csstools/color-helpers@5.1.0", "", {}, "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA=="],
+
+ "@csstools/css-calc": ["@csstools/css-calc@2.1.4", "", { "peerDependencies": { "@csstools/css-parser-algorithms": "^3.0.5", "@csstools/css-tokenizer": "^3.0.4" } }, "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ=="],
+
+ "@csstools/css-color-parser": ["@csstools/css-color-parser@3.1.0", "", { "dependencies": { "@csstools/color-helpers": "^5.1.0", "@csstools/css-calc": "^2.1.4" }, "peerDependencies": { "@csstools/css-parser-algorithms": "^3.0.5", "@csstools/css-tokenizer": "^3.0.4" } }, "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA=="],
+
+ "@csstools/css-parser-algorithms": ["@csstools/css-parser-algorithms@3.0.5", "", { "peerDependencies": { "@csstools/css-tokenizer": "^3.0.4" } }, "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ=="],
+
+ "@csstools/css-tokenizer": ["@csstools/css-tokenizer@3.0.4", "", {}, "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw=="],
+
"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.7", "", { "os": "aix", "cpu": "ppc64" }, "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg=="],
"@esbuild/android-arm": ["@esbuild/android-arm@0.27.7", "", { "os": "android", "cpu": "arm" }, "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ=="],
@@ -492,6 +532,14 @@
"@tauri-apps/cli-win32-x64-msvc": ["@tauri-apps/cli-win32-x64-msvc@2.10.1", "", { "os": "win32", "cpu": "x64" }, "sha512-6Cn7YpPFwzChy0ERz6djKEmUehWrYlM+xTaNzGPgZocw3BD7OfwfWHKVWxXzdjEW2KfKkHddfdxK1XXTYqBRLg=="],
+ "@testing-library/dom": ["@testing-library/dom@10.4.1", "", { "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", "@types/aria-query": "^5.0.1", "aria-query": "5.3.0", "dom-accessibility-api": "^0.5.9", "lz-string": "^1.5.0", "picocolors": "1.1.1", "pretty-format": "^27.0.2" } }, "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg=="],
+
+ "@testing-library/jest-dom": ["@testing-library/jest-dom@6.9.1", "", { "dependencies": { "@adobe/css-tools": "^4.4.0", "aria-query": "^5.0.0", "css.escape": "^1.5.1", "dom-accessibility-api": "^0.6.3", "picocolors": "^1.1.1", "redent": "^3.0.0" } }, "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA=="],
+
+ "@testing-library/react": ["@testing-library/react@16.3.2", "", { "dependencies": { "@babel/runtime": "^7.12.5" }, "peerDependencies": { "@testing-library/dom": "^10.0.0", "@types/react": "^18.0.0 || ^19.0.0", "@types/react-dom": "^18.0.0 || ^19.0.0", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-XU5/SytQM+ykqMnAnvB2umaJNIOsLF3PVv//1Ew4CTcpz0/BRyy/af40qqrt7SjKpDdT1saBMc42CUok5gaw+g=="],
+
+ "@types/aria-query": ["@types/aria-query@5.0.4", "", {}, "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw=="],
+
"@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="],
"@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="],
@@ -500,6 +548,10 @@
"@types/babel__traverse": ["@types/babel__traverse@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.2" } }, "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q=="],
+ "@types/chai": ["@types/chai@5.2.3", "", { "dependencies": { "@types/deep-eql": "*", "assertion-error": "^2.0.1" } }, "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA=="],
+
+ "@types/deep-eql": ["@types/deep-eql@4.0.2", "", {}, "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw=="],
+
"@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
"@types/node": ["@types/node@24.12.2", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-A1sre26ke7HDIuY/M23nd9gfB+nrmhtYyMINbjI1zHJxYteKR6qSMX56FsmjMcDb3SMcjJg5BiRRgOCC/yBD0g=="],
@@ -510,12 +562,42 @@
"@vitejs/plugin-react": ["@vitejs/plugin-react@5.2.0", "", { "dependencies": { "@babel/core": "^7.29.0", "@babel/plugin-transform-react-jsx-self": "^7.27.1", "@babel/plugin-transform-react-jsx-source": "^7.27.1", "@rolldown/pluginutils": "1.0.0-rc.3", "@types/babel__core": "^7.20.5", "react-refresh": "^0.18.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-YmKkfhOAi3wsB1PhJq5Scj3GXMn3WvtQ/JC0xoopuHoXSdmtdStOpFrYaT1kie2YgFBcIe64ROzMYRjCrYOdYw=="],
+ "@vitest/expect": ["@vitest/expect@3.2.4", "", { "dependencies": { "@types/chai": "^5.2.2", "@vitest/spy": "3.2.4", "@vitest/utils": "3.2.4", "chai": "^5.2.0", "tinyrainbow": "^2.0.0" } }, "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig=="],
+
+ "@vitest/mocker": ["@vitest/mocker@3.2.4", "", { "dependencies": { "@vitest/spy": "3.2.4", "estree-walker": "^3.0.3", "magic-string": "^0.30.17" }, "peerDependencies": { "msw": "^2.4.9", "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" }, "optionalPeers": ["msw", "vite"] }, "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ=="],
+
+ "@vitest/pretty-format": ["@vitest/pretty-format@3.2.4", "", { "dependencies": { "tinyrainbow": "^2.0.0" } }, "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA=="],
+
+ "@vitest/runner": ["@vitest/runner@3.2.4", "", { "dependencies": { "@vitest/utils": "3.2.4", "pathe": "^2.0.3", "strip-literal": "^3.0.0" } }, "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ=="],
+
+ "@vitest/snapshot": ["@vitest/snapshot@3.2.4", "", { "dependencies": { "@vitest/pretty-format": "3.2.4", "magic-string": "^0.30.17", "pathe": "^2.0.3" } }, "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ=="],
+
+ "@vitest/spy": ["@vitest/spy@3.2.4", "", { "dependencies": { "tinyspy": "^4.0.3" } }, "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw=="],
+
+ "@vitest/utils": ["@vitest/utils@3.2.4", "", { "dependencies": { "@vitest/pretty-format": "3.2.4", "loupe": "^3.1.4", "tinyrainbow": "^2.0.0" } }, "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA=="],
+
+ "agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="],
+
+ "ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
+
+ "ansi-styles": ["ansi-styles@5.2.0", "", {}, "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA=="],
+
+ "aria-query": ["aria-query@5.3.2", "", {}, "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw=="],
+
+ "assertion-error": ["assertion-error@2.0.1", "", {}, "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA=="],
+
"baseline-browser-mapping": ["baseline-browser-mapping@2.10.17", "", { "bin": { "baseline-browser-mapping": "dist/cli.cjs" } }, "sha512-HdrkN8eVG2CXxeifv/VdJ4A4RSra1DTW8dc/hdxzhGHN8QePs6gKaWM9pHPcpCoxYZJuOZ8drHmbdpLHjCYjLA=="],
"browserslist": ["browserslist@4.28.2", "", { "dependencies": { "baseline-browser-mapping": "^2.10.12", "caniuse-lite": "^1.0.30001782", "electron-to-chromium": "^1.5.328", "node-releases": "^2.0.36", "update-browserslist-db": "^1.2.3" }, "bin": { "browserslist": "cli.js" } }, "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg=="],
+ "cac": ["cac@6.7.14", "", {}, "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ=="],
+
"caniuse-lite": ["caniuse-lite@1.0.30001787", "", {}, "sha512-mNcrMN9KeI68u7muanUpEejSLghOKlVhRqS/Za2IeyGllJ9I9otGpR9g3nsw7n4W378TE/LyIteA0+/FOZm4Kg=="],
+ "chai": ["chai@5.3.3", "", { "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", "deep-eql": "^5.0.1", "loupe": "^3.1.0", "pathval": "^2.0.0" } }, "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw=="],
+
+ "check-error": ["check-error@2.1.3", "", {}, "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA=="],
+
"client-only": ["client-only@0.0.1", "", {}, "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA=="],
"clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="],
@@ -524,22 +606,42 @@
"cookie": ["cookie@1.1.1", "", {}, "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ=="],
+ "css.escape": ["css.escape@1.5.1", "", {}, "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg=="],
+
+ "cssstyle": ["cssstyle@4.6.0", "", { "dependencies": { "@asamuzakjp/css-color": "^3.2.0", "rrweb-cssom": "^0.8.0" } }, "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg=="],
+
"csstype": ["csstype@3.2.3", "", {}, "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ=="],
+ "data-urls": ["data-urls@5.0.0", "", { "dependencies": { "whatwg-mimetype": "^4.0.0", "whatwg-url": "^14.0.0" } }, "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg=="],
+
"debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="],
"decimal.js": ["decimal.js@10.6.0", "", {}, "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg=="],
+ "deep-eql": ["deep-eql@5.0.2", "", {}, "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q=="],
+
+ "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="],
+
"detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="],
+ "dom-accessibility-api": ["dom-accessibility-api@0.6.3", "", {}, "sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w=="],
+
"electron-to-chromium": ["electron-to-chromium@1.5.334", "", {}, "sha512-mgjZAz7Jyx1SRCwEpy9wefDS7GvNPazLthHg8eQMJ76wBdGQQDW33TCrUTvQ4wzpmOrv2zrFoD3oNufMdyMpog=="],
"enhanced-resolve": ["enhanced-resolve@5.20.1", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.3.0" } }, "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA=="],
+ "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="],
+
+ "es-module-lexer": ["es-module-lexer@1.7.0", "", {}, "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA=="],
+
"esbuild": ["esbuild@0.27.7", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.7", "@esbuild/android-arm": "0.27.7", "@esbuild/android-arm64": "0.27.7", "@esbuild/android-x64": "0.27.7", "@esbuild/darwin-arm64": "0.27.7", "@esbuild/darwin-x64": "0.27.7", "@esbuild/freebsd-arm64": "0.27.7", "@esbuild/freebsd-x64": "0.27.7", "@esbuild/linux-arm": "0.27.7", "@esbuild/linux-arm64": "0.27.7", "@esbuild/linux-ia32": "0.27.7", "@esbuild/linux-loong64": "0.27.7", "@esbuild/linux-mips64el": "0.27.7", "@esbuild/linux-ppc64": "0.27.7", "@esbuild/linux-riscv64": "0.27.7", "@esbuild/linux-s390x": "0.27.7", "@esbuild/linux-x64": "0.27.7", "@esbuild/netbsd-arm64": "0.27.7", "@esbuild/netbsd-x64": "0.27.7", "@esbuild/openbsd-arm64": "0.27.7", "@esbuild/openbsd-x64": "0.27.7", "@esbuild/openharmony-arm64": "0.27.7", "@esbuild/sunos-x64": "0.27.7", "@esbuild/win32-arm64": "0.27.7", "@esbuild/win32-ia32": "0.27.7", "@esbuild/win32-x64": "0.27.7" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w=="],
"escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="],
+ "estree-walker": ["estree-walker@3.0.3", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g=="],
+
+ "expect-type": ["expect-type@1.3.0", "", {}, "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA=="],
+
"fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="],
"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
@@ -548,18 +650,32 @@
"graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="],
+ "html-encoding-sniffer": ["html-encoding-sniffer@4.0.0", "", { "dependencies": { "whatwg-encoding": "^3.1.1" } }, "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ=="],
+
+ "http-proxy-agent": ["http-proxy-agent@7.0.2", "", { "dependencies": { "agent-base": "^7.1.0", "debug": "^4.3.4" } }, "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig=="],
+
+ "https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="],
+
"iconoir-react": ["iconoir-react@7.11.0", "", { "peerDependencies": { "react": "18 || 19" } }, "sha512-uvTKtnHYwbbTsmQ6HCcliYd50WK0GbjP497RwdISxKzfS01x4cK1Mn/F2mT/t2roSaJQ0I+KnHxMcyvmNMXWsQ=="],
+ "iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="],
+
"ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="],
+ "indent-string": ["indent-string@4.0.0", "", {}, "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg=="],
+
"input-otp": ["input-otp@1.4.2", "", { "peerDependencies": { "react": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-l3jWwYNvrEa6NTCt7BECfCm48GvwuZzkoeG3gBL2w4CHeOXW3eKFmf9UNYkNfYc3mxMrthMnxjIE07MT0zLBQA=="],
"intl-messageformat": ["intl-messageformat@10.7.18", "", { "dependencies": { "@formatjs/ecma402-abstract": "2.3.6", "@formatjs/fast-memoize": "2.2.7", "@formatjs/icu-messageformat-parser": "2.11.4", "tslib": "^2.8.0" } }, "sha512-m3Ofv/X/tV8Y3tHXLohcuVuhWKo7BBq62cqY15etqmLxg2DZ34AGGgQDeR+SCta2+zICb1NX83af0GJmbQ1++g=="],
+ "is-potential-custom-element-name": ["is-potential-custom-element-name@1.0.1", "", {}, "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ=="],
+
"jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="],
"js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="],
+ "jsdom": ["jsdom@26.1.0", "", { "dependencies": { "cssstyle": "^4.2.1", "data-urls": "^5.0.0", "decimal.js": "^10.5.0", "html-encoding-sniffer": "^4.0.0", "http-proxy-agent": "^7.0.2", "https-proxy-agent": "^7.0.6", "is-potential-custom-element-name": "^1.0.1", "nwsapi": "^2.2.16", "parse5": "^7.2.1", "rrweb-cssom": "^0.8.0", "saxes": "^6.0.0", "symbol-tree": "^3.2.4", "tough-cookie": "^5.1.1", "w3c-xmlserializer": "^5.0.0", "webidl-conversions": "^7.0.0", "whatwg-encoding": "^3.1.1", "whatwg-mimetype": "^4.0.0", "whatwg-url": "^14.1.1", "ws": "^8.18.0", "xml-name-validator": "^5.0.0" }, "peerDependencies": { "canvas": "^3.0.0" }, "optionalPeers": ["canvas"] }, "sha512-Cvc9WUhxSMEo4McES3P7oK3QaXldCfNWp7pl2NNeiIFlCoLr3kfq9kb1fxftiwk1FLV7CvpvDfonxtzUDeSOPg=="],
+
"jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="],
"json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="],
@@ -588,22 +704,40 @@
"lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.32.0", "", { "os": "win32", "cpu": "x64" }, "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q=="],
+ "loupe": ["loupe@3.2.1", "", {}, "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ=="],
+
"lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="],
+ "lz-string": ["lz-string@1.5.0", "", { "bin": { "lz-string": "bin/bin.js" } }, "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ=="],
+
"magic-string": ["magic-string@0.30.21", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ=="],
+ "min-indent": ["min-indent@1.0.1", "", {}, "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg=="],
+
"ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
"nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="],
"node-releases": ["node-releases@2.0.37", "", {}, "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg=="],
+ "nwsapi": ["nwsapi@2.2.23", "", {}, "sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ=="],
+
+ "parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="],
+
+ "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="],
+
+ "pathval": ["pathval@2.0.1", "", {}, "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ=="],
+
"picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
"picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="],
"postcss": ["postcss@8.5.9", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw=="],
+ "pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="],
+
+ "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="],
+
"react": ["react@19.2.5", "", {}, "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA=="],
"react-aria": ["react-aria@3.47.0", "", { "dependencies": { "@internationalized/string": "^3.2.7", "@react-aria/breadcrumbs": "^3.5.32", "@react-aria/button": "^3.14.5", "@react-aria/calendar": "^3.9.5", "@react-aria/checkbox": "^3.16.5", "@react-aria/color": "^3.1.5", "@react-aria/combobox": "^3.15.0", "@react-aria/datepicker": "^3.16.1", "@react-aria/dialog": "^3.5.34", "@react-aria/disclosure": "^3.1.3", "@react-aria/dnd": "^3.11.6", "@react-aria/focus": "^3.21.5", "@react-aria/gridlist": "^3.14.4", "@react-aria/i18n": "^3.12.16", "@react-aria/interactions": "^3.27.1", "@react-aria/label": "^3.7.25", "@react-aria/landmark": "^3.0.10", "@react-aria/link": "^3.8.9", "@react-aria/listbox": "^3.15.3", "@react-aria/menu": "^3.21.0", "@react-aria/meter": "^3.4.30", "@react-aria/numberfield": "^3.12.5", "@react-aria/overlays": "^3.31.2", "@react-aria/progress": "^3.4.30", "@react-aria/radio": "^3.12.5", "@react-aria/searchfield": "^3.8.12", "@react-aria/select": "^3.17.3", "@react-aria/selection": "^3.27.2", "@react-aria/separator": "^3.4.16", "@react-aria/slider": "^3.8.5", "@react-aria/ssr": "^3.9.10", "@react-aria/switch": "^3.7.11", "@react-aria/table": "^3.17.11", "@react-aria/tabs": "^3.11.1", "@react-aria/tag": "^3.8.1", "@react-aria/textfield": "^3.18.5", "@react-aria/toast": "^3.0.11", "@react-aria/tooltip": "^3.9.2", "@react-aria/tree": "^3.1.7", "@react-aria/utils": "^3.33.1", "@react-aria/visually-hidden": "^3.8.31", "@react-types/shared": "^3.33.1" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1", "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1" } }, "sha512-nvahimIqdByl/PXk/xPkG30LPRzcin+/Uk0uFfwbbKRRFC9aa22a6BRULZLqVHwa9GaNyKe6CDUxO1Dde4v0kA=="],
@@ -612,6 +746,8 @@
"react-dom": ["react-dom@19.2.5", "", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.5" } }, "sha512-J5bAZz+DXMMwW/wV3xzKke59Af6CHY7G4uYLN1OvBcKEsWOs4pQExj86BBKamxl/Ik5bx9whOrvBlSDfWzgSag=="],
+ "react-is": ["react-is@17.0.2", "", {}, "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w=="],
+
"react-refresh": ["react-refresh@0.18.0", "", {}, "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw=="],
"react-router": ["react-router@7.14.0", "", { "dependencies": { "cookie": "^1.0.1", "set-cookie-parser": "^2.6.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" }, "optionalPeers": ["react-dom"] }, "sha512-m/xR9N4LQLmAS0ZhkY2nkPA1N7gQ5TUVa5n8TgANuDTARbn1gt+zLPXEm7W0XDTbrQ2AJSJKhoa6yx1D8BcpxQ=="],
@@ -620,16 +756,36 @@
"react-stately": ["react-stately@3.45.0", "", { "dependencies": { "@react-stately/calendar": "^3.9.3", "@react-stately/checkbox": "^3.7.5", "@react-stately/collections": "^3.12.10", "@react-stately/color": "^3.9.5", "@react-stately/combobox": "^3.13.0", "@react-stately/data": "^3.15.2", "@react-stately/datepicker": "^3.16.1", "@react-stately/disclosure": "^3.0.11", "@react-stately/dnd": "^3.7.4", "@react-stately/form": "^3.2.4", "@react-stately/list": "^3.13.4", "@react-stately/menu": "^3.9.11", "@react-stately/numberfield": "^3.11.0", "@react-stately/overlays": "^3.6.23", "@react-stately/radio": "^3.11.5", "@react-stately/searchfield": "^3.5.19", "@react-stately/select": "^3.9.2", "@react-stately/selection": "^3.20.9", "@react-stately/slider": "^3.7.5", "@react-stately/table": "^3.15.4", "@react-stately/tabs": "^3.8.9", "@react-stately/toast": "^3.1.3", "@react-stately/toggle": "^3.9.5", "@react-stately/tooltip": "^3.5.11", "@react-stately/tree": "^3.9.6", "@react-types/shared": "^3.33.1" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1" } }, "sha512-G3bYr0BIiookpt4H05VeZUuVS/FslQAj2TeT8vDfCiL314Y+LtPXIPe/a3eamCA0wljy7z1EDYKV50Qbz7pcJg=="],
+ "redent": ["redent@3.0.0", "", { "dependencies": { "indent-string": "^4.0.0", "strip-indent": "^3.0.0" } }, "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg=="],
+
"rollup": ["rollup@4.60.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.60.1", "@rollup/rollup-android-arm64": "4.60.1", "@rollup/rollup-darwin-arm64": "4.60.1", "@rollup/rollup-darwin-x64": "4.60.1", "@rollup/rollup-freebsd-arm64": "4.60.1", "@rollup/rollup-freebsd-x64": "4.60.1", "@rollup/rollup-linux-arm-gnueabihf": "4.60.1", "@rollup/rollup-linux-arm-musleabihf": "4.60.1", "@rollup/rollup-linux-arm64-gnu": "4.60.1", "@rollup/rollup-linux-arm64-musl": "4.60.1", "@rollup/rollup-linux-loong64-gnu": "4.60.1", "@rollup/rollup-linux-loong64-musl": "4.60.1", "@rollup/rollup-linux-ppc64-gnu": "4.60.1", "@rollup/rollup-linux-ppc64-musl": "4.60.1", "@rollup/rollup-linux-riscv64-gnu": "4.60.1", "@rollup/rollup-linux-riscv64-musl": "4.60.1", "@rollup/rollup-linux-s390x-gnu": "4.60.1", "@rollup/rollup-linux-x64-gnu": "4.60.1", "@rollup/rollup-linux-x64-musl": "4.60.1", "@rollup/rollup-openbsd-x64": "4.60.1", "@rollup/rollup-openharmony-arm64": "4.60.1", "@rollup/rollup-win32-arm64-msvc": "4.60.1", "@rollup/rollup-win32-ia32-msvc": "4.60.1", "@rollup/rollup-win32-x64-gnu": "4.60.1", "@rollup/rollup-win32-x64-msvc": "4.60.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w=="],
+ "rrweb-cssom": ["rrweb-cssom@0.8.0", "", {}, "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw=="],
+
+ "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="],
+
+ "saxes": ["saxes@6.0.0", "", { "dependencies": { "xmlchars": "^2.2.0" } }, "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA=="],
+
"scheduler": ["scheduler@0.27.0", "", {}, "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="],
"semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="],
"set-cookie-parser": ["set-cookie-parser@2.7.2", "", {}, "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw=="],
+ "siginfo": ["siginfo@2.0.0", "", {}, "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g=="],
+
"source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="],
+ "stackback": ["stackback@0.0.2", "", {}, "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw=="],
+
+ "std-env": ["std-env@3.10.0", "", {}, "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg=="],
+
+ "strip-indent": ["strip-indent@3.0.0", "", { "dependencies": { "min-indent": "^1.0.0" } }, "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ=="],
+
+ "strip-literal": ["strip-literal@3.1.0", "", { "dependencies": { "js-tokens": "^9.0.1" } }, "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg=="],
+
+ "symbol-tree": ["symbol-tree@3.2.4", "", {}, "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw=="],
+
"tailwind-merge": ["tailwind-merge@3.4.0", "", {}, "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g=="],
"tailwind-variants": ["tailwind-variants@3.2.2", "", { "peerDependencies": { "tailwind-merge": ">=3.0.0", "tailwindcss": "*" }, "optionalPeers": ["tailwind-merge"] }, "sha512-Mi4kHeMTLvKlM98XPnK+7HoBPmf4gygdFmqQPaDivc3DpYS6aIY6KiG/PgThrGvii5YZJqRsPz0aPyhoFzmZgg=="],
@@ -638,8 +794,26 @@
"tapable": ["tapable@2.3.2", "", {}, "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA=="],
+ "tinybench": ["tinybench@2.9.0", "", {}, "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg=="],
+
+ "tinyexec": ["tinyexec@0.3.2", "", {}, "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA=="],
+
"tinyglobby": ["tinyglobby@0.2.16", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.4" } }, "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg=="],
+ "tinypool": ["tinypool@1.1.1", "", {}, "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg=="],
+
+ "tinyrainbow": ["tinyrainbow@2.0.0", "", {}, "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw=="],
+
+ "tinyspy": ["tinyspy@4.0.4", "", {}, "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q=="],
+
+ "tldts": ["tldts@6.1.86", "", { "dependencies": { "tldts-core": "^6.1.86" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ=="],
+
+ "tldts-core": ["tldts-core@6.1.86", "", {}, "sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA=="],
+
+ "tough-cookie": ["tough-cookie@5.1.2", "", { "dependencies": { "tldts": "^6.1.32" } }, "sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A=="],
+
+ "tr46": ["tr46@5.1.1", "", { "dependencies": { "punycode": "^2.3.1" } }, "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw=="],
+
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
"tw-animate-css": ["tw-animate-css@1.4.0", "", {}, "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ=="],
@@ -654,10 +828,34 @@
"vite": ["vite@7.3.2", "", { "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", "picomatch": "^4.0.3", "postcss": "^8.5.6", "rollup": "^4.43.0", "tinyglobby": "^0.2.15" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^20.19.0 || >=22.12.0", "jiti": ">=1.21.0", "less": "^4.0.0", "lightningcss": "^1.21.0", "sass": "^1.70.0", "sass-embedded": "^1.70.0", "stylus": ">=0.54.8", "sugarss": "^5.0.0", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg=="],
+ "vite-node": ["vite-node@3.2.4", "", { "dependencies": { "cac": "^6.7.14", "debug": "^4.4.1", "es-module-lexer": "^1.7.0", "pathe": "^2.0.3", "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" }, "bin": { "vite-node": "vite-node.mjs" } }, "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg=="],
+
+ "vitest": ["vitest@3.2.4", "", { "dependencies": { "@types/chai": "^5.2.2", "@vitest/expect": "3.2.4", "@vitest/mocker": "3.2.4", "@vitest/pretty-format": "^3.2.4", "@vitest/runner": "3.2.4", "@vitest/snapshot": "3.2.4", "@vitest/spy": "3.2.4", "@vitest/utils": "3.2.4", "chai": "^5.2.0", "debug": "^4.4.1", "expect-type": "^1.2.1", "magic-string": "^0.30.17", "pathe": "^2.0.3", "picomatch": "^4.0.2", "std-env": "^3.9.0", "tinybench": "^2.9.0", "tinyexec": "^0.3.2", "tinyglobby": "^0.2.14", "tinypool": "^1.1.1", "tinyrainbow": "^2.0.0", "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", "vite-node": "3.2.4", "why-is-node-running": "^2.3.0" }, "peerDependencies": { "@edge-runtime/vm": "*", "@types/debug": "^4.1.12", "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", "@vitest/browser": "3.2.4", "@vitest/ui": "3.2.4", "happy-dom": "*", "jsdom": "*" }, "optionalPeers": ["@edge-runtime/vm", "@types/debug", "@types/node", "@vitest/browser", "@vitest/ui", "happy-dom", "jsdom"], "bin": { "vitest": "vitest.mjs" } }, "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A=="],
+
+ "w3c-xmlserializer": ["w3c-xmlserializer@5.0.0", "", { "dependencies": { "xml-name-validator": "^5.0.0" } }, "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA=="],
+
+ "webidl-conversions": ["webidl-conversions@7.0.0", "", {}, "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g=="],
+
+ "whatwg-encoding": ["whatwg-encoding@3.1.1", "", { "dependencies": { "iconv-lite": "0.6.3" } }, "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ=="],
+
+ "whatwg-mimetype": ["whatwg-mimetype@4.0.0", "", {}, "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg=="],
+
+ "whatwg-url": ["whatwg-url@14.2.0", "", { "dependencies": { "tr46": "^5.1.0", "webidl-conversions": "^7.0.0" } }, "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw=="],
+
+ "why-is-node-running": ["why-is-node-running@2.3.0", "", { "dependencies": { "siginfo": "^2.0.0", "stackback": "0.0.2" }, "bin": { "why-is-node-running": "cli.js" } }, "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w=="],
+
+ "ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="],
+
+ "xml-name-validator": ["xml-name-validator@5.0.0", "", {}, "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg=="],
+
+ "xmlchars": ["xmlchars@2.2.0", "", {}, "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw=="],
+
"yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="],
"zustand": ["zustand@5.0.12", "", { "peerDependencies": { "@types/react": ">=18.0.0", "immer": ">=9.0.6", "react": ">=18.0.0", "use-sync-external-store": ">=1.2.0" }, "optionalPeers": ["@types/react", "immer", "react", "use-sync-external-store"] }, "sha512-i77ae3aZq4dhMlRhJVCYgMLKuSiZAaUPAct2AksxQ+gOtimhGMdXljRT21P5BNpeT4kXlLIckvkPM029OljD7g=="],
+ "@asamuzakjp/css-color/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],
+
"@tailwindcss/oxide-wasm32-wasi/@emnapi/core": ["@emnapi/core@1.9.2", "", { "dependencies": { "@emnapi/wasi-threads": "1.2.1", "tslib": "^2.4.0" }, "bundled": true }, "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA=="],
"@tailwindcss/oxide-wasm32-wasi/@emnapi/runtime": ["@emnapi/runtime@1.9.2", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw=="],
@@ -669,5 +867,11 @@
"@tailwindcss/oxide-wasm32-wasi/@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="],
"@tailwindcss/oxide-wasm32-wasi/tslib": ["tslib@2.8.1", "", { "bundled": true }, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
+
+ "@testing-library/dom/aria-query": ["aria-query@5.3.0", "", { "dependencies": { "dequal": "^2.0.3" } }, "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A=="],
+
+ "@testing-library/dom/dom-accessibility-api": ["dom-accessibility-api@0.5.16", "", {}, "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg=="],
+
+ "strip-literal/js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="],
}
}
diff --git a/docs/SPEC.md b/docs/SPEC.md
index b70a571..131ea57 100644
--- a/docs/SPEC.md
+++ b/docs/SPEC.md
@@ -143,3 +143,4 @@ This prevents review from launching a second execution engine that could diverge
* Opened workspace file tabs remain in-memory only; there is still no save-to-disk flow.
* The desktop runtime is required for real project persistence, chat sessions, and CLI-backed turns.
* The provider set remains limited to Codex CLI and Claude Code for this version.
+* **Planned dependencies not yet installed:** `react-markdown`, `react-syntax-highlighter`, and `tauri-plugin-store` are referenced in design documents but are not currently in `package.json` or `Cargo.toml`. Features that depend on them (rich markdown rendering, syntax-highlighted code blocks, native key-value persistence) are aspirational and should not be assumed functional until the dependencies are added.
diff --git a/index.html b/index.html
index bd81a03..b87d7a4 100644
--- a/index.html
+++ b/index.html
@@ -3,6 +3,7 @@
+
SpecForge
diff --git a/package.json b/package.json
index fc39bc8..a1a690a 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "specforge",
- "private": false,
+ "private": true,
"version": "0.1.0",
"type": "module",
"packageManager": "bun@1.3.6",
@@ -9,6 +9,10 @@
"tauri:dev": "tauri dev",
"build": "tsc && vite build",
"typecheck": "tsc --noEmit",
+ "test": "vitest run",
+ "test:watch": "vitest",
+ "lint": "biome check src",
+ "lint:fix": "biome check --fix src",
"preview": "vite preview",
"tauri": "tauri"
},
@@ -25,12 +29,18 @@
},
"devDependencies": {
"@tailwindcss/vite": "^4.1.0",
+ "@testing-library/jest-dom": "^6.6.3",
+ "@testing-library/react": "^16.3.0",
"@tauri-apps/cli": "^2.8.0",
"@types/node": "^24.0.0",
"@types/react": "^19.0.0",
"@types/react-dom": "^19.0.0",
"@vitejs/plugin-react": "^5.0.0",
"typescript": "^5.9.0",
- "vite": "^7.0.0"
+ "jsdom": "^26.1.0",
+ "tailwindcss": "^4.1.0",
+ "vite": "^7.0.0",
+ "@biomejs/biome": "^2.0.0",
+ "vitest": "^3.2.1"
}
}
\ No newline at end of file
diff --git a/src-tauri/src/chat.rs b/src-tauri/src/chat.rs
deleted file mode 100644
index 8983798..0000000
--- a/src-tauri/src/chat.rs
+++ /dev/null
@@ -1,1312 +0,0 @@
-use crate::{
- documents::parse_workspace_document,
- environment::{current_timestamp, resolve_cli_binary},
- generation::{
- create_spec_generation_temp_dir, format_process_failure, map_claude_reasoning,
- map_codex_reasoning, run_command_with_stdin,
- },
- git::git_get_diff_for_root,
- models::{
- ChatContextItem, ChatEventPayload, ChatMessage, ChatRuntimeState, ChatSessionIndexPayload,
- ChatSessionSnapshot, ChatSessionSummary, ProjectSettings,
- },
- paths::resolve_relative_path_under_root,
- project::{
- build_default_project_settings, load_project_settings_from_workspace_root,
- normalize_project_model, normalize_project_reasoning,
- },
- state::{ChatExecutionRuntime, SharedState, WorkspaceContext},
-};
-use std::{
- collections::BTreeSet,
- fs,
- path::{Path, PathBuf},
- process::{Command, Stdio},
- sync::{
- atomic::{AtomicU64, Ordering},
- Arc,
- },
- thread,
- time::{SystemTime, UNIX_EPOCH},
-};
-use tauri::{AppHandle, Emitter, State};
-
-const SESSION_DIRECTORY_RELATIVE_PATH: &str = ".specforge/sessions";
-const SESSION_INDEX_FILE_NAME: &str = "index.json";
-const CAVEMAN_PREAMBLE: &str =
- "Default response style: caveman. Keep prose terse and direct while leaving code blocks, commands, and diffs fully normal.";
-
-static SESSION_COUNTER: AtomicU64 = AtomicU64::new(0);
-
-#[tauri::command]
-pub(crate) fn create_chat_session(
- state: State,
- title: Option,
-) -> Result {
- let workspace = active_workspace_context(&state)?;
- let settings = load_workspace_project_settings(&workspace.root)?;
- let mut index = load_chat_session_index(&workspace.root)?;
- let timestamp = current_timestamp();
- let session_id = create_chat_entity_id("session");
- let next_title = normalized_title(title.as_deref())
- .unwrap_or_else(|| format!("Topic {}", index.sessions.len() + 1));
-
- let snapshot = ChatSessionSnapshot {
- id: session_id.clone(),
- title: next_title,
- created_at: timestamp.clone(),
- updated_at: timestamp,
- status: String::from("idle"),
- last_message_preview: String::new(),
- selected_model: settings.selected_model.clone(),
- selected_reasoning: settings.selected_reasoning.clone(),
- autonomy_mode: String::from("milestone"),
- context_items: build_default_context_items(&settings),
- messages: Vec::new(),
- runtime: ChatRuntimeState::default(),
- };
-
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
- upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
- index.last_active_session_id = Some(session_id);
- write_chat_session_index(&workspace.root, &index)?;
-
- Ok(snapshot)
-}
-
-#[tauri::command]
-pub(crate) fn load_chat_session(
- state: State,
- session_id: String,
-) -> Result {
- let workspace = active_workspace_context(&state)?;
- let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
- let mut index = load_chat_session_index(&workspace.root)?;
- index.last_active_session_id = Some(session_id);
- upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
- write_chat_session_index(&workspace.root, &index)?;
- Ok(snapshot)
-}
-
-#[tauri::command]
-pub(crate) fn save_chat_session(
- state: State,
- session_id: String,
- selected_model: String,
- selected_reasoning: String,
- autonomy_mode: String,
- context_items: Vec,
-) -> Result {
- let workspace = active_workspace_context(&state)?;
- let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
- snapshot.selected_model =
- normalize_project_model(&selected_model, &snapshot.selected_model)?;
- snapshot.selected_reasoning =
- normalize_project_reasoning(&selected_reasoning, &snapshot.selected_reasoning)?;
- snapshot.autonomy_mode = normalize_autonomy_mode(&autonomy_mode);
- snapshot.context_items = normalize_context_items(context_items);
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
-
- let mut index = load_chat_session_index(&workspace.root)?;
- upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
- write_chat_session_index(&workspace.root, &index)?;
-
- Ok(snapshot)
-}
-
-#[tauri::command]
-pub(crate) fn rename_chat_session(
- state: State,
- session_id: String,
- title: String,
-) -> Result {
- let workspace = active_workspace_context(&state)?;
- let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
- snapshot.title = normalized_title(Some(&title))
- .ok_or_else(|| String::from("A non-empty session title is required."))?;
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
-
- let summary = summarize_session(&snapshot);
- let mut index = load_chat_session_index(&workspace.root)?;
- upsert_chat_session_summary(&mut index, summary.clone());
- write_chat_session_index(&workspace.root, &index)?;
-
- Ok(summary)
-}
-
-#[tauri::command]
-pub(crate) fn delete_chat_session(
- state: State,
- session_id: String,
-) -> Result {
- let workspace = active_workspace_context(&state)?;
- let session_path = session_snapshot_path(&workspace.root, &session_id);
-
- if session_path.exists() {
- fs::remove_file(&session_path).map_err(|error| {
- format!(
- "Unable to delete chat session {}: {error}",
- session_path.display()
- )
- })?;
- }
-
- let mut index = load_chat_session_index(&workspace.root)?;
- index.sessions.retain(|entry| entry.id != session_id);
-
- if index
- .last_active_session_id
- .as_ref()
- .is_some_and(|active_id| active_id == &session_id)
- {
- index.last_active_session_id = index.sessions.first().map(|entry| entry.id.clone());
- }
-
- write_chat_session_index(&workspace.root, &index)?;
- Ok(index)
-}
-
-#[tauri::command]
-pub(crate) fn approve_chat_session(
- state: State,
- session_id: String,
-) -> Result<(), String> {
- let mut controls = state
- .chat_runtime
- .control
- .lock()
- .map_err(|_| String::from("Chat execution lock was poisoned."))?;
- let control = controls.entry(session_id).or_default();
- control.awaiting_approval = false;
- state.chat_runtime.signal.notify_all();
- Ok(())
-}
-
-#[tauri::command]
-pub(crate) fn stop_chat_session(
- state: State,
- session_id: String,
-) -> Result<(), String> {
- let mut controls = state
- .chat_runtime
- .control
- .lock()
- .map_err(|_| String::from("Chat execution lock was poisoned."))?;
- let control = controls.entry(session_id).or_default();
- control.stop_requested = true;
- control.awaiting_approval = false;
- state.chat_runtime.signal.notify_all();
- Ok(())
-}
-
-#[tauri::command]
-pub(crate) fn send_chat_message(
- app: AppHandle,
- state: State,
- session_id: String,
- message: String,
- claude_path: Option,
- codex_path: Option,
-) -> Result<(), String> {
- let trimmed_message = message.trim().to_string();
-
- if trimmed_message.is_empty() {
- return Err(String::from("A message is required before sending."));
- }
-
- let workspace = active_workspace_context(&state)?;
- let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
-
- if snapshot.runtime.is_busy || snapshot.runtime.awaiting_approval {
- return Err(String::from(
- "This topic is still waiting on the current turn. Approve or stop it before sending another message.",
- ));
- }
-
- let run_id = {
- let mut controls = state
- .chat_runtime
- .control
- .lock()
- .map_err(|_| String::from("Chat execution lock was poisoned."))?;
- let control = controls.entry(session_id.clone()).or_default();
- control.run_id = control.run_id.wrapping_add(1);
- control.stop_requested = false;
- control.awaiting_approval = false;
- control.run_id
- };
-
- let runtime = state.chat_runtime.clone();
- thread::spawn(move || {
- run_chat_turn(
- app,
- runtime,
- workspace,
- session_id,
- run_id,
- trimmed_message,
- claude_path,
- codex_path,
- );
- });
-
- Ok(())
-}
-
-pub(crate) fn load_chat_session_index(
- workspace_root: &Path,
-) -> Result {
- let index_path = session_index_path(workspace_root);
-
- if !index_path.exists() {
- return Ok(ChatSessionIndexPayload {
- sessions: Vec::new(),
- last_active_session_id: None,
- });
- }
-
- let raw_value = fs::read_to_string(&index_path).map_err(|error| {
- format!(
- "Unable to read the chat session index {}: {error}",
- index_path.display()
- )
- })?;
-
- serde_json::from_str::(&raw_value).map_err(|error| {
- format!(
- "Unable to parse the chat session index {}: {error}",
- index_path.display()
- )
- })
-}
-
-fn run_chat_turn(
- app: AppHandle,
- runtime: Arc,
- workspace: WorkspaceContext,
- session_id: String,
- run_id: u64,
- user_message: String,
- claude_path: Option,
- codex_path: Option,
-) {
- let result = (|| -> Result<(), String> {
- let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
- snapshot.messages.push(ChatMessage {
- id: create_chat_entity_id("msg"),
- role: String::from("user"),
- content: user_message.clone(),
- created_at: current_timestamp(),
- });
- snapshot.status = String::from("executing");
- snapshot.last_message_preview = build_message_preview(&user_message);
- snapshot.updated_at = current_timestamp();
- snapshot.runtime.status = String::from("executing");
- snapshot.runtime.is_busy = true;
- snapshot.runtime.awaiting_approval = false;
- snapshot.runtime.last_error = None;
- snapshot.runtime.pending_request = None;
- snapshot.runtime.execution_summary =
- Some(String::from("Preparing context and launching the selected CLI."));
- snapshot.runtime.pending_diff = None;
- snapshot.runtime.current_milestone = Some(String::from("Queue Turn"));
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
- refresh_index_summary(&workspace.root, &snapshot)?;
- emit_session_event(
- &app,
- &session_id,
- "messageStarted",
- Some(snapshot.clone()),
- None,
- None,
- Some(snapshot.runtime.clone()),
- );
-
- append_terminal_line(
- &app,
- &workspace.root,
- &session_id,
- &mut snapshot,
- "Queued the new user turn and resolved the session context.",
- )?;
-
- if matches!(stop_state(&runtime, &session_id, run_id), ChatStopState::StopRequested) {
- halt_session(
- &app,
- &workspace.root,
- &session_id,
- &mut snapshot,
- "Turn stopped before execution began.",
- )?;
- return Ok(());
- }
-
- if snapshot.autonomy_mode == "stepped" {
- execute_chat_phase(
- &app,
- &workspace,
- &session_id,
- &runtime,
- run_id,
- &mut snapshot,
- &user_message,
- &claude_path,
- &codex_path,
- ChatExecutionPhase::Proposal,
- )?;
- snapshot.runtime.awaiting_approval = true;
- snapshot.runtime.is_busy = true;
- snapshot.runtime.status = String::from("awaiting_approval");
- snapshot.runtime.pending_request =
- Some(String::from("Approve the proposal to rerun this turn with write access."));
- snapshot.runtime.execution_summary = Some(String::from(
- "Stepped mode paused after the proposal phase. Approve to rerun the turn with write access.",
- ));
- snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
- snapshot.updated_at = current_timestamp();
- snapshot.status = String::from("awaiting_approval");
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
- refresh_index_summary(&workspace.root, &snapshot)?;
- emit_session_event(
- &app,
- &session_id,
- "approvalRequired",
- Some(snapshot.clone()),
- None,
- None,
- Some(snapshot.runtime.clone()),
- );
-
- match wait_for_approval(&runtime, &session_id, run_id)? {
- ApprovalOutcome::Approved => {
- snapshot.runtime.awaiting_approval = false;
- snapshot.runtime.status = String::from("executing");
- snapshot.runtime.pending_request = None;
- snapshot.runtime.execution_summary = Some(String::from(
- "Approval received. Replaying the turn with write access enabled.",
- ));
- snapshot.status = String::from("executing");
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
- refresh_index_summary(&workspace.root, &snapshot)?;
- }
- ApprovalOutcome::StopRequested => {
- halt_session(
- &app,
- &workspace.root,
- &session_id,
- &mut snapshot,
- "Turn stopped during the stepped approval gate.",
- )?;
- return Ok(());
- }
- ApprovalOutcome::Replaced => return Ok(()),
- }
-
- execute_chat_phase(
- &app,
- &workspace,
- &session_id,
- &runtime,
- run_id,
- &mut snapshot,
- &user_message,
- &claude_path,
- &codex_path,
- ChatExecutionPhase::Write,
- )?;
- } else {
- execute_chat_phase(
- &app,
- &workspace,
- &session_id,
- &runtime,
- run_id,
- &mut snapshot,
- &user_message,
- &claude_path,
- &codex_path,
- ChatExecutionPhase::Write,
- )?;
- }
-
- if snapshot.autonomy_mode == "milestone" {
- snapshot.runtime.awaiting_approval = true;
- snapshot.runtime.is_busy = true;
- snapshot.runtime.status = String::from("awaiting_approval");
- snapshot.runtime.execution_summary = Some(String::from(
- "Milestone mode paused after this turn. Review the current diff before the next prompt.",
- ));
- snapshot.runtime.pending_request =
- Some(String::from("Approve the current diff to unlock the next turn."));
- snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
- snapshot.updated_at = current_timestamp();
- snapshot.status = String::from("awaiting_approval");
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
- refresh_index_summary(&workspace.root, &snapshot)?;
- emit_session_event(
- &app,
- &session_id,
- "approvalRequired",
- Some(snapshot.clone()),
- None,
- None,
- Some(snapshot.runtime.clone()),
- );
-
- match wait_for_approval(&runtime, &session_id, run_id)? {
- ApprovalOutcome::Approved => {
- snapshot.runtime.awaiting_approval = false;
- snapshot.runtime.pending_request = None;
- snapshot.runtime.execution_summary = Some(String::from(
- "Diff approved. The topic is ready for the next prompt.",
- ));
- }
- ApprovalOutcome::StopRequested => {
- halt_session(
- &app,
- &workspace.root,
- &session_id,
- &mut snapshot,
- "Turn stopped during the milestone approval gate.",
- )?;
- return Ok(());
- }
- ApprovalOutcome::Replaced => return Ok(()),
- }
- }
-
- snapshot.runtime.status = String::from("completed");
- snapshot.runtime.is_busy = false;
- snapshot.runtime.awaiting_approval = false;
- snapshot.runtime.pending_request = None;
- snapshot.runtime.current_milestone = Some(String::from("Complete"));
- snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
- snapshot.runtime.execution_summary = Some(String::from(
- "Turn completed. The transcript, terminal stream, and current diff are ready.",
- ));
- snapshot.status = String::from("completed");
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(&workspace.root, &snapshot)?;
- refresh_index_summary(&workspace.root, &snapshot)?;
- emit_session_event(
- &app,
- &session_id,
- "completed",
- Some(snapshot),
- None,
- None,
- None,
- );
-
- Ok(())
- })();
-
- if let Err(error) = result {
- let _ = mark_session_error(&app, &workspace.root, &session_id, error);
- }
-}
-
-fn execute_chat_phase(
- app: &AppHandle,
- workspace: &WorkspaceContext,
- session_id: &str,
- runtime: &Arc,
- run_id: u64,
- snapshot: &mut ChatSessionSnapshot,
- user_message: &str,
- claude_path: &Option,
- codex_path: &Option,
- phase: ChatExecutionPhase,
-) -> Result<(), String> {
- if !matches!(stop_state(runtime, session_id, run_id), ChatStopState::Continue) {
- halt_session(
- app,
- &workspace.root,
- session_id,
- snapshot,
- "Turn stopped before the provider phase finished.",
- )?;
- return Ok(());
- }
-
- let phase_copy = phase.copy();
- snapshot.runtime.current_milestone = Some(String::from(phase_copy.milestone()));
- snapshot.runtime.execution_summary = Some(String::from(phase_copy.summary()));
- write_chat_session_snapshot(&workspace.root, snapshot)?;
- refresh_index_summary(&workspace.root, snapshot)?;
- append_terminal_line(app, &workspace.root, session_id, snapshot, phase_copy.line())?;
-
- let context_blocks = build_context_blocks(workspace, snapshot)?;
- let prompt_payload = build_chat_prompt(snapshot, &context_blocks, user_message, phase_copy);
- let assistant_content = run_chat_provider_request(
- &workspace.root,
- &snapshot.selected_model,
- &snapshot.selected_reasoning,
- phase_copy,
- &prompt_payload,
- claude_path.as_deref(),
- codex_path.as_deref(),
- )?;
-
- let assistant_message = ChatMessage {
- id: create_chat_entity_id("msg"),
- role: String::from("assistant"),
- content: assistant_content.trim().to_string(),
- created_at: current_timestamp(),
- };
- snapshot.messages.push(assistant_message.clone());
- snapshot.last_message_preview = build_message_preview(&assistant_message.content);
- snapshot.updated_at = current_timestamp();
- snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
- snapshot.runtime.current_milestone = Some(String::from(phase_copy.completed_milestone()));
- snapshot.runtime.execution_summary = Some(String::from(phase_copy.completed_summary()));
- snapshot.status = String::from("executing");
- write_chat_session_snapshot(&workspace.root, snapshot)?;
- refresh_index_summary(&workspace.root, snapshot)?;
- emit_session_event(
- app,
- session_id,
- "messageDelta",
- None,
- Some(assistant_message.content.clone()),
- None,
- None,
- );
- emit_session_event(
- app,
- session_id,
- "sessionUpdated",
- Some(snapshot.clone()),
- Some(assistant_message.content),
- None,
- Some(snapshot.runtime.clone()),
- );
-
- Ok(())
-}
-
-fn build_context_blocks(
- workspace: &WorkspaceContext,
- snapshot: &ChatSessionSnapshot,
-) -> Result, String> {
- let mut blocks = Vec::new();
-
- for item in &snapshot.context_items {
- let content = match item.kind.as_str() {
- "workspace_summary" => build_workspace_summary(workspace),
- _ => {
- let Some(path) = item.path.as_deref() else {
- continue;
- };
- let resolved_path = resolve_relative_path_under_root(&workspace.root, path)?;
-
- if !resolved_path.exists() {
- format!("Missing file at {path}.")
- } else {
- parse_workspace_document(&resolved_path)?
- }
- }
- };
-
- if content.trim().is_empty() {
- continue;
- }
-
- blocks.push((item.label.clone(), content));
- }
-
- Ok(blocks)
-}
-
-fn build_chat_prompt(
- snapshot: &ChatSessionSnapshot,
- context_blocks: &[(String, String)],
- user_message: &str,
- phase: ChatExecutionPhase,
-) -> String {
- let mut prompt = String::new();
- prompt.push_str(CAVEMAN_PREAMBLE);
- prompt.push_str("\n\n");
- prompt.push_str("You are SpecForge Chat, a desktop coding assistant operating on a project-scoped topic.\n");
- prompt.push_str("Keep responses direct. Preserve technical accuracy. Use the attached project context.\n");
- prompt.push_str("Current topic: ");
- prompt.push_str(&snapshot.title);
- prompt.push_str("\nAutonomy mode: ");
- prompt.push_str(&snapshot.autonomy_mode);
- prompt.push_str("\nExecution phase: ");
- prompt.push_str(phase.label());
- prompt.push_str("\n");
- prompt.push_str(phase.instructions());
-
- if !context_blocks.is_empty() {
- prompt.push_str("\n\nAttached context:\n");
-
- for (label, content) in context_blocks {
- prompt.push_str("\n### ");
- prompt.push_str(label);
- prompt.push('\n');
- prompt.push_str(content.trim());
- prompt.push('\n');
- }
- }
-
- if !snapshot.messages.is_empty() {
- prompt.push_str("\nConversation so far:\n");
-
- for message in &snapshot.messages {
- prompt.push_str("\n");
- prompt.push_str(&message.role.to_uppercase());
- prompt.push_str(": ");
- prompt.push_str(message.content.trim());
- prompt.push('\n');
- }
- } else {
- prompt.push_str("\nConversation so far:\n\nNo prior turns yet.\n");
- }
-
- prompt.push_str("\nCurrent user request:\n");
- prompt.push_str(user_message.trim());
- prompt.push('\n');
- prompt
-}
-
-fn run_chat_provider_request(
- workspace_root: &Path,
- model: &str,
- reasoning: &str,
- phase: ChatExecutionPhase,
- prompt_payload: &str,
- claude_path: Option<&str>,
- codex_path: Option<&str>,
-) -> Result {
- if model.starts_with("claude") {
- run_claude_chat_request(
- workspace_root,
- &resolve_cli_binary("claude", claude_path)?,
- model,
- reasoning,
- phase,
- prompt_payload,
- )
- } else {
- run_codex_chat_request(
- workspace_root,
- &resolve_cli_binary("codex", codex_path)?,
- model,
- reasoning,
- phase,
- prompt_payload,
- )
- }
-}
-
-fn run_codex_chat_request(
- workspace_root: &Path,
- binary_path: &Path,
- model: &str,
- reasoning: &str,
- phase: ChatExecutionPhase,
- prompt_payload: &str,
-) -> Result {
- let temp_dir = create_spec_generation_temp_dir("codex-chat")?;
- let output_path = temp_dir.join("assistant-message.md");
- let mut command = Command::new(binary_path);
- command
- .current_dir(workspace_root)
- .stdin(Stdio::piped())
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .arg("exec")
- .arg("--color")
- .arg("never")
- .arg("--skip-git-repo-check")
- .arg("--sandbox")
- .arg(phase.codex_sandbox())
- .arg("--model")
- .arg(model)
- .arg("--config")
- .arg(format!(
- "model_reasoning_effort=\"{}\"",
- map_codex_reasoning(reasoning)
- ))
- .arg("--output-last-message")
- .arg(&output_path);
-
- let output = run_command_with_stdin(&mut command, "Codex CLI", prompt_payload)?;
-
- if !output.status.success() {
- let _ = fs::remove_dir_all(&temp_dir);
- return Err(format_process_failure("Codex CLI", &output));
- }
-
- let result = fs::read_to_string(&output_path).or_else(|_| {
- let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
-
- if stdout.is_empty() {
- Err(std::io::Error::new(
- std::io::ErrorKind::Other,
- "The Codex CLI returned no assistant content.",
- ))
- } else {
- Ok(stdout)
- }
- });
- let _ = fs::remove_dir_all(&temp_dir);
-
- result.map_err(|error| format!("Unable to read the Codex assistant output: {error}"))
-}
-
-fn run_claude_chat_request(
- workspace_root: &Path,
- binary_path: &Path,
- model: &str,
- reasoning: &str,
- phase: ChatExecutionPhase,
- prompt_payload: &str,
-) -> Result {
- let mut command = Command::new(binary_path);
- command
- .current_dir(workspace_root)
- .stdin(Stdio::piped())
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .arg("--print")
- .arg("Respond to the request provided on stdin.")
- .arg("--model")
- .arg(model)
- .arg("--output-format")
- .arg("text")
- .arg("--permission-mode")
- .arg(phase.claude_permission_mode())
- .arg("--max-turns")
- .arg("8")
- .arg("--effort")
- .arg(map_claude_reasoning(reasoning));
-
- let output = run_command_with_stdin(&mut command, "Claude CLI", prompt_payload)?;
-
- if !output.status.success() {
- return Err(format_process_failure("Claude CLI", &output));
- }
-
- Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
-}
-
-fn refresh_index_summary(workspace_root: &Path, snapshot: &ChatSessionSnapshot) -> Result<(), String> {
- let mut index = load_chat_session_index(workspace_root)?;
- upsert_chat_session_summary(&mut index, summarize_session(snapshot));
- if index.last_active_session_id.is_none() {
- index.last_active_session_id = Some(snapshot.id.clone());
- }
- write_chat_session_index(workspace_root, &index)
-}
-
-fn append_terminal_line(
- app: &AppHandle,
- workspace_root: &Path,
- session_id: &str,
- snapshot: &mut ChatSessionSnapshot,
- line: &str,
-) -> Result<(), String> {
- snapshot.runtime.terminal_output.push(line.to_string());
-
- if snapshot.runtime.terminal_output.len() > 240 {
- let keep_from = snapshot.runtime.terminal_output.len() - 240;
- snapshot.runtime.terminal_output.drain(0..keep_from);
- }
-
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(workspace_root, snapshot)?;
- refresh_index_summary(workspace_root, snapshot)?;
- emit_session_event(
- app,
- session_id,
- "terminalLine",
- None,
- None,
- Some(line.to_string()),
- Some(snapshot.runtime.clone()),
- );
- Ok(())
-}
-
-fn halt_session(
- app: &AppHandle,
- workspace_root: &Path,
- session_id: &str,
- snapshot: &mut ChatSessionSnapshot,
- message: &str,
-) -> Result<(), String> {
- snapshot.status = String::from("halted");
- snapshot.runtime.status = String::from("halted");
- snapshot.runtime.is_busy = false;
- snapshot.runtime.awaiting_approval = false;
- snapshot.runtime.pending_request = None;
- snapshot.runtime.execution_summary = Some(message.to_string());
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(workspace_root, snapshot)?;
- refresh_index_summary(workspace_root, snapshot)?;
- emit_session_event(
- app,
- session_id,
- "halted",
- Some(snapshot.clone()),
- None,
- None,
- Some(snapshot.runtime.clone()),
- );
- Ok(())
-}
-
-fn mark_session_error(
- app: &AppHandle,
- workspace_root: &Path,
- session_id: &str,
- error: String,
-) -> Result<(), String> {
- let mut snapshot = read_chat_session_snapshot(workspace_root, session_id)?;
- snapshot.status = String::from("error");
- snapshot.runtime.status = String::from("error");
- snapshot.runtime.is_busy = false;
- snapshot.runtime.awaiting_approval = false;
- snapshot.runtime.pending_request = None;
- snapshot.runtime.last_error = Some(error.clone());
- snapshot.runtime.execution_summary = Some(error.clone());
- snapshot.updated_at = current_timestamp();
- write_chat_session_snapshot(workspace_root, &snapshot)?;
- refresh_index_summary(workspace_root, &snapshot)?;
- emit_session_event(
- app,
- session_id,
- "error",
- Some(snapshot),
- Some(error),
- None,
- None,
- );
- Ok(())
-}
-
-fn emit_session_event(
- app: &AppHandle,
- session_id: &str,
- event_type: &str,
- session: Option,
- message_delta: Option,
- terminal_line: Option,
- runtime: Option,
-) {
- let summary = session.as_ref().map(summarize_session);
- let message = session
- .as_ref()
- .and_then(|snapshot| snapshot.messages.last().cloned());
- let payload = ChatEventPayload {
- session_id: session_id.to_string(),
- event_type: event_type.to_string(),
- message,
- message_delta,
- terminal_line,
- session,
- runtime,
- summary,
- };
-
- let _ = app.emit("chat-session-event", payload);
-}
-
-fn wait_for_approval(
- runtime: &Arc,
- session_id: &str,
- run_id: u64,
-) -> Result {
- let mut controls = runtime
- .control
- .lock()
- .map_err(|_| String::from("Chat execution lock was poisoned."))?;
- let control = controls.entry(session_id.to_string()).or_default();
- control.awaiting_approval = true;
- runtime.signal.notify_all();
-
- loop {
- let current = controls.entry(session_id.to_string()).or_default().clone();
-
- if current.run_id != run_id {
- return Ok(ApprovalOutcome::Replaced);
- }
-
- if current.stop_requested {
- return Ok(ApprovalOutcome::StopRequested);
- }
-
- if !current.awaiting_approval {
- return Ok(ApprovalOutcome::Approved);
- }
-
- controls = runtime
- .signal
- .wait(controls)
- .map_err(|_| String::from("Chat execution lock was poisoned."))?;
- }
-}
-
-fn stop_state(runtime: &Arc, session_id: &str, run_id: u64) -> ChatStopState {
- runtime
- .control
- .lock()
- .map(|controls| {
- let Some(control) = controls.get(session_id) else {
- return ChatStopState::Continue;
- };
-
- if control.run_id != run_id {
- ChatStopState::Replaced
- } else if control.stop_requested {
- ChatStopState::StopRequested
- } else {
- ChatStopState::Continue
- }
- })
- .unwrap_or(ChatStopState::StopRequested)
-}
-
-fn active_workspace_context(state: &State) -> Result {
- state
- .workspace
- .lock()
- .map_err(|_| String::from("Workspace lock was poisoned."))?
- .clone()
- .ok_or_else(|| String::from("No workspace folder is currently open."))
-}
-
-fn load_workspace_project_settings(workspace_root: &Path) -> Result {
- let defaults = build_default_project_settings(workspace_root, None, None);
- load_project_settings_from_workspace_root(workspace_root, defaults).map(|(settings, _)| settings)
-}
-
-fn build_default_context_items(settings: &ProjectSettings) -> Vec {
- let mut items = vec![
- build_context_item("prd", "PRD", Some(settings.prd_path.clone()), true),
- build_context_item("spec", "SPEC", Some(settings.spec_path.clone()), true),
- build_context_item("workspace_summary", "Workspace Tree Summary", None, true),
- ];
-
- for path in &settings.supporting_document_paths {
- items.push(build_context_item(
- "supporting_document",
- &format!("Supporting: {path}"),
- Some(path.clone()),
- true,
- ));
- }
-
- normalize_context_items(items)
-}
-
-fn build_context_item(
- kind: &str,
- label: &str,
- path: Option,
- is_default: bool,
-) -> ChatContextItem {
- ChatContextItem {
- id: create_chat_entity_id("ctx"),
- kind: kind.to_string(),
- label: label.to_string(),
- path,
- is_default,
- }
-}
-
-fn normalize_context_items(items: Vec) -> Vec {
- let mut seen = BTreeSet::::new();
- let mut normalized_items = Vec::new();
-
- for item in items {
- let dedupe_key = format!(
- "{}::{}",
- item.kind,
- item.path.as_deref().unwrap_or(item.label.as_str())
- );
-
- if !seen.insert(dedupe_key) {
- continue;
- }
-
- normalized_items.push(ChatContextItem {
- id: if item.id.trim().is_empty() {
- create_chat_entity_id("ctx")
- } else {
- item.id
- },
- kind: item.kind.trim().to_string(),
- label: item.label.trim().to_string(),
- path: item.path.and_then(|value| {
- let trimmed_value = value.trim().replace('\\', "/");
- (!trimmed_value.is_empty()).then_some(trimmed_value)
- }),
- is_default: item.is_default,
- });
- }
-
- normalized_items
-}
-
-fn summarize_session(snapshot: &ChatSessionSnapshot) -> ChatSessionSummary {
- ChatSessionSummary {
- id: snapshot.id.clone(),
- title: snapshot.title.clone(),
- created_at: snapshot.created_at.clone(),
- updated_at: snapshot.updated_at.clone(),
- status: snapshot.status.clone(),
- last_message_preview: snapshot.last_message_preview.clone(),
- selected_model: snapshot.selected_model.clone(),
- selected_reasoning: snapshot.selected_reasoning.clone(),
- autonomy_mode: snapshot.autonomy_mode.clone(),
- }
-}
-
-fn upsert_chat_session_summary(
- index: &mut ChatSessionIndexPayload,
- summary: ChatSessionSummary,
-) {
- if let Some(existing_summary) = index.sessions.iter_mut().find(|entry| entry.id == summary.id) {
- *existing_summary = summary;
- } else {
- index.sessions.push(summary);
- }
-
- index.sessions.sort_by(|left, right| right.updated_at.cmp(&left.updated_at));
-}
-
-fn write_chat_session_index(
- workspace_root: &Path,
- index: &ChatSessionIndexPayload,
-) -> Result<(), String> {
- ensure_session_directory(workspace_root)?;
- let encoded = serde_json::to_string_pretty(index)
- .map_err(|error| format!("Unable to encode the chat session index: {error}"))?;
- fs::write(session_index_path(workspace_root), encoded.as_bytes()).map_err(|error| {
- format!(
- "Unable to write the chat session index {}: {error}",
- session_index_path(workspace_root).display()
- )
- })
-}
-
-fn read_chat_session_snapshot(
- workspace_root: &Path,
- session_id: &str,
-) -> Result {
- let session_path = session_snapshot_path(workspace_root, session_id);
- let raw_value = fs::read_to_string(&session_path).map_err(|error| {
- format!(
- "Unable to read the chat session {}: {error}",
- session_path.display()
- )
- })?;
-
- serde_json::from_str::(&raw_value).map_err(|error| {
- format!(
- "Unable to parse the chat session {}: {error}",
- session_path.display()
- )
- })
-}
-
-fn write_chat_session_snapshot(
- workspace_root: &Path,
- snapshot: &ChatSessionSnapshot,
-) -> Result<(), String> {
- ensure_session_directory(workspace_root)?;
- let encoded = serde_json::to_string_pretty(snapshot)
- .map_err(|error| format!("Unable to encode the chat session {}: {error}", snapshot.id))?;
- fs::write(session_snapshot_path(workspace_root, &snapshot.id), encoded.as_bytes()).map_err(
- |error| {
- format!(
- "Unable to write the chat session {}: {error}",
- session_snapshot_path(workspace_root, &snapshot.id).display()
- )
- },
- )
-}
-
-fn ensure_session_directory(workspace_root: &Path) -> Result<(), String> {
- let sessions_path = sessions_directory_path(workspace_root);
- fs::create_dir_all(&sessions_path).map_err(|error| {
- format!(
- "Unable to create the chat session directory {}: {error}",
- sessions_path.display()
- )
- })
-}
-
-fn sessions_directory_path(workspace_root: &Path) -> PathBuf {
- workspace_root.join(SESSION_DIRECTORY_RELATIVE_PATH)
-}
-
-fn session_index_path(workspace_root: &Path) -> PathBuf {
- sessions_directory_path(workspace_root).join(SESSION_INDEX_FILE_NAME)
-}
-
-fn session_snapshot_path(workspace_root: &Path, session_id: &str) -> PathBuf {
- sessions_directory_path(workspace_root).join(format!("{session_id}.json"))
-}
-
-fn build_workspace_summary(workspace: &WorkspaceContext) -> String {
- let mut paths = workspace.files.keys().cloned().collect::>();
- paths.sort();
-
- if paths.is_empty() {
- return String::from("No workspace files were discovered for this project.");
- }
-
- let mut summary = String::from("Workspace files:\n");
-
- for path in paths.iter().take(180) {
- summary.push_str("- ");
- summary.push_str(path);
- summary.push('\n');
- }
-
- if paths.len() > 180 {
- summary.push_str(&format!(
- "- ... and {} more files not shown in this summary.\n",
- paths.len() - 180
- ));
- }
-
- summary
-}
-
-fn normalized_title(title: Option<&str>) -> Option {
- title
- .map(str::trim)
- .filter(|value| !value.is_empty())
- .map(|value| value.replace('\n', " "))
-}
-
-fn normalize_autonomy_mode(value: &str) -> String {
- match value.trim() {
- "stepped" => String::from("stepped"),
- "god_mode" => String::from("god_mode"),
- _ => String::from("milestone"),
- }
-}
-
-fn build_message_preview(value: &str) -> String {
- let collapsed = value.split_whitespace().collect::>().join(" ");
- let mut preview = collapsed.chars().take(120).collect::();
-
- if collapsed.chars().count() > 120 {
- preview.push('…');
- }
-
- preview
-}
-
-fn create_chat_entity_id(prefix: &str) -> String {
- let millis = SystemTime::now()
- .duration_since(UNIX_EPOCH)
- .map(|duration| duration.as_millis())
- .unwrap_or_default();
- let counter = SESSION_COUNTER.fetch_add(1, Ordering::Relaxed);
- format!("{prefix}-{millis:x}-{counter:x}")
-}
-
-#[derive(Clone, Copy)]
-enum ChatExecutionPhase {
- Proposal,
- Write,
-}
-
-impl ChatExecutionPhase {
- fn copy(self) -> Self {
- self
- }
-
- fn label(self) -> &'static str {
- match self {
- Self::Proposal => "proposal",
- Self::Write => "write",
- }
- }
-
- fn milestone(self) -> &'static str {
- match self {
- Self::Proposal => "Proposal Pass",
- Self::Write => "Execution Pass",
- }
- }
-
- fn completed_milestone(self) -> &'static str {
- match self {
- Self::Proposal => "Proposal Complete",
- Self::Write => "Execution Complete",
- }
- }
-
- fn summary(self) -> &'static str {
- match self {
- Self::Proposal => {
- "Running a read-only pass to propose the patch or command plan before approval."
- }
- Self::Write => "Running the selected CLI against the project workspace.",
- }
- }
-
- fn completed_summary(self) -> &'static str {
- match self {
- Self::Proposal => "Proposal phase completed. Review the suggested plan before continuing.",
- Self::Write => "Provider turn completed. Refresh the diff and transcript before continuing.",
- }
- }
-
- fn line(self) -> &'static str {
- match self {
- Self::Proposal => {
- "Launching the proposal pass with read-only permissions and the attached project context."
- }
- Self::Write => "Launching the write pass with the configured autonomy permissions.",
- }
- }
-
- fn instructions(self) -> &'static str {
- match self {
- Self::Proposal => {
- "Proposal-only pass. Do not mutate files or run write commands. Produce the clearest patch or command plan you would execute after approval."
- }
- Self::Write => {
- "Write-enabled pass. You may edit files and run commands that fit the current autonomy mode. Summarize what changed and call out any blockers."
- }
- }
- }
-
- fn codex_sandbox(self) -> &'static str {
- match self {
- Self::Proposal => "read-only",
- Self::Write => "workspace-write",
- }
- }
-
- fn claude_permission_mode(self) -> &'static str {
- match self {
- Self::Proposal => "default",
- Self::Write => "acceptEdits",
- }
- }
-}
-
-enum ChatStopState {
- Continue,
- StopRequested,
- Replaced,
-}
-
-enum ApprovalOutcome {
- Approved,
- StopRequested,
- Replaced,
-}
diff --git a/src-tauri/src/chat/commands.rs b/src-tauri/src/chat/commands.rs
new file mode 100644
index 0000000..c06b656
--- /dev/null
+++ b/src-tauri/src/chat/commands.rs
@@ -0,0 +1,242 @@
+use crate::{
+ environment::current_timestamp,
+ models::{
+ AutonomyMode, ChatContextItem, ChatRuntimeState, ChatSessionIndexPayload,
+ ChatSessionSnapshot, ChatSessionSummary, SessionStatus,
+ },
+ project::{normalize_project_model, normalize_project_reasoning},
+ state::SharedState,
+};
+use std::{fs, thread};
+use tauri::{AppHandle, State};
+
+use super::{
+ execution::run_chat_turn,
+ helpers::{
+ active_workspace_context, build_default_context_items, build_message_preview,
+ create_chat_entity_id, load_workspace_project_settings, normalize_autonomy_mode,
+ normalize_context_items, normalized_title, summarize_session,
+ upsert_chat_session_summary,
+ },
+ persistence::{
+ load_chat_session_index, read_chat_session_snapshot, session_snapshot_path,
+ write_chat_session_index, write_chat_session_snapshot,
+ },
+};
+
+#[tauri::command]
+pub(crate) fn create_chat_session(
+ state: State,
+ title: Option,
+) -> Result {
+ let workspace = active_workspace_context(&state)?;
+ let settings = load_workspace_project_settings(&workspace.root)?;
+ let mut index = load_chat_session_index(&workspace.root)?;
+ let timestamp = current_timestamp();
+ let session_id = create_chat_entity_id("session");
+ let next_title = normalized_title(title.as_deref())
+ .unwrap_or_else(|| format!("Topic {}", index.sessions.len() + 1));
+
+ let snapshot = ChatSessionSnapshot {
+ id: session_id.clone(),
+ title: next_title,
+ created_at: timestamp.clone(),
+ updated_at: timestamp,
+ status: SessionStatus::Idle,
+ last_message_preview: String::new(),
+ selected_model: settings.selected_model.clone(),
+ selected_reasoning: settings.selected_reasoning.clone(),
+ autonomy_mode: AutonomyMode::Milestone,
+ context_items: build_default_context_items(&settings),
+ messages: Vec::new(),
+ runtime: ChatRuntimeState::default(),
+ };
+
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
+ index.last_active_session_id = Some(session_id);
+ write_chat_session_index(&workspace.root, &index)?;
+
+ Ok(snapshot)
+}
+
+#[tauri::command]
+pub(crate) fn load_chat_session(
+ state: State,
+ session_id: String,
+) -> Result {
+ let workspace = active_workspace_context(&state)?;
+ let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ let mut index = load_chat_session_index(&workspace.root)?;
+ index.last_active_session_id = Some(session_id);
+ upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
+ write_chat_session_index(&workspace.root, &index)?;
+ Ok(snapshot)
+}
+
+#[tauri::command]
+pub(crate) fn save_chat_session(
+ state: State,
+ session_id: String,
+ selected_model: String,
+ selected_reasoning: String,
+ autonomy_mode: String,
+ context_items: Vec,
+) -> Result {
+ let workspace = active_workspace_context(&state)?;
+ let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ snapshot.selected_model =
+ normalize_project_model(&selected_model, &snapshot.selected_model)?;
+ snapshot.selected_reasoning =
+ normalize_project_reasoning(&selected_reasoning, &snapshot.selected_reasoning)?;
+ snapshot.autonomy_mode = normalize_autonomy_mode(&autonomy_mode);
+ snapshot.context_items = normalize_context_items(context_items);
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+
+ let mut index = load_chat_session_index(&workspace.root)?;
+ upsert_chat_session_summary(&mut index, summarize_session(&snapshot));
+ write_chat_session_index(&workspace.root, &index)?;
+
+ Ok(snapshot)
+}
+
+#[tauri::command]
+pub(crate) fn rename_chat_session(
+ state: State,
+ session_id: String,
+ title: String,
+) -> Result {
+ let workspace = active_workspace_context(&state)?;
+ let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ snapshot.title = normalized_title(Some(&title))
+ .ok_or_else(|| String::from("A non-empty session title is required."))?;
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+
+ let summary = summarize_session(&snapshot);
+ let mut index = load_chat_session_index(&workspace.root)?;
+ upsert_chat_session_summary(&mut index, summary.clone());
+ write_chat_session_index(&workspace.root, &index)?;
+
+ Ok(summary)
+}
+
+#[tauri::command]
+pub(crate) fn delete_chat_session(
+ state: State,
+ session_id: String,
+) -> Result {
+ let workspace = active_workspace_context(&state)?;
+ let session_path = session_snapshot_path(&workspace.root, &session_id);
+
+ if session_path.exists() {
+ fs::remove_file(&session_path).map_err(|error| {
+ format!(
+ "Unable to delete chat session {}: {error}",
+ session_path.display()
+ )
+ })?;
+ }
+
+ let mut index = load_chat_session_index(&workspace.root)?;
+ index.sessions.retain(|entry| entry.id != session_id);
+
+ if index
+ .last_active_session_id
+ .as_ref()
+ .is_some_and(|active_id| active_id == &session_id)
+ {
+ index.last_active_session_id = index.sessions.first().map(|entry| entry.id.clone());
+ }
+
+ write_chat_session_index(&workspace.root, &index)?;
+ Ok(index)
+}
+
+#[tauri::command]
+pub(crate) fn approve_chat_session(
+ state: State,
+ session_id: String,
+) -> Result<(), String> {
+ let mut controls = state
+ .chat_runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id).or_default();
+ control.awaiting_approval = false;
+ state.chat_runtime.signal.notify_all();
+ Ok(())
+}
+
+#[tauri::command]
+pub(crate) fn stop_chat_session(
+ state: State,
+ session_id: String,
+) -> Result<(), String> {
+ let mut controls = state
+ .chat_runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id).or_default();
+ control.stop_requested = true;
+ control.awaiting_approval = false;
+ state.chat_runtime.signal.notify_all();
+ Ok(())
+}
+
+#[tauri::command]
+pub(crate) fn send_chat_message(
+ app: AppHandle,
+ state: State,
+ session_id: String,
+ message: String,
+ claude_path: Option,
+ codex_path: Option,
+) -> Result<(), String> {
+ let trimmed_message = message.trim().to_string();
+
+ if trimmed_message.is_empty() {
+ return Err(String::from("A message is required before sending."));
+ }
+
+ let workspace = active_workspace_context(&state)?;
+ let snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+
+ if snapshot.runtime.is_busy || snapshot.runtime.awaiting_approval {
+ return Err(String::from(
+ "This topic is still waiting on the current turn. Approve or stop it before sending another message.",
+ ));
+ }
+
+ let run_id = {
+ let mut controls = state
+ .chat_runtime
+ .control
+ .lock()
+ .map_err(|_| String::from("Chat execution lock was poisoned."))?;
+ let control = controls.entry(session_id.clone()).or_default();
+ control.run_id = control.run_id.wrapping_add(1);
+ control.stop_requested = false;
+ control.awaiting_approval = false;
+ control.run_id
+ };
+
+ let runtime = state.chat_runtime.clone();
+ thread::spawn(move || {
+ run_chat_turn(
+ app,
+ runtime,
+ workspace,
+ session_id,
+ run_id,
+ trimmed_message,
+ claude_path,
+ codex_path,
+ );
+ });
+
+ Ok(())
+}
diff --git a/src-tauri/src/chat/execution.rs b/src-tauri/src/chat/execution.rs
new file mode 100644
index 0000000..0d3c1cf
--- /dev/null
+++ b/src-tauri/src/chat/execution.rs
@@ -0,0 +1,724 @@
+use crate::{
+ environment::{current_timestamp, resolve_cli_binary},
+ generation::{
+ create_spec_generation_temp_dir, format_process_failure, map_claude_reasoning,
+ map_codex_reasoning, run_command_with_stdin,
+ },
+ git::git_get_diff_for_root,
+ models::{
+ AutonomyMode, ChatEventPayload, ChatMessage, ChatRuntimeState, ChatSessionSnapshot,
+ MessageRole, SessionStatus,
+ },
+ state::{ChatExecutionRuntime, WorkspaceContext},
+};
+use std::{
+ fs,
+ path::Path,
+ process::{Command, Stdio},
+ sync::Arc,
+};
+use tauri::{AppHandle, Emitter};
+
+use super::{
+ helpers::{
+ build_message_preview, create_chat_entity_id, summarize_session,
+ },
+ persistence::{
+ read_chat_session_snapshot, refresh_index_summary, write_chat_session_snapshot,
+ },
+ prompt::{build_chat_prompt, build_context_blocks},
+};
+
+pub(super) enum ApprovalGateResult {
+ Approved,
+ Stopped,
+ Replaced,
+}
+
+enum ApprovalOutcome {
+ Approved,
+ StopRequested,
+ Replaced,
+}
+
+enum ChatStopState {
+ Continue,
+ StopRequested,
+ Replaced,
+}
+
+#[derive(Clone, Copy)]
+pub(super) enum ChatExecutionPhase {
+ Proposal,
+ Write,
+}
+
+impl ChatExecutionPhase {
+ pub(super) fn label(self) -> &'static str {
+ match self {
+ Self::Proposal => "proposal",
+ Self::Write => "write",
+ }
+ }
+
+ pub(super) fn milestone(self) -> &'static str {
+ match self {
+ Self::Proposal => "Proposal Pass",
+ Self::Write => "Execution Pass",
+ }
+ }
+
+ pub(super) fn completed_milestone(self) -> &'static str {
+ match self {
+ Self::Proposal => "Proposal Complete",
+ Self::Write => "Execution Complete",
+ }
+ }
+
+ pub(super) fn summary(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Running a read-only pass to propose the patch or command plan before approval."
+ }
+ Self::Write => "Running the selected CLI against the project workspace.",
+ }
+ }
+
+ pub(super) fn completed_summary(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Proposal phase completed. Review the suggested plan before continuing."
+ }
+ Self::Write => {
+ "Provider turn completed. Refresh the diff and transcript before continuing."
+ }
+ }
+ }
+
+ pub(super) fn line(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Launching the proposal pass with read-only permissions and the attached project context."
+ }
+ Self::Write => "Launching the write pass with the configured autonomy permissions.",
+ }
+ }
+
+ pub(super) fn instructions(self) -> &'static str {
+ match self {
+ Self::Proposal => {
+ "Proposal-only pass. Do not mutate files or run write commands. Produce the clearest patch or command plan you would execute after approval."
+ }
+ Self::Write => {
+ "Write-enabled pass. You may edit files and run commands that fit the current autonomy mode. Summarize what changed and call out any blockers."
+ }
+ }
+ }
+
+ fn codex_sandbox(self) -> &'static str {
+ match self {
+ Self::Proposal => "read-only",
+ Self::Write => "workspace-write",
+ }
+ }
+
+ fn claude_permission_mode(self) -> &'static str {
+ match self {
+ Self::Proposal => "default",
+ Self::Write => "acceptEdits",
+ }
+ }
+}
+
+pub(super) fn run_approval_gate(
+ app: &AppHandle,
+ workspace: &WorkspaceContext,
+ session_id: &str,
+ runtime: &Arc,
+ run_id: u64,
+ snapshot: &mut ChatSessionSnapshot,
+ pending_request_message: &str,
+ execution_summary_message: &str,
+ halt_message: &str,
+ approved_summary: &str,
+ resume_status: bool,
+) -> Result {
+ snapshot.runtime.awaiting_approval = true;
+ snapshot.runtime.is_busy = true;
+ snapshot.runtime.status = SessionStatus::AwaitingApproval;
+ snapshot.runtime.pending_request = Some(pending_request_message.to_string());
+ snapshot.runtime.execution_summary = Some(execution_summary_message.to_string());
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.updated_at = current_timestamp();
+ snapshot.status = SessionStatus::AwaitingApproval;
+ write_chat_session_snapshot(&workspace.root, snapshot)?;
+ refresh_index_summary(&workspace.root, snapshot)?;
+ emit_session_event(
+ app,
+ session_id,
+ "approvalRequired",
+ Some(snapshot.clone()),
+ None,
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ match wait_for_approval(runtime, session_id, run_id)? {
+ ApprovalOutcome::Approved => {
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.execution_summary = Some(approved_summary.to_string());
+ if resume_status {
+ snapshot.runtime.status = SessionStatus::Executing;
+ snapshot.status = SessionStatus::Executing;
+ }
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, snapshot)?;
+ refresh_index_summary(&workspace.root, snapshot)?;
+ Ok(ApprovalGateResult::Approved)
+ }
+ ApprovalOutcome::StopRequested => {
+ halt_session(app, &workspace.root, session_id, snapshot, halt_message)?;
+ Ok(ApprovalGateResult::Stopped)
+ }
+ ApprovalOutcome::Replaced => Ok(ApprovalGateResult::Replaced),
+ }
+}
+
+pub(super) fn run_chat_turn(
+ app: AppHandle,
+ runtime: Arc,
+ workspace: WorkspaceContext,
+ session_id: String,
+ run_id: u64,
+ user_message: String,
+ claude_path: Option,
+ codex_path: Option,
+) {
+ let result = (|| -> Result<(), String> {
+ let mut snapshot = read_chat_session_snapshot(&workspace.root, &session_id)?;
+ snapshot.messages.push(ChatMessage {
+ id: create_chat_entity_id("msg"),
+ role: MessageRole::User,
+ content: user_message.clone(),
+ created_at: current_timestamp(),
+ });
+ snapshot.status = SessionStatus::Executing;
+ snapshot.last_message_preview = build_message_preview(&user_message);
+ snapshot.updated_at = current_timestamp();
+ snapshot.runtime.status = SessionStatus::Executing;
+ snapshot.runtime.is_busy = true;
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.last_error = None;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.execution_summary =
+ Some(String::from("Preparing context and launching the selected CLI."));
+ snapshot.runtime.pending_diff = None;
+ snapshot.runtime.current_milestone = Some(String::from("Queue Turn"));
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ emit_session_event(
+ &app,
+ &session_id,
+ "messageStarted",
+ Some(snapshot.clone()),
+ None,
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ append_terminal_line(
+ &app,
+ &session_id,
+ &mut snapshot,
+ "Queued the new user turn and resolved the session context.",
+ );
+
+ if matches!(
+ stop_state(&runtime, &session_id, run_id),
+ ChatStopState::StopRequested
+ ) {
+ halt_session(
+ &app,
+ &workspace.root,
+ &session_id,
+ &mut snapshot,
+ "Turn stopped before execution began.",
+ )?;
+ return Ok(());
+ }
+
+ if snapshot.autonomy_mode == AutonomyMode::Stepped {
+ execute_chat_phase(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ &user_message,
+ &claude_path,
+ &codex_path,
+ ChatExecutionPhase::Proposal,
+ )?;
+
+ match run_approval_gate(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ "Approve the proposal to rerun this turn with write access.",
+ "Stepped mode paused after the proposal phase. Approve to rerun the turn with write access.",
+ "Turn stopped during the stepped approval gate.",
+ "Approval received. Replaying the turn with write access enabled.",
+ true,
+ )? {
+ ApprovalGateResult::Approved => {}
+ ApprovalGateResult::Stopped | ApprovalGateResult::Replaced => return Ok(()),
+ }
+
+ execute_chat_phase(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ &user_message,
+ &claude_path,
+ &codex_path,
+ ChatExecutionPhase::Write,
+ )?;
+ } else {
+ execute_chat_phase(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ &user_message,
+ &claude_path,
+ &codex_path,
+ ChatExecutionPhase::Write,
+ )?;
+ }
+
+ if snapshot.autonomy_mode == AutonomyMode::Milestone {
+ match run_approval_gate(
+ &app,
+ &workspace,
+ &session_id,
+ &runtime,
+ run_id,
+ &mut snapshot,
+ "Approve the current diff to unlock the next turn.",
+ "Milestone mode paused after this turn. Review the current diff before the next prompt.",
+ "Turn stopped during the milestone approval gate.",
+ "Diff approved. The topic is ready for the next prompt.",
+ false,
+ )? {
+ ApprovalGateResult::Approved => {}
+ ApprovalGateResult::Stopped | ApprovalGateResult::Replaced => return Ok(()),
+ }
+ }
+
+ snapshot.runtime.status = SessionStatus::Completed;
+ snapshot.runtime.is_busy = false;
+ snapshot.runtime.awaiting_approval = false;
+ snapshot.runtime.pending_request = None;
+ snapshot.runtime.current_milestone = Some(String::from("Complete"));
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.runtime.execution_summary = Some(String::from(
+ "Turn completed. The transcript, terminal stream, and current diff are ready.",
+ ));
+ snapshot.status = SessionStatus::Completed;
+ snapshot.updated_at = current_timestamp();
+ write_chat_session_snapshot(&workspace.root, &snapshot)?;
+ refresh_index_summary(&workspace.root, &snapshot)?;
+ emit_session_event(
+ &app,
+ &session_id,
+ "completed",
+ Some(snapshot),
+ None,
+ None,
+ None,
+ );
+
+ Ok(())
+ })();
+
+ if let Err(error) = result {
+ let _ = mark_session_error(&app, &workspace.root, &session_id, error);
+ }
+}
+
+fn execute_chat_phase(
+ app: &AppHandle,
+ workspace: &WorkspaceContext,
+ session_id: &str,
+ runtime: &Arc,
+ run_id: u64,
+ snapshot: &mut ChatSessionSnapshot,
+ user_message: &str,
+ claude_path: &Option,
+ codex_path: &Option,
+ phase: ChatExecutionPhase,
+) -> Result<(), String> {
+ if !matches!(stop_state(runtime, session_id, run_id), ChatStopState::Continue) {
+ halt_session(
+ app,
+ &workspace.root,
+ session_id,
+ snapshot,
+ "Turn stopped before the provider phase finished.",
+ )?;
+ return Ok(());
+ }
+
+ snapshot.runtime.current_milestone = Some(String::from(phase.milestone()));
+ snapshot.runtime.execution_summary = Some(String::from(phase.summary()));
+ write_chat_session_snapshot(&workspace.root, snapshot)?;
+ refresh_index_summary(&workspace.root, snapshot)?;
+ append_terminal_line(app, session_id, snapshot, phase.line());
+
+ let context_blocks = build_context_blocks(workspace, snapshot)?;
+ let prompt_payload = build_chat_prompt(snapshot, &context_blocks, user_message, phase);
+ let assistant_content = run_chat_provider_request(
+ &workspace.root,
+ &snapshot.selected_model,
+ &snapshot.selected_reasoning,
+ phase,
+ &prompt_payload,
+ claude_path.as_deref(),
+ codex_path.as_deref(),
+ )?;
+
+ let assistant_message = ChatMessage {
+ id: create_chat_entity_id("msg"),
+ role: MessageRole::Assistant,
+ content: assistant_content.trim().to_string(),
+ created_at: current_timestamp(),
+ };
+ snapshot.messages.push(assistant_message.clone());
+ snapshot.last_message_preview = build_message_preview(&assistant_message.content);
+ snapshot.updated_at = current_timestamp();
+ snapshot.runtime.pending_diff = Some(git_get_diff_for_root(&workspace.root)?);
+ snapshot.runtime.current_milestone = Some(String::from(phase.completed_milestone()));
+ snapshot.runtime.execution_summary = Some(String::from(phase.completed_summary()));
+ snapshot.status = SessionStatus::Executing;
+ write_chat_session_snapshot(&workspace.root, snapshot)?;
+ refresh_index_summary(&workspace.root, snapshot)?;
+ emit_session_event(
+ app,
+ session_id,
+ "messageDelta",
+ None,
+ Some(assistant_message.content.clone()),
+ None,
+ None,
+ );
+ emit_session_event(
+ app,
+ session_id,
+ "sessionUpdated",
+ Some(snapshot.clone()),
+ Some(assistant_message.content),
+ None,
+ Some(snapshot.runtime.clone()),
+ );
+
+ Ok(())
+}
+
+fn run_chat_provider_request(
+ workspace_root: &Path,
+ model: &str,
+ reasoning: &str,
+ phase: ChatExecutionPhase,
+ prompt_payload: &str,
+ claude_path: Option<&str>,
+ codex_path: Option<&str>,
+) -> Result