From 1af754e735089df2a54514d781bd87d10c6a76d2 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:01:32 +0200 Subject: [PATCH 01/10] feat(config): add HttpConfig and ApiKeyConfig structs --- src/config.rs | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/src/config.rs b/src/config.rs index 80b9f01..51215b0 100644 --- a/src/config.rs +++ b/src/config.rs @@ -34,6 +34,39 @@ pub struct AgentsConfig { pub windsurf: bool, } +/// HTTP REST API configuration. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct HttpConfig { + pub enabled: bool, + pub port: u16, + pub host: String, + pub rate_limit: u32, // requests per minute per key, 0 = unlimited + pub cors_origins: Vec<String>, + pub api_keys: Vec<ApiKeyConfig>, +} + +impl Default for HttpConfig { + fn default() -> Self { + Self { + enabled: false, + port: 3000, + host: "127.0.0.1".to_string(), + rate_limit: 60, + cors_origins: vec![], + api_keys: vec![], + } + } +} + +/// API key entry for HTTP authentication. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ApiKeyConfig { + pub key: String, + pub name: String, + pub permissions: String, // "read" | "write" +} + /// Application configuration, loaded from `~/.engraph/config.toml` with CLI overrides. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default)] pub struct Config { @@ -56,6 +89,9 @@ pub struct Config { /// Agent integration settings. #[serde(default)] pub agents: AgentsConfig, + /// HTTP REST API settings. 
+ #[serde(default)] + pub http: HttpConfig, } impl Default for Config { @@ -69,6 +105,7 @@ impl Default for Config { models: ModelConfig::default(), obsidian: ObsidianConfig::default(), agents: AgentsConfig::default(), + http: HttpConfig::default(), } } } @@ -267,6 +304,40 @@ vault_name = "Personal" assert_eq!(config.obsidian.vault_name.as_deref(), Some("Personal")); } + #[test] + fn test_config_with_http() { + let toml = r#" +[http] +enabled = true +port = 8080 +host = "0.0.0.0" +rate_limit = 120 +cors_origins = ["https://chat.openai.com"] + +[[http.api_keys]] +key = "eg_test123" +name = "test-key" +permissions = "read" +"#; + let config: Config = toml::from_str(toml).unwrap(); + assert!(config.http.enabled); + assert_eq!(config.http.port, 8080); + assert_eq!(config.http.api_keys.len(), 1); + assert_eq!(config.http.api_keys[0].permissions, "read"); + } + + #[test] + fn test_config_http_defaults() { + let toml = r#"top_n = 5"#; + let config: Config = toml::from_str(toml).unwrap(); + assert!(!config.http.enabled); + assert_eq!(config.http.port, 3000); + assert_eq!(config.http.host, "127.0.0.1"); + assert_eq!(config.http.rate_limit, 60); + assert!(config.http.cors_origins.is_empty()); + assert!(config.http.api_keys.is_empty()); + } + #[test] fn test_config_roundtrip_with_intelligence() { let dir = tempfile::tempdir().unwrap(); From a7299b2af2ab7c1dd185b0a41a9d5996857b254f Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:04:47 +0200 Subject: [PATCH 02/10] feat(http): add ApiState, ApiError, auth helpers, and key generation --- Cargo.lock | 245 ++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 7 +- src/http.rs | 217 ++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 1 + 4 files changed, 469 insertions(+), 1 deletion(-) create mode 100644 src/http.rs diff --git a/Cargo.lock b/Cargo.lock index f1ee97e..3fe1ad7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -107,12 +107,70 @@ dependencies = [ "syn", ] +[[package]] +name = 
"atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "axum" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" +dependencies = [ + "axum-core", + "bytes", + "form_urlencoded", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "base64" version = "0.13.1" @@ -619,6 +677,7 @@ name = "engraph" version = "1.0.2" dependencies = [ "anyhow", + "axum", "clap", "dirs", "encoding_rs", @@ -627,6 +686,7 @@ dependencies = [ "llama-cpp-2", "notify", "notify-debouncer-full", + "rand", "rayon", "rmcp", "rusqlite", @@ -641,7 +701,10 @@ dependencies = [ "time", "tokenizers", "tokio", + "tokio-util", "toml", + "tower", + "tower-http", "tracing", "tracing-subscriber", "ureq", @@ -997,6 +1060,87 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "bytes", + "http", + "http-body", + "hyper", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = 
"iana-time-zone" version = "0.1.65" @@ -1397,12 +1541,24 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "memchr" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1576,6 +1732,12 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + [[package]] name = "pkg-config" version = "0.3.32" @@ -2011,6 +2173,17 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + [[package]] name = "serde_spanned" version = "0.6.9" @@ -2020,6 +2193,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "serde_yaml" version = "0.9.34+deprecated" @@ -2099,6 +2284,16 @@ version = "1.15.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + [[package]] name = "spm_precompiled" version = "0.1.4" @@ -2155,6 +2350,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + [[package]] name = "synstructure" version = "0.13.2" @@ -2313,6 +2514,7 @@ dependencies = [ "mio", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "windows-sys 0.61.2", ] @@ -2382,12 +2584,55 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags 2.11.0", + "bytes", + "http", + "pin-project-lite", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" 
+version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + [[package]] name = "tracing" version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", diff --git a/Cargo.toml b/Cargo.toml index f3aac95..d53b0a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,12 +32,17 @@ time = { version = "0.3", features = ["parsing", "formatting", "macros"] } strsim = "0.11" ignore = "0.4" rmcp = { version = "1.2", features = ["transport-io"] } -tokio = { version = "1", features = ["macros", "rt-multi-thread", "process", "time"] } +tokio = { version = "1", features = ["macros", "rt-multi-thread", "process", "time", "net"] } notify = "7.0" notify-debouncer-full = "0.4" llama-cpp-2 = "0.1" encoding_rs = "0.8" shimmytok = "0.7" +axum = "0.8" +tower-http = { version = "0.6", features = ["cors"] } +tower = "0.5" +rand = "0.9" +tokio-util = "0.7" [features] default = [] diff --git a/src/http.rs b/src/http.rs new file mode 100644 index 0000000..5031e6c --- /dev/null +++ b/src/http.rs @@ -0,0 +1,217 @@ +use std::sync::Arc; + +use axum::{Json, Router, http::StatusCode, response::IntoResponse, routing::get}; +use tokio::sync::Mutex; + +use crate::config::{ApiKeyConfig, HttpConfig}; +use crate::llm::{EmbedModel, OrchestratorModel, RerankModel}; +use crate::profile::VaultProfile; +use crate::serve::RecentWrites; +use crate::store::Store; + +// --------------------------------------------------------------------------- +// Shared state +// --------------------------------------------------------------------------- + +#[derive(Clone)] +pub struct ApiState { + pub store: Arc<Mutex<Store>>, + pub embedder: Arc<Mutex<Box<dyn EmbedModel>>>, + pub vault_path: Arc<std::path::PathBuf>, + pub profile: Arc<Option<VaultProfile>>, + pub orchestrator: Option<Arc<Mutex<Box<dyn OrchestratorModel>>>>, + pub reranker: Option<Arc<Mutex<Box<dyn RerankModel>>>>, + pub 
http_config: Arc<HttpConfig>, + pub no_auth: bool, + pub recent_writes: RecentWrites, +} + +// --------------------------------------------------------------------------- +// Error type +// --------------------------------------------------------------------------- + +pub struct ApiError { + pub status: StatusCode, + pub message: String, +} + +impl IntoResponse for ApiError { + fn into_response(self) -> axum::response::Response { + let body = serde_json::json!({ "error": self.message }); + (self.status, Json(body)).into_response() + } +} + +impl ApiError { + pub fn unauthorized(msg: &str) -> Self { + Self { + status: StatusCode::UNAUTHORIZED, + message: msg.to_string(), + } + } + pub fn forbidden(msg: &str) -> Self { + Self { + status: StatusCode::FORBIDDEN, + message: msg.to_string(), + } + } + pub fn bad_request(msg: &str) -> Self { + Self { + status: StatusCode::BAD_REQUEST, + message: msg.to_string(), + } + } + pub fn not_found(msg: &str) -> Self { + Self { + status: StatusCode::NOT_FOUND, + message: msg.to_string(), + } + } + pub fn internal(msg: &str) -> Self { + Self { + status: StatusCode::INTERNAL_SERVER_ERROR, + message: msg.to_string(), + } + } +} + +// --------------------------------------------------------------------------- +// Auth helpers +// --------------------------------------------------------------------------- + +/// Validate API key from Authorization header. Returns the matching key config. +pub fn validate_api_key<'a>(key: &str, config: &'a HttpConfig) -> Option<&'a ApiKeyConfig> { + config.api_keys.iter().find(|k| k.key == key) +} + +/// Check if a permission level allows the requested operation. +pub fn check_permission(permission: &str, is_write: bool) -> bool { + if !is_write { + return true; + } + permission == "write" +} + +/// Extract and validate auth from request headers. 
+pub fn authorize( + headers: &axum::http::HeaderMap, + state: &ApiState, + is_write: bool, +) -> Result<(), ApiError> { + if state.no_auth { + return Ok(()); + } + let auth = headers + .get("authorization") + .and_then(|v| v.to_str().ok()) + .ok_or_else(|| ApiError::unauthorized("Missing Authorization header"))?; + let key = auth + .strip_prefix("Bearer ") + .ok_or_else(|| ApiError::unauthorized("Authorization must use Bearer scheme"))?; + let key_config = validate_api_key(key, &state.http_config) + .ok_or_else(|| ApiError::unauthorized("Invalid API key"))?; + if !check_permission(&key_config.permissions, is_write) { + return Err(ApiError::forbidden( + "Insufficient permissions: write access required", + )); + } + Ok(()) +} + +/// Generate a new API key with `eg_` prefix + 32 hex chars. +pub fn generate_api_key() -> String { + use rand::Rng; + let mut rng = rand::rng(); + let hex: String = (0..32) + .map(|_| format!("{:x}", rng.random_range(0..16u8))) + .collect(); + format!("eg_{hex}") +} + +// --------------------------------------------------------------------------- +// Router +// --------------------------------------------------------------------------- + +/// Build the axum router with all API endpoints. 
+pub fn build_router(state: ApiState) -> Router { + Router::new() + // Placeholder -- handlers will be added in Tasks 3-5 + .route("/api/health-check", get(health_check)) + .with_state(state) +} + +async fn health_check() -> &'static str { + "ok" +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn test_http_config() -> HttpConfig { + HttpConfig { + enabled: true, + port: 3000, + host: "127.0.0.1".to_string(), + rate_limit: 0, + cors_origins: vec![], + api_keys: vec![ + ApiKeyConfig { + key: "eg_readkey".into(), + name: "reader".into(), + permissions: "read".into(), + }, + ApiKeyConfig { + key: "eg_writekey".into(), + name: "writer".into(), + permissions: "write".into(), + }, + ], + } + } + + #[test] + fn test_validate_api_key_valid() { + let config = test_http_config(); + let result = validate_api_key("eg_readkey", &config); + assert!(result.is_some()); + assert_eq!(result.unwrap().permissions, "read"); + } + + #[test] + fn test_validate_api_key_invalid() { + let config = test_http_config(); + assert!(validate_api_key("eg_badkey", &config).is_none()); + } + + #[test] + fn test_generate_api_key_format() { + let key = generate_api_key(); + assert!(key.starts_with("eg_")); + assert_eq!(key.len(), 35); // "eg_" + 32 hex chars + } + + #[test] + fn test_check_permission_read_on_read() { + assert!(check_permission("read", false)); + } + + #[test] + fn test_check_permission_read_on_write() { + assert!(!check_permission("read", true)); + } + + #[test] + fn test_check_permission_write_on_write() { + assert!(check_permission("write", true)); + } + + #[test] + fn test_check_permission_write_on_read() { + assert!(check_permission("write", false)); + } +} diff --git a/src/lib.rs b/src/lib.rs index 237516f..4dc4d67 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,6 +6,7 @@ pub mod fts; pub mod fusion; pub mod 
graph; pub mod health; +pub mod http; pub mod indexer; pub mod links; pub mod llm; From b6762307f814a79cdee9760e91fefb0cb66520b1 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:09:43 +0200 Subject: [PATCH 03/10] feat(http): add read endpoint handlers (search, read, list, vault_map, who, project, context, health) --- src/http.rs | 401 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 399 insertions(+), 2 deletions(-) diff --git a/src/http.rs b/src/http.rs index 5031e6c..18ad428 100644 --- a/src/http.rs +++ b/src/http.rs @@ -1,11 +1,17 @@ use std::sync::Arc; -use axum::{Json, Router, http::StatusCode, response::IntoResponse, routing::get}; +use axum::extract::{Path, Query, State}; +use axum::http::HeaderMap; +use axum::{Json, Router, http::StatusCode, response::IntoResponse, routing::{get, post}}; +use serde::Deserialize; use tokio::sync::Mutex; use crate::config::{ApiKeyConfig, HttpConfig}; +use crate::context::{self, ContextParams}; +use crate::health; use crate::llm::{EmbedModel, OrchestratorModel, RerankModel}; use crate::profile::VaultProfile; +use crate::search; use crate::serve::RecentWrites; use crate::store::Store; @@ -128,6 +134,37 @@ pub fn generate_api_key() -> String { format!("eg_{hex}") } +// --------------------------------------------------------------------------- +// Request body / query structs +// --------------------------------------------------------------------------- + +#[derive(Debug, Deserialize)] +struct SearchBody { + query: String, + top_n: Option<usize>, +} + +#[derive(Debug, Deserialize)] +struct ReadSectionQuery { + file: String, + heading: String, +} + +#[derive(Debug, Deserialize)] +struct ListQuery { + folder: Option<String>, + #[serde(default)] + tags: Vec<String>, + limit: Option<usize>, + created_by: Option<String>, +} + +#[derive(Debug, Deserialize)] +struct ContextBody { + topic: String, + budget: Option<usize>, +} + // --------------------------------------------------------------------------- // Router // 
--------------------------------------------------------------------------- @@ -135,8 +172,16 @@ pub fn generate_api_key() -> String { /// Build the axum router with all API endpoints. pub fn build_router(state: ApiState) -> Router { Router::new() - // Placeholder -- handlers will be added in Tasks 3-5 .route("/api/health-check", get(health_check)) + .route("/api/search", post(handle_search)) + .route("/api/read/{*file}", get(handle_read)) + .route("/api/read-section", get(handle_read_section)) + .route("/api/list", get(handle_list)) + .route("/api/vault-map", get(handle_vault_map)) + .route("/api/who/{name}", get(handle_who)) + .route("/api/project/{name}", get(handle_project)) + .route("/api/context", post(handle_context)) + .route("/api/health", get(handle_health)) .with_state(state) } @@ -144,6 +189,183 @@ async fn health_check() -> &'static str { "ok" } +// --------------------------------------------------------------------------- +// Read endpoint handlers +// --------------------------------------------------------------------------- + +async fn handle_search( + State(state): State<ApiState>, + headers: HeaderMap, + Json(body): Json<SearchBody>, +) -> Result<impl IntoResponse, ApiError> { + authorize(&headers, &state, false)?; + let top_n = body.top_n.unwrap_or(10); + let store = state.store.lock().await; + let mut embedder = state.embedder.lock().await; + + let mut orch_guard = match &state.orchestrator { + Some(o) => Some(o.lock().await), + None => None, + }; + let mut rerank_guard = match &state.reranker { + Some(r) => Some(r.lock().await), + None => None, + }; + + let mut config = search::SearchConfig { + orchestrator: orch_guard + .as_mut() + .map(|g| g.as_mut() as &mut dyn OrchestratorModel), + reranker: rerank_guard + .as_mut() + .map(|g| g.as_mut() as &mut dyn RerankModel), + store: &store, + rerank_candidates: 30, + }; + + let output = search::search_with_intelligence(&body.query, top_n, &mut *embedder, &mut config) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + 
Ok(Json(serde_json::json!(output.results))) +} + +async fn handle_read( + State(state): State, + headers: HeaderMap, + Path(file): Path, +) -> Result { + authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let ctx = ContextParams { + store: &store, + vault_path: &state.vault_path, + profile: state.profile.as_ref().as_ref(), + }; + let note = context::context_read(&ctx, &file) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(note))) +} + +async fn handle_read_section( + State(state): State, + headers: HeaderMap, + Query(params): Query, +) -> Result { + authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let result = context::read_section(&store, &state.vault_path, ¶ms.file, ¶ms.heading) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_list( + State(state): State, + headers: HeaderMap, + Query(params): Query, +) -> Result { + authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let ctx = ContextParams { + store: &store, + vault_path: &state.vault_path, + profile: state.profile.as_ref().as_ref(), + }; + let limit = params.limit.unwrap_or(20); + let items = context::context_list( + &ctx, + params.folder.as_deref(), + ¶ms.tags, + params.created_by.as_deref(), + limit, + ) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(items))) +} + +async fn handle_vault_map( + State(state): State, + headers: HeaderMap, +) -> Result { + authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let ctx = ContextParams { + store: &store, + vault_path: &state.vault_path, + profile: state.profile.as_ref().as_ref(), + }; + let map = context::vault_map(&ctx) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(map))) +} + +async fn handle_who( + State(state): State, + headers: HeaderMap, + Path(name): Path, +) -> Result { + 
authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let ctx = ContextParams { + store: &store, + vault_path: &state.vault_path, + profile: state.profile.as_ref().as_ref(), + }; + let person = context::context_who(&ctx, &name) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(person))) +} + +async fn handle_project( + State(state): State, + headers: HeaderMap, + Path(name): Path, +) -> Result { + authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let ctx = ContextParams { + store: &store, + vault_path: &state.vault_path, + profile: state.profile.as_ref().as_ref(), + }; + let proj = context::context_project(&ctx, &name) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(proj))) +} + +async fn handle_context( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, false)?; + let budget = body.budget.unwrap_or(32000); + let store = state.store.lock().await; + let mut embedder = state.embedder.lock().await; + let ctx = ContextParams { + store: &store, + vault_path: &state.vault_path, + profile: state.profile.as_ref().as_ref(), + }; + let bundle = context::context_topic_with_search(&ctx, &body.topic, budget, &mut *embedder) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(bundle))) +} + +async fn handle_health( + State(state): State, + headers: HeaderMap, +) -> Result { + authorize(&headers, &state, false)?; + let store = state.store.lock().await; + let profile_ref = state.profile.as_ref().as_ref(); + let config = health::HealthConfig { + daily_folder: profile_ref.and_then(|p| p.structure.folders.daily.clone()), + inbox_folder: profile_ref.and_then(|p| p.structure.folders.inbox.clone()), + }; + let report = health::generate_health_report(&store, &config) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!(report))) +} + // 
--------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- @@ -151,6 +373,12 @@ async fn health_check() -> &'static str { #[cfg(test)] mod tests { use super::*; + use std::collections::HashMap; + use std::path::PathBuf; + use std::time::SystemTime; + + use axum::body::Body; + use tower::ServiceExt; fn test_http_config() -> HttpConfig { HttpConfig { @@ -174,6 +402,36 @@ mod tests { } } + /// Dummy embedder that returns zero vectors. Only used for constructing + /// `ApiState` in tests that don't exercise search/context endpoints. + struct DummyEmbedder; + impl crate::llm::EmbedModel for DummyEmbedder { + fn embed_batch(&mut self, texts: &[&str]) -> anyhow::Result>> { + Ok(texts.iter().map(|_| vec![0.0; 384]).collect()) + } + fn token_count(&self, text: &str) -> usize { + text.split_whitespace().count() + } + fn dim(&self) -> usize { + 384 + } + } + + fn test_api_state() -> ApiState { + let store = Store::open_memory().expect("in-memory store"); + ApiState { + store: Arc::new(Mutex::new(store)), + embedder: Arc::new(Mutex::new(Box::new(DummyEmbedder) as Box)), + vault_path: Arc::new(PathBuf::from("/tmp/test-vault")), + profile: Arc::new(None), + orchestrator: None, + reranker: None, + http_config: Arc::new(test_http_config()), + no_auth: false, + recent_writes: Arc::new(Mutex::new(HashMap::::new())), + } + } + #[test] fn test_validate_api_key_valid() { let config = test_http_config(); @@ -214,4 +472,143 @@ mod tests { fn test_check_permission_write_on_read() { assert!(check_permission("write", false)); } + + // ----------------------------------------------------------------------- + // Integration tests using axum oneshot + // ----------------------------------------------------------------------- + + #[tokio::test] + async fn test_vault_map_unauthorized() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + 
axum::http::Request::builder() + .uri("/api/vault-map") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_vault_map_invalid_key() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/vault-map") + .header("authorization", "Bearer eg_badkey") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_vault_map_authorized() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/vault-map") + .header("authorization", "Bearer eg_readkey") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + } + + #[tokio::test] + async fn test_health_authorized() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/health") + .header("authorization", "Bearer eg_readkey") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + } + + #[tokio::test] + async fn test_health_unauthorized() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/health") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_search_unauthorized() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .method("POST") + .uri("/api/search") + .header("content-type", "application/json") + .body(Body::from(r#"{"query":"test"}"#)) + .unwrap(), + ) + 
.await + .unwrap(); + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_list_authorized_empty() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/list") + .header("authorization", "Bearer eg_readkey") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + } + + #[tokio::test] + async fn test_no_auth_mode_skips_check() { + let mut state = test_api_state(); + state.no_auth = true; + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/vault-map") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + } } From 97c7c8616bd6b0db2a81f0f1443fd2d87ba5c6e3 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:13:44 +0200 Subject: [PATCH 04/10] feat(http): add write endpoint handlers with permission enforcement --- src/http.rs | 418 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 418 insertions(+) diff --git a/src/http.rs b/src/http.rs index 18ad428..d8f6003 100644 --- a/src/http.rs +++ b/src/http.rs @@ -14,6 +14,10 @@ use crate::profile::VaultProfile; use crate::search; use crate::serve::RecentWrites; use crate::store::Store; +use crate::writer::{ + self, AppendInput, CreateNoteInput, DeleteMode, EditFrontmatterInput, EditInput, EditMode, + FrontmatterOp, RewriteInput, UpdateMetadataInput, +}; // --------------------------------------------------------------------------- // Shared state @@ -165,6 +169,74 @@ struct ContextBody { budget: Option, } +// -- Write request bodies -- + +#[derive(Debug, Deserialize)] +struct CreateBody { + content: String, + filename: Option, + type_hint: Option, + #[serde(default)] + tags: Vec, + folder: Option, +} + +#[derive(Debug, Deserialize)] +struct AppendBody { + file: String, + content: 
String, +} + +#[derive(Debug, Deserialize)] +struct EditBody { + file: String, + heading: String, + content: String, + mode: Option, +} + +#[derive(Debug, Deserialize)] +struct RewriteBody { + file: String, + content: String, + preserve_frontmatter: Option, +} + +#[derive(Debug, Deserialize)] +struct EditFrontmatterBody { + file: String, + operations: Vec, +} + +#[derive(Debug, Deserialize)] +struct MoveBody { + file: String, + new_folder: String, +} + +#[derive(Debug, Deserialize)] +struct ArchiveBody { + file: String, +} + +#[derive(Debug, Deserialize)] +struct UnarchiveBody { + file: String, +} + +#[derive(Debug, Deserialize)] +struct UpdateMetadataBody { + file: String, + tags: Option>, + aliases: Option>, +} + +#[derive(Debug, Deserialize)] +struct DeleteBody { + file: String, + mode: Option, +} + // --------------------------------------------------------------------------- // Router // --------------------------------------------------------------------------- @@ -182,6 +254,17 @@ pub fn build_router(state: ApiState) -> Router { .route("/api/project/{name}", get(handle_project)) .route("/api/context", post(handle_context)) .route("/api/health", get(handle_health)) + // Write endpoints + .route("/api/create", post(handle_create)) + .route("/api/append", post(handle_append)) + .route("/api/edit", post(handle_edit)) + .route("/api/rewrite", post(handle_rewrite)) + .route("/api/edit-frontmatter", post(handle_edit_frontmatter)) + .route("/api/move", post(handle_move)) + .route("/api/archive", post(handle_archive)) + .route("/api/unarchive", post(handle_unarchive)) + .route("/api/update-metadata", post(handle_update_metadata)) + .route("/api/delete", post(handle_delete)) .with_state(state) } @@ -366,6 +449,296 @@ async fn handle_health( Ok(Json(serde_json::json!(report))) } +// --------------------------------------------------------------------------- +// Write helpers +// --------------------------------------------------------------------------- + +/// Record a 
write to the recent-writes map so the file watcher skips re-indexing. +async fn record_write(recent_writes: &RecentWrites, path: &std::path::Path) { + if let Ok(meta) = std::fs::metadata(path) + && let Ok(mtime) = meta.modified() + { + recent_writes.lock().await.insert(path.to_path_buf(), mtime); + } +} + +/// Parse a JSON operations array into `Vec`. +fn parse_frontmatter_ops(operations: &[serde_json::Value]) -> Result, ApiError> { + let mut ops = Vec::with_capacity(operations.len()); + for op_val in operations { + let op_str = op_val + .get("op") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("each operation must have an \"op\" string field"))?; + match op_str { + "set" => { + let key = op_val + .get("key") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"set\" operation requires a \"key\" field"))?; + let value = op_val + .get("value") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"set\" operation requires a \"value\" field"))?; + ops.push(FrontmatterOp::Set(key.to_string(), value.to_string())); + } + "remove" => { + let key = op_val + .get("key") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"remove\" operation requires a \"key\" field"))?; + ops.push(FrontmatterOp::Remove(key.to_string())); + } + "add_tag" => { + let value = op_val + .get("value") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"add_tag\" operation requires a \"value\" field"))?; + ops.push(FrontmatterOp::AddTag(value.to_string())); + } + "remove_tag" => { + let value = op_val + .get("value") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"remove_tag\" operation requires a \"value\" field"))?; + ops.push(FrontmatterOp::RemoveTag(value.to_string())); + } + "add_alias" => { + let value = op_val + .get("value") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"add_alias\" operation requires a \"value\" field"))?; + 
ops.push(FrontmatterOp::AddAlias(value.to_string())); + } + "remove_alias" => { + let value = op_val + .get("value") + .and_then(|v| v.as_str()) + .ok_or_else(|| ApiError::bad_request("\"remove_alias\" operation requires a \"value\" field"))?; + ops.push(FrontmatterOp::RemoveAlias(value.to_string())); + } + unknown => { + return Err(ApiError::bad_request(&format!( + "unknown frontmatter operation: \"{unknown}\"" + ))); + } + } + } + Ok(ops) +} + +// --------------------------------------------------------------------------- +// Write endpoint handlers +// --------------------------------------------------------------------------- + +async fn handle_create( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let mut embedder = state.embedder.lock().await; + let input = CreateNoteInput { + content: body.content, + filename: body.filename, + type_hint: body.type_hint, + tags: body.tags, + folder: body.folder, + created_by: "http-api".into(), + }; + let result = writer::create_note( + input, + &store, + &mut *embedder, + &state.vault_path, + state.profile.as_ref().as_ref(), + ) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_append( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let mut embedder = state.embedder.lock().await; + let input = AppendInput { + file: body.file, + content: body.content, + modified_by: "http-api".into(), + }; + let result = writer::append_to_note(input, &store, &mut *embedder, &state.vault_path) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, 
&full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_edit( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let mode = match body.mode.as_deref().unwrap_or("append") { + "replace" => EditMode::Replace, + "prepend" => EditMode::Prepend, + _ => EditMode::Append, + }; + let input = EditInput { + file: body.file, + heading: body.heading, + content: body.content, + mode, + modified_by: "http-api".into(), + }; + let result = writer::edit_note(&store, &state.vault_path, &input, None) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_rewrite( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let input = RewriteInput { + file: body.file, + content: body.content, + preserve_frontmatter: body.preserve_frontmatter.unwrap_or(true), + modified_by: "http-api".into(), + }; + let result = writer::rewrite_note(&store, &state.vault_path, &input) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_edit_frontmatter( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let ops = parse_frontmatter_ops(&body.operations)?; + let store = state.store.lock().await; + let input = EditFrontmatterInput { + file: body.file, + operations: ops, + modified_by: "http-api".into(), + }; + let result = writer::edit_frontmatter(&store, &state.vault_path, &input) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = 
state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_move( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let result = writer::move_note(&body.file, &body.new_folder, &store, &state.vault_path) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_archive( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let result = writer::archive_note( + &body.file, + &store, + &state.vault_path, + state.profile.as_ref().as_ref(), + ) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_unarchive( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let mut embedder = state.embedder.lock().await; + let result = writer::unarchive_note(&body.file, &store, &mut *embedder, &state.vault_path) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_update_metadata( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let input = UpdateMetadataInput { + file: body.file, + tags: body.tags, + aliases: body.aliases, + modified_by: "http-api".into(), + 
}; + let result = writer::update_metadata(input, &store, &state.vault_path) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let full_path = state.vault_path.join(&result.path); + record_write(&state.recent_writes, &full_path).await; + Ok(Json(serde_json::json!(result))) +} + +async fn handle_delete( + State(state): State, + headers: HeaderMap, + Json(body): Json, +) -> Result { + authorize(&headers, &state, true)?; + let store = state.store.lock().await; + let mode = match body.mode.as_deref().unwrap_or("soft") { + "hard" => DeleteMode::Hard, + _ => DeleteMode::Soft, + }; + let archive_folder = state + .profile + .as_ref() + .as_ref() + .and_then(|p| p.structure.folders.archive.as_deref()) + .unwrap_or("04-Archive"); + writer::delete_note(&store, &state.vault_path, &body.file, mode, archive_folder) + .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + Ok(Json(serde_json::json!({ + "deleted": body.file, + "mode": body.mode.as_deref().unwrap_or("soft"), + }))) +} + // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- @@ -611,4 +984,49 @@ mod tests { .unwrap(); assert_eq!(response.status(), StatusCode::OK); } + + // ----------------------------------------------------------------------- + // Write endpoint permission tests + // ----------------------------------------------------------------------- + + #[tokio::test] + async fn test_write_endpoint_read_key_rejected() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .method("POST") + .uri("/api/create") + .header("content-type", "application/json") + .header("authorization", "Bearer eg_readkey") + .body(Body::from(r##"{"content":"# Test"}"##)) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::FORBIDDEN); + } + + #[tokio::test] + async fn 
test_write_endpoint_write_key_accepted() { + let state = test_api_state(); + let app = build_router(state); + let response = app + .oneshot( + axum::http::Request::builder() + .method("POST") + .uri("/api/edit") + .header("content-type", "application/json") + .header("authorization", "Bearer eg_writekey") + .body(Body::from( + r#"{"file":"nonexistent","heading":"Test","content":"new"}"#, + )) + .unwrap(), + ) + .await + .unwrap(); + // Should be 500 (file not found via store) but NOT 403 + assert_ne!(response.status(), StatusCode::FORBIDDEN); + } } From aee4858bc1b33480054e6e2369746685b7c75426 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:17:54 +0200 Subject: [PATCH 05/10] feat(http): add rate limiting and CORS middleware --- src/http.rs | 191 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 186 insertions(+), 5 deletions(-) diff --git a/src/http.rs b/src/http.rs index d8f6003..c16e390 100644 --- a/src/http.rs +++ b/src/http.rs @@ -1,10 +1,13 @@ +use std::collections::HashMap; use std::sync::Arc; +use std::time::Instant; use axum::extract::{Path, Query, State}; -use axum::http::HeaderMap; +use axum::http::{HeaderMap, HeaderValue, Method}; use axum::{Json, Router, http::StatusCode, response::IntoResponse, routing::{get, post}}; use serde::Deserialize; use tokio::sync::Mutex; +use tower_http::cors::{Any, CorsLayer}; use crate::config::{ApiKeyConfig, HttpConfig}; use crate::context::{self, ContextParams}; @@ -34,6 +37,56 @@ pub struct ApiState { pub http_config: Arc, pub no_auth: bool, pub recent_writes: RecentWrites, + pub rate_limiter: Arc, +} + +// --------------------------------------------------------------------------- +// Rate limiter (in-memory token bucket) +// --------------------------------------------------------------------------- + +pub struct RateLimiter { + buckets: std::sync::Mutex>, + limit: u32, // requests per minute, 0 = unlimited +} + +struct RateBucket { + tokens: u32, + last_refill: Instant, 
+} + +impl RateLimiter { + pub fn new(limit: u32) -> Self { + Self { + buckets: std::sync::Mutex::new(HashMap::new()), + limit, + } + } + + /// Check if a request is allowed. Returns Ok(()) or Err with retry-after seconds. + pub fn check(&self, key: &str) -> Result<(), u64> { + if self.limit == 0 { + return Ok(()); + } + let mut buckets = self.buckets.lock().unwrap(); + let bucket = buckets.entry(key.to_string()).or_insert(RateBucket { + tokens: self.limit, + last_refill: Instant::now(), + }); + // Refill tokens based on elapsed time + let elapsed = bucket.last_refill.elapsed().as_secs_f64(); + let refill = (elapsed * self.limit as f64 / 60.0) as u32; + if refill > 0 { + bucket.tokens = (bucket.tokens + refill).min(self.limit); + bucket.last_refill = Instant::now(); + } + if bucket.tokens > 0 { + bucket.tokens -= 1; + Ok(()) + } else { + let retry_after = (60.0 / self.limit as f64).ceil() as u64; + Err(retry_after) + } + } } // --------------------------------------------------------------------------- @@ -43,12 +96,22 @@ pub struct ApiState { pub struct ApiError { pub status: StatusCode, pub message: String, + pub headers: Vec<(String, String)>, } impl IntoResponse for ApiError { fn into_response(self) -> axum::response::Response { let body = serde_json::json!({ "error": self.message }); - (self.status, Json(body)).into_response() + let mut response = (self.status, Json(body)).into_response(); + for (name, value) in &self.headers { + if let (Ok(n), Ok(v)) = ( + axum::http::header::HeaderName::from_bytes(name.as_bytes()), + HeaderValue::from_str(value), + ) { + response.headers_mut().insert(n, v); + } + } + response } } @@ -57,30 +120,42 @@ impl ApiError { Self { status: StatusCode::UNAUTHORIZED, message: msg.to_string(), + headers: vec![], } } pub fn forbidden(msg: &str) -> Self { Self { status: StatusCode::FORBIDDEN, message: msg.to_string(), + headers: vec![], } } pub fn bad_request(msg: &str) -> Self { Self { status: StatusCode::BAD_REQUEST, message: 
msg.to_string(), + headers: vec![], } } pub fn not_found(msg: &str) -> Self { Self { status: StatusCode::NOT_FOUND, message: msg.to_string(), + headers: vec![], } } pub fn internal(msg: &str) -> Self { Self { status: StatusCode::INTERNAL_SERVER_ERROR, message: msg.to_string(), + headers: vec![], + } + } + pub fn rate_limited(retry_after: u64) -> Self { + Self { + status: StatusCode::TOO_MANY_REQUESTS, + message: format!("Rate limit exceeded. Retry after {retry_after}s"), + headers: vec![("retry-after".to_string(), retry_after.to_string())], } } } @@ -102,13 +177,17 @@ pub fn check_permission(permission: &str, is_write: bool) -> bool { permission == "write" } -/// Extract and validate auth from request headers. +/// Extract and validate auth from request headers, then check rate limit. pub fn authorize( headers: &axum::http::HeaderMap, state: &ApiState, is_write: bool, ) -> Result<(), ApiError> { if state.no_auth { + state + .rate_limiter + .check("no_auth") + .map_err(ApiError::rate_limited)?; return Ok(()); } let auth = headers @@ -125,6 +204,10 @@ pub fn authorize( "Insufficient permissions: write access required", )); } + state + .rate_limiter + .check(key) + .map_err(ApiError::rate_limited)?; Ok(()) } @@ -237,12 +320,38 @@ struct DeleteBody { mode: Option, } +// --------------------------------------------------------------------------- +// CORS +// --------------------------------------------------------------------------- + +fn cors_layer(origins: &[String]) -> CorsLayer { + if origins.is_empty() { + return CorsLayer::new(); + } + if origins.iter().any(|o| o == "*") { + return CorsLayer::new() + .allow_origin(Any) + .allow_methods(Any) + .allow_headers(Any); + } + let origins: Vec = origins + .iter() + .filter_map(|o| o.parse().ok()) + .collect(); + CorsLayer::new() + .allow_origin(origins) + .allow_methods([Method::GET, Method::POST, Method::OPTIONS]) + .allow_headers(Any) + .allow_credentials(true) +} + // 
--------------------------------------------------------------------------- // Router // --------------------------------------------------------------------------- /// Build the axum router with all API endpoints. pub fn build_router(state: ApiState) -> Router { + let cors = cors_layer(&state.http_config.cors_origins); Router::new() .route("/api/health-check", get(health_check)) .route("/api/search", post(handle_search)) @@ -265,6 +374,7 @@ pub fn build_router(state: ApiState) -> Router { .route("/api/unarchive", post(handle_unarchive)) .route("/api/update-metadata", post(handle_update_metadata)) .route("/api/delete", post(handle_delete)) + .layer(cors) .with_state(state) } @@ -746,7 +856,6 @@ async fn handle_delete( #[cfg(test)] mod tests { use super::*; - use std::collections::HashMap; use std::path::PathBuf; use std::time::SystemTime; @@ -792,6 +901,8 @@ mod tests { fn test_api_state() -> ApiState { let store = Store::open_memory().expect("in-memory store"); + let config = test_http_config(); + let rate_limiter = Arc::new(RateLimiter::new(config.rate_limit)); ApiState { store: Arc::new(Mutex::new(store)), embedder: Arc::new(Mutex::new(Box::new(DummyEmbedder) as Box)), @@ -799,9 +910,10 @@ mod tests { profile: Arc::new(None), orchestrator: None, reranker: None, - http_config: Arc::new(test_http_config()), + http_config: Arc::new(config), no_auth: false, recent_writes: Arc::new(Mutex::new(HashMap::::new())), + rate_limiter, } } @@ -1029,4 +1141,73 @@ mod tests { // Should be 500 (file not found via store) but NOT 403 assert_ne!(response.status(), StatusCode::FORBIDDEN); } + + // ----------------------------------------------------------------------- + // Rate limiter unit tests + // ----------------------------------------------------------------------- + + #[test] + fn test_rate_limiter_allows_under_limit() { + let limiter = RateLimiter::new(5); + for _ in 0..5 { + assert!(limiter.check("key1").is_ok()); + } + } + + #[test] + fn 
test_rate_limiter_rejects_over_limit() { + let limiter = RateLimiter::new(2); + assert!(limiter.check("key1").is_ok()); + assert!(limiter.check("key1").is_ok()); + assert!(limiter.check("key1").is_err()); + } + + #[test] + fn test_rate_limiter_unlimited() { + let limiter = RateLimiter::new(0); + for _ in 0..1000 { + assert!(limiter.check("key1").is_ok()); + } + } + + #[test] + fn test_rate_limiter_separate_keys() { + let limiter = RateLimiter::new(1); + assert!(limiter.check("key1").is_ok()); + assert!(limiter.check("key2").is_ok()); // different key, separate bucket + assert!(limiter.check("key1").is_err()); // key1 exhausted + } + + #[tokio::test] + async fn test_rate_limit_returns_429() { + let mut state = test_api_state(); + state.rate_limiter = Arc::new(RateLimiter::new(1)); + let app = build_router(state); + // First request passes (consumes the single token) + let response = app + .clone() + .oneshot( + axum::http::Request::builder() + .uri("/api/vault-map") + .header("authorization", "Bearer eg_readkey") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + // Second request gets 429 + let response = app + .oneshot( + axum::http::Request::builder() + .uri("/api/vault-map") + .header("authorization", "Bearer eg_readkey") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::TOO_MANY_REQUESTS); + assert!(response.headers().get("retry-after").is_some()); + } } From 774fcca25c028ac03ad28a9864d930553ca8b1ec Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:21:18 +0200 Subject: [PATCH 06/10] feat(serve): spawn HTTP server alongside MCP with graceful shutdown Add HttpServeOpts struct and update run_serve to optionally spawn an axum HTTP server as a tokio task before the blocking MCP stdio loop. Uses CancellationToken for coordinated shutdown when MCP exits. Validates --no-auth is only allowed on 127.0.0.1. 
--- src/main.rs | 2 +- src/serve.rs | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index af436b1..1e66bbc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1056,7 +1056,7 @@ async fn main() -> Result<()> { eprintln!("No index found. Run 'engraph index ' first."); std::process::exit(1); } - engraph::serve::run_serve(&data_dir).await?; + engraph::serve::run_serve(&data_dir, None).await?; } Command::Write { action } => { diff --git a/src/serve.rs b/src/serve.rs index 9259275..0146725 100644 --- a/src/serve.rs +++ b/src/serve.rs @@ -730,11 +730,30 @@ impl rmcp::handler::server::ServerHandler for EngraphServer { } } +// --------------------------------------------------------------------------- +// HTTP server options (populated by CLI flags in Task 7) +// --------------------------------------------------------------------------- + +pub struct HttpServeOpts { + pub port: u16, + pub host: String, + pub no_auth: bool, +} + // --------------------------------------------------------------------------- // Entry point // --------------------------------------------------------------------------- -pub async fn run_serve(data_dir: &Path) -> Result<()> { +pub async fn run_serve(data_dir: &Path, http_opts: Option) -> Result<()> { + if let Some(ref opts) = http_opts { + if opts.no_auth && opts.host != "127.0.0.1" { + anyhow::bail!( + "--no-auth cannot be used with --host {} (only 127.0.0.1 is allowed)", + opts.host + ); + } + } + let db_path = data_dir.join("engraph.db"); let models_dir = data_dir.join("models"); @@ -800,6 +819,15 @@ pub async fn run_serve(data_dir: &Path) -> Result<()> { let profile_arc = Arc::new(profile); let recent_writes: RecentWrites = Arc::new(Mutex::new(HashMap::new())); + // Clone Arcs for HTTP server before MCP consumes them + let http_store = store_arc.clone(); + let http_embedder = embedder_arc.clone(); + let http_vault_path = vault_path_arc.clone(); + 
let http_profile = profile_arc.clone(); + let http_orchestrator = orchestrator.as_ref().map(Arc::clone); + let http_reranker = reranker.as_ref().map(Arc::clone); + let http_recent_writes = recent_writes.clone(); + // Start file watcher for real-time index updates let mut exclude = config.exclude.clone(); if let Some(ref prof) = *profile_arc @@ -831,12 +859,45 @@ pub async fn run_serve(data_dir: &Path) -> Result<()> { recent_writes, }; + // Cancellation token for coordinated shutdown of HTTP + MCP + let cancel_token = tokio_util::sync::CancellationToken::new(); + + // Spawn HTTP server as a background task (before MCP blocks on stdio) + if let Some(ref opts) = http_opts { + let config = Config::load()?; + let api_state = crate::http::ApiState { + store: http_store, + embedder: http_embedder, + vault_path: http_vault_path, + profile: http_profile, + orchestrator: http_orchestrator, + reranker: http_reranker, + http_config: Arc::new(config.http.clone()), + no_auth: opts.no_auth, + recent_writes: http_recent_writes, + rate_limiter: Arc::new(crate::http::RateLimiter::new(config.http.rate_limit)), + }; + let router = crate::http::build_router(api_state); + let addr = format!("{}:{}", opts.host, opts.port); + let listener = tokio::net::TcpListener::bind(&addr).await?; + let cancel = cancel_token.clone(); + eprintln!("HTTP server listening on http://{}", addr); + tokio::spawn(async move { + axum::serve(listener, router) + .with_graceful_shutdown(cancel.cancelled_owned()) + .await + .ok(); + }); + } + eprintln!("engraph MCP server starting..."); let transport = rmcp::transport::io::stdio(); let server_handle = server.serve(transport).await?; server_handle.waiting().await?; + cancel_token.cancel(); // triggers HTTP graceful shutdown + // Shut down watcher cleanly after MCP transport exits let _ = watcher_shutdown.send(()); if let Err(e) = watcher_handle.join() { From f8fcb3add75ca7e0f3889fc8e23ac20e6b3c02c3 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 
Mar 2026 22:23:53 +0200 Subject: [PATCH 07/10] feat(cli): add --http/--port/--host/--no-auth to serve, API key management to configure --- src/main.rs | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 93 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 1e66bbc..73fa8cf 100644 --- a/src/main.rs +++ b/src/main.rs @@ -97,6 +97,26 @@ enum Command { /// Register with an AI agent: "claude-code", "cursor", or "windsurf". #[arg(long)] register: Option, + + /// Generate and add a new API key. + #[arg(long)] + add_api_key: bool, + + /// Name for the new API key (requires --add-api-key). + #[arg(long, requires = "add_api_key")] + key_name: Option, + + /// Permissions for the new key: "read" or "write" (requires --add-api-key). + #[arg(long, requires = "add_api_key")] + key_permissions: Option, + + /// List all API keys. + #[arg(long)] + list_api_keys: bool, + + /// Revoke an API key by name. + #[arg(long)] + revoke_api_key: Option, }, /// Manage embedding models. @@ -106,7 +126,20 @@ enum Command { }, /// Start MCP stdio server for AI agent access. - Serve, + Serve { + /// Enable HTTP REST API alongside MCP. + #[arg(long)] + http: bool, + /// HTTP port (default: from config or 3000). + #[arg(long)] + port: Option, + /// HTTP host to bind to (default: 127.0.0.1). + #[arg(long)] + host: Option, + /// Disable API key authentication (local development only, 127.0.0.1 only). + #[arg(long)] + no_auth: bool, + }, /// Inspect vault graph connections. 
Graph { @@ -618,6 +651,11 @@ async fn main() -> Result<()> { enable_obsidian_cli, disable_obsidian_cli, register, + add_api_key, + key_name, + key_permissions, + list_api_keys, + revoke_api_key, } => { let mut cfg = Config::load()?; @@ -717,6 +755,48 @@ async fn main() -> Result<()> { } } + if add_api_key { + let name = key_name.unwrap_or_else(|| "default".into()); + let perms = key_permissions.unwrap_or_else(|| "read".into()); + if perms != "read" && perms != "write" { + anyhow::bail!("Permissions must be 'read' or 'write', got: {perms}"); + } + let key = engraph::http::generate_api_key(); + cfg.http.api_keys.push(engraph::config::ApiKeyConfig { + key: key.clone(), + name: name.clone(), + permissions: perms.clone(), + }); + cfg.save()?; + println!("API key created:"); + println!(" Name: {name}"); + println!(" Permissions: {perms}"); + println!(" Key: {key}"); + println!("\nSave this key — it won't be shown again."); + } + + if list_api_keys { + if cfg.http.api_keys.is_empty() { + println!("No API keys configured."); + } else { + println!("API keys:"); + for k in &cfg.http.api_keys { + println!(" {} ({})", k.name, k.permissions); + } + } + } + + if let Some(ref name) = revoke_api_key { + let before = cfg.http.api_keys.len(); + cfg.http.api_keys.retain(|k| k.name != *name); + if cfg.http.api_keys.len() < before { + cfg.save()?; + println!("Revoked API key: {name}"); + } else { + println!("No API key found with name: {name}"); + } + } + cfg.save()?; println!( "Configuration saved to {}", @@ -1051,12 +1131,22 @@ async fn main() -> Result<()> { } } - Command::Serve => { + Command::Serve { http, port, host, no_auth } => { if !index_exists(&data_dir) { eprintln!("No index found. 
Run 'engraph index ' first."); std::process::exit(1); } - engraph::serve::run_serve(&data_dir, None).await?; + let http_opts = if http { + let cfg = Config::load()?; + Some(engraph::serve::HttpServeOpts { + port: port.unwrap_or(cfg.http.port), + host: host.unwrap_or(cfg.http.host.clone()), + no_auth, + }) + } else { + None + }; + engraph::serve::run_serve(&data_dir, http_opts).await?; } Command::Write { action } => { From 3d954a2f4dfa676a654f8888534b4fbee907f288 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:27:49 +0200 Subject: [PATCH 08/10] fix: resolve clippy warnings and apply cargo fmt for v1.3 --- src/config.rs | 2 +- src/http.rs | 74 ++++++++++++++++++++++++++++++--------------------- src/main.rs | 7 ++++- src/serve.rs | 15 ++++++----- 4 files changed, 58 insertions(+), 40 deletions(-) diff --git a/src/config.rs b/src/config.rs index 51215b0..cd64583 100644 --- a/src/config.rs +++ b/src/config.rs @@ -41,7 +41,7 @@ pub struct HttpConfig { pub enabled: bool, pub port: u16, pub host: String, - pub rate_limit: u32, // requests per minute per key, 0 = unlimited + pub rate_limit: u32, // requests per minute per key, 0 = unlimited pub cors_origins: Vec, pub api_keys: Vec, } diff --git a/src/http.rs b/src/http.rs index c16e390..7fce4b6 100644 --- a/src/http.rs +++ b/src/http.rs @@ -4,7 +4,12 @@ use std::time::Instant; use axum::extract::{Path, Query, State}; use axum::http::{HeaderMap, HeaderValue, Method}; -use axum::{Json, Router, http::StatusCode, response::IntoResponse, routing::{get, post}}; +use axum::{ + Json, Router, + http::StatusCode, + response::IntoResponse, + routing::{get, post}, +}; use serde::Deserialize; use tokio::sync::Mutex; use tower_http::cors::{Any, CorsLayer}; @@ -334,10 +339,7 @@ fn cors_layer(origins: &[String]) -> CorsLayer { .allow_methods(Any) .allow_headers(Any); } - let origins: Vec = origins - .iter() - .filter_map(|o| o.parse().ok()) - .collect(); + let origins: Vec = origins.iter().filter_map(|o| 
o.parse().ok()).collect(); CorsLayer::new() .allow_origin(origins) .allow_methods([Method::GET, Method::POST, Method::OPTIONS]) @@ -433,8 +435,8 @@ async fn handle_read( vault_path: &state.vault_path, profile: state.profile.as_ref().as_ref(), }; - let note = context::context_read(&ctx, &file) - .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let note = + context::context_read(&ctx, &file).map_err(|e| ApiError::internal(&format!("{e:#}")))?; Ok(Json(serde_json::json!(note))) } @@ -485,8 +487,7 @@ async fn handle_vault_map( vault_path: &state.vault_path, profile: state.profile.as_ref().as_ref(), }; - let map = context::vault_map(&ctx) - .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let map = context::vault_map(&ctx).map_err(|e| ApiError::internal(&format!("{e:#}")))?; Ok(Json(serde_json::json!(map))) } @@ -502,8 +503,8 @@ async fn handle_who( vault_path: &state.vault_path, profile: state.profile.as_ref().as_ref(), }; - let person = context::context_who(&ctx, &name) - .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let person = + context::context_who(&ctx, &name).map_err(|e| ApiError::internal(&format!("{e:#}")))?; Ok(Json(serde_json::json!(person))) } @@ -519,8 +520,8 @@ async fn handle_project( vault_path: &state.vault_path, profile: state.profile.as_ref().as_ref(), }; - let proj = context::context_project(&ctx, &name) - .map_err(|e| ApiError::internal(&format!("{e:#}")))?; + let proj = + context::context_project(&ctx, &name).map_err(|e| ApiError::internal(&format!("{e:#}")))?; Ok(Json(serde_json::json!(proj))) } @@ -576,55 +577,64 @@ async fn record_write(recent_writes: &RecentWrites, path: &std::path::Path) { fn parse_frontmatter_ops(operations: &[serde_json::Value]) -> Result, ApiError> { let mut ops = Vec::with_capacity(operations.len()); for op_val in operations { - let op_str = op_val - .get("op") - .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("each operation must have an \"op\" string field"))?; + let op_str = 
op_val.get("op").and_then(|v| v.as_str()).ok_or_else(|| { + ApiError::bad_request("each operation must have an \"op\" string field") + })?; match op_str { "set" => { - let key = op_val - .get("key") - .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"set\" operation requires a \"key\" field"))?; + let key = op_val.get("key").and_then(|v| v.as_str()).ok_or_else(|| { + ApiError::bad_request("\"set\" operation requires a \"key\" field") + })?; let value = op_val .get("value") .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"set\" operation requires a \"value\" field"))?; + .ok_or_else(|| { + ApiError::bad_request("\"set\" operation requires a \"value\" field") + })?; ops.push(FrontmatterOp::Set(key.to_string(), value.to_string())); } "remove" => { - let key = op_val - .get("key") - .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"remove\" operation requires a \"key\" field"))?; + let key = op_val.get("key").and_then(|v| v.as_str()).ok_or_else(|| { + ApiError::bad_request("\"remove\" operation requires a \"key\" field") + })?; ops.push(FrontmatterOp::Remove(key.to_string())); } "add_tag" => { let value = op_val .get("value") .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"add_tag\" operation requires a \"value\" field"))?; + .ok_or_else(|| { + ApiError::bad_request("\"add_tag\" operation requires a \"value\" field") + })?; ops.push(FrontmatterOp::AddTag(value.to_string())); } "remove_tag" => { let value = op_val .get("value") .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"remove_tag\" operation requires a \"value\" field"))?; + .ok_or_else(|| { + ApiError::bad_request("\"remove_tag\" operation requires a \"value\" field") + })?; ops.push(FrontmatterOp::RemoveTag(value.to_string())); } "add_alias" => { let value = op_val .get("value") .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"add_alias\" operation requires a \"value\" field"))?; + .ok_or_else(|| { + 
ApiError::bad_request("\"add_alias\" operation requires a \"value\" field") + })?; ops.push(FrontmatterOp::AddAlias(value.to_string())); } "remove_alias" => { let value = op_val .get("value") .and_then(|v| v.as_str()) - .ok_or_else(|| ApiError::bad_request("\"remove_alias\" operation requires a \"value\" field"))?; + .ok_or_else(|| { + ApiError::bad_request( + "\"remove_alias\" operation requires a \"value\" field", + ) + })?; ops.push(FrontmatterOp::RemoveAlias(value.to_string())); } unknown => { @@ -905,7 +915,9 @@ mod tests { let rate_limiter = Arc::new(RateLimiter::new(config.rate_limit)); ApiState { store: Arc::new(Mutex::new(store)), - embedder: Arc::new(Mutex::new(Box::new(DummyEmbedder) as Box)), + embedder: Arc::new(Mutex::new( + Box::new(DummyEmbedder) as Box + )), vault_path: Arc::new(PathBuf::from("/tmp/test-vault")), profile: Arc::new(None), orchestrator: None, diff --git a/src/main.rs b/src/main.rs index 73fa8cf..a7e3928 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1131,7 +1131,12 @@ async fn main() -> Result<()> { } } - Command::Serve { http, port, host, no_auth } => { + Command::Serve { + http, + port, + host, + no_auth, + } => { if !index_exists(&data_dir) { eprintln!("No index found. 
Run 'engraph index ' first."); std::process::exit(1); diff --git a/src/serve.rs b/src/serve.rs index 0146725..14390d6 100644 --- a/src/serve.rs +++ b/src/serve.rs @@ -745,13 +745,14 @@ pub struct HttpServeOpts { // --------------------------------------------------------------------------- pub async fn run_serve(data_dir: &Path, http_opts: Option) -> Result<()> { - if let Some(ref opts) = http_opts { - if opts.no_auth && opts.host != "127.0.0.1" { - anyhow::bail!( - "--no-auth cannot be used with --host {} (only 127.0.0.1 is allowed)", - opts.host - ); - } + if let Some(ref opts) = http_opts + && opts.no_auth + && opts.host != "127.0.0.1" + { + anyhow::bail!( + "--no-auth cannot be used with --host {} (only 127.0.0.1 is allowed)", + opts.host + ); } let db_path = data_dir.join("engraph.db"); From 1038bcc5a78c452920762a063c0d3ce1469259f5 Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:27:53 +0200 Subject: [PATCH 09/10] docs: update CLAUDE.md, README, CHANGELOG for v1.3 --- CHANGELOG.md | 18 ++++++++ CLAUDE.md | 12 ++++-- README.md | 120 +++++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 138 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d53e904..edc2718 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## v1.3.0 — HTTP/REST Transport (2026-03-26) + +### Added +- **HTTP REST API** (`http.rs`) — axum-based HTTP server alongside MCP, enabled via `engraph serve --http` +- **20 REST endpoints** mirroring all 19 MCP tools + update-metadata +- **API key authentication** — `eg_` prefixed keys with read/write permission levels +- **Rate limiting** — configurable per-key token bucket (requests/minute) +- **CORS** — configurable allowed origins for web-based agents +- **Graceful shutdown** — CancellationToken coordinates MCP + HTTP + watcher exit +- **API key management CLI** — `engraph configure --add-api-key/--list-api-keys/--revoke-api-key` +- **`--no-auth` mode** — local 
development without API keys (127.0.0.1 only) + +### Changed +- `engraph serve` gains `--http`, `--port`, `--host`, `--no-auth` flags +- Module count: 23 → 24 +- Test count: 361 → 385 +- New dependencies: axum, tower-http, tower, rand, tokio-util + ## v1.2.0 — Temporal Search (2026-03-26) ### Added diff --git a/CLAUDE.md b/CLAUDE.md index e2a30c2..62b366b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ Local knowledge graph + intelligence layer for Obsidian vaults. Rust CLI + MCP s ## Architecture -Single binary with 23 modules behind a lib crate: +Single binary with 24 modules behind a lib crate: - `config.rs` — loads `~/.engraph/config.toml` and `vault.toml`, merges CLI args, provides `data_dir()`. Includes `intelligence: Option`, `[models]` section for model overrides, `[obsidian]` section (CLI path, enabled flag), and `[agents]` section (registered AI agent names). `Config::save()` writes back to disk. - `chunker.rs` — smart chunking with break-point scoring algorithm. Finds optimal split points considering headings, code fences, blank lines, and thematic breaks. `split_oversized_chunks()` handles token-aware secondary splitting with overlap @@ -23,6 +23,7 @@ Single binary with 23 modules behind a lib crate: - `writer.rs` — write pipeline orchestrator. 5-step pipeline: resolve tags (fuzzy match + register new), discover links (exact + fuzzy), place in folder, atomic file write (temp + rename), and index update. Supports create, append, update_metadata, move_note, archive, unarchive, edit (section-level replace/prepend/append), rewrite (full content with frontmatter preservation), edit_frontmatter (granular set/remove/add_tag/remove_tag/add_alias/remove_alias ops), and delete (soft archive or hard permanent) operations with mtime-based conflict detection and crash recovery via temp file cleanup - `watcher.rs` — file watcher for `engraph serve`. OS thread producer (notify-debouncer-full, 2s debounce) sends `Vec` over tokio::mpsc to async consumer task. 
Two-pass batch processing: mutations (index_file/remove_file/rename_file) then edge rebuild. Move detection via content hash matching. Placement correction on file moves. Centroid adjustment on file add/remove. Startup reconciliation via `run_index_shared`. `recent_writes` map coordination with MCP server to prevent double re-indexing of files written through the write pipeline - `serve.rs` — MCP stdio server via rmcp SDK. Exposes 19 tools: 8 read (search, read, read_section, list, vault_map, who, project, context) + 10 write (create, append, update_metadata, move_note, archive, unarchive, edit, rewrite, edit_frontmatter, delete) + 1 diagnostic (health). `edit_frontmatter` replaces `update_metadata` for granular frontmatter mutations. EngraphServer struct with Arc+Mutex wrapping for async handlers. Loads intelligence models (orchestrator + reranker) when enabled, wires into `search_with_intelligence`. Spawns file watcher on startup. CLI events table provides audit log for write operations. `recent_writes` map prevents double re-indexing of MCP-written files +- `http.rs` — axum-based HTTP REST API server, enabled via `engraph serve --http`. 20 REST endpoints mirroring all 19 MCP tools + update-metadata. API key authentication with `eg_` prefixed keys and read/write permission levels. Per-key token bucket rate limiting (configurable requests/minute). CORS with configurable allowed origins for web-based agents. `--no-auth` mode for local development (127.0.0.1 only). Graceful shutdown via `CancellationToken` coordinating MCP + HTTP + watcher exit - `graph.rs` — vault graph agent. Extracts wikilink targets, expands search results by following graph connections 1-2 hops. Relevance filtering via FTS5 term check and shared tags - `profile.rs` — vault profile detection. Auto-detects PARA/Folders/Flat structure, vault type (Obsidian/Logseq/Plain), wikilinks, frontmatter, tags. 
Content-based role detection for people/daily/archive folders by content patterns (not just names). Writes/loads `vault.toml` - `store.rs` — SQLite persistence. Tables: `meta`, `files` (with docid, created_by), `chunks` (with vector BLOBs), `chunks_fts` (FTS5), `edges` (vault graph), `tombstones`, `tag_registry`, `folder_centroids`, `placement_corrections`, `link_skiplist` (reserved), `llm_cache` (orchestrator result cache), `cli_events` (audit log for CLI operations). `vec_chunks` virtual table (sqlite-vec) for KNN search. Dynamic embedding dimension stored in meta. `has_dimension_mismatch()` and `reset_for_reindex()` for migration. Enhanced `resolve_file()` with fuzzy Levenshtein matching as final fallback @@ -30,7 +31,7 @@ Single binary with 23 modules behind a lib crate: - `temporal.rs` — temporal search lane. Extracts note dates from frontmatter `date:` field or `YYYY-MM-DD` filename patterns. Heuristic date parsing for natural language ("today", "yesterday", "last week", "this month", "recent", month names, ISO dates, date ranges). Smooth decay scoring for files near but outside target date range. Provides `extract_note_date()` for indexing and `score_temporal()` + `parse_date_range_heuristic()` for search - `search.rs` — hybrid search orchestrator. `search_with_intelligence()` runs the full pipeline: orchestrate (intent + expansions) → 5-lane RRF retrieval (semantic + FTS5 + graph + reranker + temporal) per expansion → two-pass RRF fusion. `search_internal()` is a thin wrapper without intelligence models. Adaptive lane weights per query intent including temporal (1.5 weight for time-aware queries). Results display normalized confidence percentages (0-100%) instead of raw RRF scores. -`main.rs` is a thin clap CLI (async via `#[tokio::main]`). 
Subcommands: `index` (with progress bar), `search` (with `--explain`, loads intelligence models when enabled), `status` (shows intelligence state + date coverage stats), `clear`, `init` (intelligence onboarding prompt, detects Obsidian CLI + AI agents), `configure` (`--enable-intelligence`, `--disable-intelligence`, `--model`, `--obsidian-cli`, `--no-obsidian-cli`, `--agent`), `models`, `graph` (show/stats), `context` (read/list/vault-map/who/project/topic), `write` (create/append/update-metadata/move/edit/rewrite/edit-frontmatter/delete), `serve` (MCP stdio server with file watcher + intelligence). +`main.rs` is a thin clap CLI (async via `#[tokio::main]`). Subcommands: `index` (with progress bar), `search` (with `--explain`, loads intelligence models when enabled), `status` (shows intelligence state + date coverage stats), `clear`, `init` (intelligence onboarding prompt, detects Obsidian CLI + AI agents), `configure` (`--enable-intelligence`, `--disable-intelligence`, `--model`, `--obsidian-cli`, `--no-obsidian-cli`, `--agent`, `--add-api-key`, `--list-api-keys`, `--revoke-api-key`), `models`, `graph` (show/stats), `context` (read/list/vault-map/who/project/topic), `write` (create/append/update-metadata/move/edit/rewrite/edit-frontmatter/delete), `serve` (MCP stdio server with file watcher + intelligence + optional `--http`/`--port`/`--host`/`--no-auth` for HTTP REST API). ## Key patterns @@ -69,12 +70,17 @@ Single vault only. 
Re-indexing a different vault path triggers a confirmation pr - `ignore` (0.4) — vault walking with `.gitignore` support - `rusqlite` (0.32) — bundled SQLite with FTS5 support - `rmcp` (1.2) — MCP server SDK for stdio transport +- `axum` — HTTP framework for REST API server +- `tower-http` — CORS middleware for axum +- `tower` — middleware layer utilities +- `rand` — random API key generation +- `tokio-util` — `CancellationToken` for graceful multi-server shutdown - `notify` (7.0) — cross-platform filesystem notification (FSEvents on macOS, inotify on Linux) - `notify-debouncer-full` (0.4) — debouncing + best-effort inode-based rename tracking ## Testing -- Unit tests in each module (`cargo test --lib`) — 361 tests, no network required +- Unit tests in each module (`cargo test --lib`) — 385 tests, no network required - Integration tests (`cargo test --test integration -- --ignored`) — require GGUF model download - Build requires CMake (for llama.cpp C++ compilation) diff --git a/README.md b/README.md index 1ec196d..5a1dc04 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ Plain vector search treats your notes as isolated documents. But knowledge isn't - **5-lane hybrid search** — semantic embeddings + BM25 full-text + graph expansion + cross-encoder reranking + temporal scoring, fused via [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf). An LLM orchestrator classifies queries and adapts lane weights per intent. Time-aware queries like "what happened last week" or "March 2026 notes" activate the temporal lane automatically. - **MCP server for AI agents** — `engraph serve` exposes 19 tools (search, read, section-level editing, frontmatter mutations, vault health, context bundles, note creation) that Claude, Cursor, or any MCP client can call directly. +- **HTTP REST API** — `engraph serve --http` adds an axum-based HTTP server alongside MCP with 20 REST endpoints, API key authentication, rate limiting, and CORS. 
Web-based agents and scripts can query your vault with simple `curl` calls. - **Section-level editing** — AI agents can read, replace, prepend, or append to specific sections by heading. Full note rewriting with frontmatter preservation. Granular frontmatter mutations (set/remove fields, add/remove tags and aliases). - **Vault health diagnostics** — detect orphan notes, broken wikilinks, stale content, and tag hygiene issues. Available as MCP tool and CLI command. - **Obsidian CLI integration** — auto-detects running Obsidian and delegates compatible operations. Circuit breaker (Closed/Degraded/Open) ensures graceful fallback. @@ -51,17 +52,16 @@ Your vault (markdown files) │ engraph serve │ │ │ │ MCP Server (stdio) + File Watcher │ +│ + HTTP REST API (--http, optional) │ │ │ │ Search: Orchestrator → 4-lane retrieval │ │ → Reranker → Two-pass RRF fusion │ │ │ -│ 19 tools: search, read, read_section, │ -│ edit, rewrite, edit_frontmatter, delete, │ -│ health, context, who, project, create... │ +│ 19 MCP tools + 20 REST endpoints │ └─────────────────────────────────────────────┘ │ ▼ - Claude / Cursor / any MCP client + Claude / Cursor / any MCP client / curl / web agents ``` 1. **Index** — walks your vault, chunks markdown by headings, embeds with a local GGUF model via llama.cpp (Metal GPU on macOS), stores everything in SQLite with FTS5 + sqlite-vec + a wikilink graph @@ -129,6 +129,32 @@ engraph serve Now Claude can search your vault, read notes, build context bundles, and create new notes — all through structured tool calls. 
+**Enable HTTP REST API:** + +```bash +# Start MCP + HTTP server on port 3030 +engraph serve --http + +# Custom port and host +engraph serve --http --port 8080 --host 0.0.0.0 + +# Local development without API keys (127.0.0.1 only) +engraph serve --http --no-auth +``` + +**API key management:** + +```bash +# Add a new API key (read or write permission) +engraph configure --add-api-key + +# List existing keys +engraph configure --list-api-keys + +# Revoke a key +engraph configure --revoke-api-key eg_abc123... +``` + **Enable intelligence (optional, ~1.3GB download):** ```bash @@ -234,6 +260,81 @@ engraph context health Returns orphan notes (no links in or out), broken wikilinks, stale notes, and tag hygiene issues. +## HTTP REST API + +`engraph serve --http` adds a full REST API alongside the MCP server, exposing the same capabilities over HTTP for web agents, scripts, and integrations. + +**20 endpoints:** + +| Method | Endpoint | Permission | Description | +|--------|----------|------------|-------------| +| GET | `/api/health-check` | read | Server health check | +| POST | `/api/search` | read | Hybrid search (semantic + FTS5 + graph + reranker + temporal) | +| GET | `/api/read/{file}` | read | Read full note content + metadata | +| GET | `/api/read-section` | read | Read a specific section by heading | +| GET | `/api/list` | read | List notes with optional tag/folder/created_by filters | +| GET | `/api/vault-map` | read | Vault structure overview (folders, tags, recent files) | +| GET | `/api/who/{name}` | read | Person context bundle | +| GET | `/api/project/{name}` | read | Project context bundle | +| POST | `/api/context` | read | Rich topic context with token budget | +| GET | `/api/health` | read | Vault health diagnostics | +| POST | `/api/create` | write | Create a new note | +| POST | `/api/append` | write | Append content to existing note | +| POST | `/api/edit` | write | Section-level editing (replace/prepend/append) | +| POST | `/api/rewrite` | write 
| Full note rewrite (preserves frontmatter) | +| POST | `/api/edit-frontmatter` | write | Granular frontmatter mutations | +| POST | `/api/move` | write | Move note to different folder | +| POST | `/api/archive` | write | Soft-delete (archive) a note | +| POST | `/api/unarchive` | write | Restore archived note | +| POST | `/api/update-metadata` | write | Update note metadata | +| POST | `/api/delete` | write | Delete note (soft or hard) | + +**Authentication:** + +All requests require an API key via the `Authorization` header: + +```bash +curl -H "Authorization: Bearer eg_abc123..." http://localhost:3030/api/vault-map +``` + +Keys have either `read` or `write` permission. Write keys can access all endpoints; read keys are restricted to read-only endpoints. Use `--no-auth` for local development without keys (127.0.0.1 only). + +**curl examples:** + +```bash +# Search +curl -X POST http://localhost:3030/api/search \ + -H "Authorization: Bearer eg_..." \ + -H "Content-Type: application/json" \ + -d '{"query": "authentication architecture", "top_n": 5}' + +# Read a note +curl http://localhost:3030/api/read/01-Projects/API-Design.md \ + -H "Authorization: Bearer eg_..." + +# Create a note +curl -X POST http://localhost:3030/api/create \ + -H "Authorization: Bearer eg_..." \ + -H "Content-Type: application/json" \ + -d '{"content": "# Meeting Notes\n\nDiscussed auth timeline.", "tags": ["meeting", "auth"]}' +``` + +**Rate limiting:** Configurable per-key token bucket (requests per minute). Defaults to 60 req/min. Returns `429 Too Many Requests` when exceeded. + +**CORS:** Configurable allowed origins in `config.toml` under `[http]`. Defaults to allow all origins for local development. + +```toml +[http] +port = 3030 +host = "127.0.0.1" +cors_origins = ["http://localhost:3000", "https://myapp.example.com"] +rate_limit = 60 + +[[http.api_keys]] +key = "eg_..." 
+permissions = "write"
+```
+
 ## Use cases
 
 **AI-assisted knowledge work** — Give Claude or Cursor deep access to your personal knowledge base. Instead of copy-pasting context, the agent searches, reads, and cross-references your notes directly.
@@ -251,7 +352,7 @@ Returns orphan notes (no links in or out), broken wikilinks, stale notes, and ta
 | Search method | 5-lane RRF (semantic + BM25 + graph + reranker + temporal) | Vector similarity only | Keyword only |
 | Query understanding | LLM orchestrator classifies intent, adapts weights | None | None |
 | Understands note links | Yes (wikilink graph traversal) | No | Limited (backlinks panel) |
-| AI agent access | MCP server (19 tools) | Custom API needed | No |
+| AI agent access | MCP server (19 tools) + HTTP REST API (20 endpoints) | Custom API needed | No |
 | Write capability | Create/edit/rewrite/delete with smart filing | No | Manual |
 | Vault health | Orphans, broken links, stale notes, tag hygiene | No | Limited |
 | Real-time sync | File watcher, 2s debounce | Manual re-index | N/A |
@@ -269,6 +370,7 @@ engraph is not a replacement for Obsidian — it's the intelligence layer that s
 - llama.cpp inference via Rust bindings (GGUF models, Metal GPU on macOS, CUDA on Linux)
 - Intelligence opt-in: heuristic fallback when disabled, LLM-powered when enabled
 - MCP server with 19 tools (8 read, 10 write, 1 diagnostic) via stdio
+- HTTP REST API with 20 endpoints, API key auth (`eg_` prefix), rate limiting, CORS — enabled via `engraph serve --http`
 - Section-level reading and editing: target specific headings with replace/prepend/append modes
 - Full note rewriting with automatic frontmatter preservation
 - Granular frontmatter mutations: set/remove fields, add/remove tags and aliases
@@ -283,7 +385,7 @@ engraph is not a replacement for Obsidian — it's the intelligence layer that s
 - Enhanced file resolution with fuzzy Levenshtein matching fallback
 - Content-based folder role detection (people, daily, archive) by content 
patterns - Configurable model overrides for multilingual support -- 361 unit tests, CI on macOS + Ubuntu +- 385 unit tests, CI on macOS + Ubuntu ## Roadmap @@ -293,7 +395,7 @@ engraph is not a replacement for Obsidian — it's the intelligence layer that s - [x] ~~Vault health monitor — orphan notes, broken links, stale content, tag hygiene~~ (v1.1) - [x] ~~Obsidian CLI integration — auto-detect and delegate with circuit breaker~~ (v1.1) - [x] ~~Temporal search — find notes by time period, date-aware queries~~ (v1.2) -- [ ] HTTP/REST API — complement MCP with a standard web API (v1.3) +- [x] ~~HTTP/REST API — complement MCP with a standard web API~~ (v1.3) - [ ] Multi-vault — search across multiple vaults (v1.4) ## Configuration @@ -328,7 +430,7 @@ All data stored in `~/.engraph/` — single SQLite database (~10MB typical), GGU ## Development ```bash -cargo test --lib # 361 unit tests, no network (requires CMake for llama.cpp) +cargo test --lib # 385 unit tests, no network (requires CMake for llama.cpp) cargo clippy -- -D warnings cargo fmt --check @@ -340,7 +442,7 @@ cargo test --test integration -- --ignored Contributions welcome. Please open an issue first to discuss what you'd like to change. -The codebase is 23 Rust modules behind a lib crate. `CLAUDE.md` in the repo root has detailed architecture documentation for AI-assisted development. +The codebase is 24 Rust modules behind a lib crate. `CLAUDE.md` in the repo root has detailed architecture documentation for AI-assisted development. 
## License From 1afb8c2deb76138ffdf3aa17dd767a6476e092cc Mon Sep 17 00:00:00 2001 From: Oleksandr Ostrovskyi Date: Thu, 26 Mar 2026 22:31:35 +0200 Subject: [PATCH 10/10] fix(serve): keep HTTP running when MCP transport is unavailable --- src/serve.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/serve.rs b/src/serve.rs index 14390d6..9e0a9fd 100644 --- a/src/serve.rs +++ b/src/serve.rs @@ -894,8 +894,20 @@ pub async fn run_serve(data_dir: &Path, http_opts: Option) -> Res eprintln!("engraph MCP server starting..."); let transport = rmcp::transport::io::stdio(); - let server_handle = server.serve(transport).await?; - server_handle.waiting().await?; + match server.serve(transport).await { + Ok(server_handle) => { + server_handle.waiting().await?; + } + Err(e) => { + if http_opts.is_some() { + // MCP transport failed (e.g., no stdin) but HTTP is running — stay alive + eprintln!("MCP transport unavailable ({e:#}), HTTP server still running..."); + cancel_token.cancelled().await; + } else { + return Err(anyhow::anyhow!("{e}")); + } + } + } cancel_token.cancel(); // triggers HTTP graceful shutdown