diff --git a/api/.cargo/config.toml b/api/.cargo/config.toml index 19f0a3a5..2ebe0f5f 100644 --- a/api/.cargo/config.toml +++ b/api/.cargo/config.toml @@ -1,2 +1,7 @@ [env] SQLX_OFFLINE = "true" + +[alias] +checkw = "check --workspace" +testw = "test --workspace" +clippyw = "clippy --workspace --all-targets" diff --git a/api/Cargo.lock b/api/Cargo.lock index cc5d7c62..6956e571 100644 --- a/api/Cargo.lock +++ b/api/Cargo.lock @@ -155,62 +155,41 @@ checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" name = "api" version = "0.1.0" dependencies = [ - "aead", - "aes-gcm", - "ammonia", "anyhow", - "argon2", - "async-lock", + "bootstrap", +] + +[[package]] +name = "application" +version = "0.1.0" +dependencies = [ + "anyhow", "async-trait", - "aws-config", - "aws-sdk-s3", - "axum", "base64 0.21.7", "chrono", - "clap", - "comrak", - "dotenvy", - "extism", + "contracts", + "domain", "futures-core", "futures-util", - "git2", "hex", "hmac", "htmlescape", "http 1.3.1", - "jsonwebtoken", "mime_guess", - "notify", "once_cell", - "pandoc", - "password-hash 0.5.0", "rand 0.8.5", - "redis", "regex", - "reqwest 0.11.27", - "semver", "serde", "serde_json", "serde_yaml", "sha2", "similar", - "sqlx", - "syntect", - "syntect-assets", - "tempfile", "thiserror 1.0.69", "tokio", - "tokio-stream", - "tower-http", "tracing", - "tracing-subscriber", "urlencoding", - "utoipa", - "utoipa-swagger-ui", "uuid", - "walkdir", "yrs", - "yrs-warp", "zip 0.6.6", ] @@ -921,6 +900,35 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bootstrap" +version = "0.1.0" +dependencies = [ + "anyhow", + "application", + "axum", + "chrono", + "domain", + "dotenvy", + "futures-util", + "http 1.3.1", + "infrastructure", + "once_cell", + "presentation", + "serde", + "serde_json", + "serde_yaml", + "sqlx", + "tokio", + "tokio-stream", + "tower-http", + "tracing", + "tracing-subscriber", + "utoipa", + "utoipa-swagger-ui", + "uuid", +] + [[package]] name = "bstr" version = "1.12.0" @@ -1181,6 +1189,23 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +[[package]] +name = "cli" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "bootstrap", + "chrono", + "clap", + "dotenvy", + "serde_json", + "sqlx", + "tokio", + "utoipa", + "uuid", +] + [[package]] name = "cmake" version = "0.1.54" @@ -1260,6 +1285,14 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "contracts" +version = "0.1.0" +dependencies = [ + "serde", + "utoipa", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1758,6 +1791,15 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "domain" +version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "uuid", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -2828,6 +2870,62 @@ dependencies = [ "serde", ] +[[package]] +name = "infrastructure" +version = "0.1.0" +dependencies = [ + "aead", + "aes-gcm", + "ammonia", + "anyhow", + "application", + "argon2", + "async-trait", + "aws-config", + "aws-sdk-s3", + "base64 0.21.7", + "chrono", + "comrak", + "domain", + "dotenvy", + "extism", + "futures-core", + "futures-util", + "git2", + "hex", + "hmac", + "htmlescape", + "http 1.3.1", + "jsonwebtoken", + "mime_guess", + "notify", + "once_cell", + "pandoc", + "password-hash 0.5.0", + "rand 0.8.5", + "redis", + "regex", + 
"reqwest 0.11.27", + "semver", + "serde", + "serde_json", + "sha2", + "sqlx", + "syntect", + "syntect-assets", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "urlencoding", + "uuid", + "walkdir", + "yrs", + "yrs-warp", + "zip 0.6.6", +] + [[package]] name = "inotify" version = "0.9.6" @@ -3921,6 +4019,29 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "presentation" +version = "0.1.0" +dependencies = [ + "anyhow", + "application", + "axum", + "chrono", + "contracts", + "domain", + "futures-util", + "http 1.3.1", + "rand 0.8.5", + "serde", + "serde_json", + "tokio", + "tracing", + "utoipa", + "uuid", + "yrs", + "yrs-warp", +] + [[package]] name = "prettyplease" version = "0.2.37" @@ -4231,6 +4352,16 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "refmd-bins" +version = "0.1.0" +dependencies = [ + "anyhow", + "api", + "cli", + "tokio", +] + [[package]] name = "regalloc2" version = "0.11.2" diff --git a/api/Cargo.toml b/api/Cargo.toml index 736013b7..aeef882c 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -1,67 +1,32 @@ [package] -name = "api" +name = "refmd-bins" version = "0.1.0" edition = "2024" +publish = false + +[[bin]] +name = "api" +path = "src/bin/api.rs" + +[[bin]] +name = "refmd" +path = "src/bin/cli.rs" [dependencies] -axum = { version = "0.7", features = ["macros", "json", "multipart", "ws"] } -tokio = { version = "1.46", features = ["rt-multi-thread", "macros", "signal", "process"] } -tower-http = { version = "0.6", features = ["cors", "trace", "fs"] } -tracing = "0.1" -tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -serde_yaml = "0.9" -tokio-stream = { version = "0.1", features = ["sync"] } -dotenvy = "0.15" -http = "1" -futures-util = { version = "0.3", features = ["sink"] } -futures-core = "0.3" -yrs = { version = "0.24", features = ["sync"] } -uuid = { version = "1", features = ["v4", "serde"] } anyhow = "1" -chrono = { version = "0.4", features = ["serde", "clock"] } -yrs-warp = "0.9" -async-trait = "0.1" -async-lock = "3" -extism = { version = "1" } - -# Markdown rendering -comrak = { version = "0.22" } -ammonia = { version = "3" } +tokio = { version = "1.46", features = ["rt-multi-thread", "macros"] } +api_app = { package = "api", path = "crates/api" } +cli_app = { package = "cli", path = "crates/cli" } -# Database & security -sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "uuid", "chrono", "macros"] } -argon2 = "0.5" -password-hash = "0.5" -jsonwebtoken = { version = "9", default-features = false, features = ["use_pem"] } -thiserror = "1" -once_cell = "1" -regex = "1" -utoipa = { version = "4", features = ["axum_extras", "chrono", "uuid"] } -utoipa-swagger-ui = { version = "7", features = ["axum"] } -aes-gcm = "0.10" -aead = "0.5" -sha2 = "0.10" -hex = "0.4" -hmac = "0.12" -syntect = { version = "5", default-features = true } -htmlescape = "0.3" -syntect-assets = "0.23" -rand = "0.8" -base64 = "0.21" -git2 = { version = "0.18", default-features = true, features = ["vendored-libgit2"] } -reqwest = { version = "0.11", features = ["json", "stream", "rustls-tls"] } -zip = { version = "0.6" } -urlencoding = "2" -mime_guess = "2" -similar = "2" -aws-config = { version = "1", features = ["behavior-version-latest"] } -aws-sdk-s3 = "1" -walkdir = "2.5" -tempfile = "3" -redis 
= { version = "0.27", features = ["tokio-comp", "aio", "streams", "script", "connection-manager"] } -semver = "1" -pandoc = "0.8" -notify = "6" -clap = { version = "4.5", features = ["derive"] } +[workspace] +resolver = "3" +members = [ + "crates/domain", + "crates/contracts", + "crates/application", + "crates/presentation", + "crates/infrastructure", + "crates/bootstrap", + "crates/api", + "crates/cli", +] diff --git a/api/Dockerfile b/api/Dockerfile index c2da6c13..2f9819a1 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -1,5 +1,5 @@ # -- Builder stage ----------------------------------------------------- -FROM rustlang/rust:nightly-bullseye-slim AS build +FROM rust:1.92-slim-bullseye AS build WORKDIR /build # Build dependencies once to leverage Docker layer caching @@ -7,7 +7,8 @@ RUN apt-get update \ && apt-get install -y --no-install-recommends pkg-config libssl-dev ca-certificates binutils curl \ && rm -rf /var/lib/apt/lists/* COPY Cargo.toml Cargo.lock ./ -RUN mkdir src && echo "fn main(){}" > src/main.rs && cargo build --locked --release --bin api || true +RUN mkdir -p src/bin && echo "fn main(){}" > src/bin/api.rs && echo "fn main(){}" > src/bin/cli.rs \ + && cargo build --locked --release --bin api --bin refmd || true # Actual sources COPY . ./ @@ -40,7 +41,7 @@ RUN useradd -m -u 10001 appuser \ && mkdir -p /data/uploads \ && chown -R appuser:appuser /app /data -COPY docker/entrypoint.sh /entrypoint.sh +COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh USER root diff --git a/api/crates/api/Cargo.toml b/api/crates/api/Cargo.toml new file mode 100644 index 00000000..cc58f67a --- /dev/null +++ b/api/crates/api/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "api" +version = "0.1.0" +edition = "2024" + +[dependencies] +anyhow = "1" +bootstrap = { path = "../bootstrap" } diff --git a/api/crates/api/src/lib.rs b/api/crates/api/src/lib.rs new file mode 100644 index 00000000..6f51a49d --- /dev/null +++ b/api/crates/api/src/lib.rs @@ -0,0 +1,3 @@ +pub async fn run() -> anyhow::Result<()> { + bootstrap::app::run().await +} diff --git a/api/crates/application/Cargo.toml b/api/crates/application/Cargo.toml new file mode 100644 index 00000000..1768a623 --- /dev/null +++ b/api/crates/application/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "application" +version = "0.1.0" +edition = "2024" + +[dependencies] +domain = { path = "../domain" } +contracts = { path = "../contracts" } + +anyhow = "1" +async-trait = "0.1" +base64 = "0.21" +chrono = { version = "0.4", features = ["serde", "clock"] } +futures-core = "0.3" +futures-util = { version = "0.3", features = ["sink"] } +hex = "0.4" +htmlescape = "0.3" +hmac = "0.12" +http = "1" +mime_guess = "2" +once_cell = "1" +rand = "0.8" +regex = "1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +serde_yaml = "0.9" +sha2 = "0.10" +similar = "2" +thiserror = "1" +tracing = "0.1" +urlencoding = "2" +uuid = { version = "1", features = ["v4", "serde"] } +yrs = { version = "0.24", features = ["sync"] } +zip = { version = "0.6" } + +[dev-dependencies] +tokio = { version = "1.46", features = ["rt-multi-thread", "macros", "sync"] } diff --git a/api/crates/application/src/core/dtos/markdown.rs b/api/crates/application/src/core/dtos/markdown.rs new file mode 100644 index 00000000..182d917c --- /dev/null +++ b/api/crates/application/src/core/dtos/markdown.rs @@ -0,0 +1,30 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Deserialize, Serialize, Default, Clone)] +#[serde(default)] +pub struct RenderOptions { + pub 
flavor: Option<String>, + pub theme: Option<String>, + pub features: Option<Vec<String>>, + pub sanitize: Option<bool>, + pub hardbreaks: Option<bool>, + pub doc_id: Option<String>, + pub base_origin: Option<String>, + pub absolute_attachments: Option<bool>, + pub token: Option<String>, +} + +#[derive(Debug, Serialize, Clone)] +pub struct PlaceholderItem { + pub kind: String, + pub id: String, + pub code: String, +} + +#[derive(Debug, Serialize, Clone)] +pub struct RenderResponse { + pub html: String, + #[serde(skip_serializing_if = "Vec::is_empty")] + pub placeholders: Vec<PlaceholderItem>, + pub hash: String, +} diff --git a/api/crates/application/src/core/dtos/mod.rs b/api/crates/application/src/core/dtos/mod.rs new file mode 100644 index 00000000..930ac009 --- /dev/null +++ b/api/crates/application/src/core/dtos/mod.rs @@ -0,0 +1,4 @@ +pub use contracts::core::dtos::*; + +pub mod markdown; +pub mod storage_ingest; diff --git a/api/crates/application/src/core/dtos/storage_ingest.rs b/api/crates/application/src/core/dtos/storage_ingest.rs new file mode 100644 index 00000000..6f164247 --- /dev/null +++ b/api/crates/application/src/core/dtos/storage_ingest.rs @@ -0,0 +1,17 @@ +use serde_json::Value; + +use crate::core::ports::storage::storage_ingest_queue::StorageIngestKind; + +#[derive(Debug, Clone)] +pub struct IngestBatch { + pub events: Vec<IngestEvent>, +} + +#[derive(Debug, Clone)] +pub struct IngestEvent { + pub repo_path: String, + pub kind: StorageIngestKind, + pub backend: Option<String>, + pub content_hash: Option<String>, + pub payload: Option<Value>, +} diff --git a/api/crates/application/src/core/mod.rs b/api/crates/application/src/core/mod.rs new file mode 100644 index 00000000..2e8e16cf --- /dev/null +++ b/api/crates/application/src/core/mod.rs @@ -0,0 +1,4 @@ +pub mod dtos; +pub mod ports; +pub mod services; +pub mod use_cases; diff --git a/api/crates/application/src/core/ports/errors.rs b/api/crates/application/src/core/ports/errors.rs new file mode 100644 index 00000000..843a7014 --- /dev/null +++ b/api/crates/application/src/core/ports/errors.rs @@ -0,0 +1,39 @@ +use std::fmt; +use std::ops::Deref; + +#[derive(Debug)] +pub struct PortError(anyhow::Error); + +impl PortError { + pub fn into_anyhow(self) -> anyhow::Error { + self.0 + } +} + +impl Deref for PortError { + type Target = anyhow::Error; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl From<anyhow::Error> for PortError { + fn from(err: anyhow::Error) -> Self { + Self(err) + } +} + +impl fmt::Display for PortError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +pub type PortResult<T> = Result<T, PortError>; + +impl From<PortError> for anyhow::Error { + fn from(err: PortError) -> Self { + err.into_anyhow() + } +} diff --git a/api/src/application/ports/health_probe.rs b/api/crates/application/src/core/ports/health_probe.rs similarity index 64% rename from api/src/application/ports/health_probe.rs rename to api/crates/application/src/core/ports/health_probe.rs index 8a2fbac9..63f76045 100644 --- a/api/src/application/ports/health_probe.rs +++ b/api/crates/application/src/core/ports/health_probe.rs @@ -1,5 +1,7 @@ use async_trait::async_trait; +use crate::core::ports::errors::PortResult; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum HealthStatus { Healthy, @@ -8,5 +10,5 @@ pub enum HealthStatus { #[async_trait] pub trait HealthProbe: Send + Sync { - async fn probe(&self) -> anyhow::Result<HealthStatus>; + async fn probe(&self) -> PortResult<HealthStatus>; } diff --git a/api/crates/application/src/core/ports/markdown_renderer.rs b/api/crates/application/src/core/ports/markdown_renderer.rs new file mode 100644 index 00000000..c2878731
--- /dev/null +++ b/api/crates/application/src/core/ports/markdown_renderer.rs @@ -0,0 +1,13 @@ +use std::collections::HashSet; + +use crate::core::dtos::markdown::{RenderOptions, RenderResponse}; +use crate::core::ports::errors::PortResult; + +pub trait MarkdownRenderer: Send + Sync { + fn render( + &self, + text: String, + opts: RenderOptions, + placeholder_kinds: Option<&HashSet<String>>, + ) -> PortResult<RenderResponse>; +} diff --git a/api/crates/application/src/core/ports/mod.rs b/api/crates/application/src/core/ports/mod.rs new file mode 100644 index 00000000..90238d46 --- /dev/null +++ b/api/crates/application/src/core/ports/mod.rs @@ -0,0 +1,4 @@ +pub mod errors; +pub mod health_probe; +pub mod markdown_renderer; +pub mod storage; diff --git a/api/crates/application/src/core/ports/storage/mod.rs b/api/crates/application/src/core/ports/storage/mod.rs new file mode 100644 index 00000000..1e3f6312 --- /dev/null +++ b/api/crates/application/src/core/ports/storage/mod.rs @@ -0,0 +1,5 @@ +pub mod storage_ingest_queue; +pub mod storage_port; +pub mod storage_projection_queue; +pub mod storage_reconcile_backend; +pub mod storage_reconcile_jobs; diff --git a/api/src/application/ports/storage_ingest_queue.rs b/api/crates/application/src/core/ports/storage/storage_ingest_queue.rs similarity index 77% rename from api/src/application/ports/storage_ingest_queue.rs rename to api/crates/application/src/core/ports/storage/storage_ingest_queue.rs index 8c736823..5f34c05f 100644 --- a/api/src/application/ports/storage_ingest_queue.rs +++ b/api/crates/application/src/core/ports/storage/storage_ingest_queue.rs @@ -3,6 +3,9 @@ use chrono::{DateTime, Utc}; use serde_json::Value; use uuid::Uuid; +use crate::core::ports::errors::PortResult; +use domain::storage::ingest_backend::StorageIngestBackend; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum StorageIngestKind { Upsert, @@ -25,7 +28,7 @@ pub struct StorageIngestEvent { pub user_id: Uuid, pub actor_id: Option<Uuid>, pub repo_path: String, - pub backend: String, + pub backend: StorageIngestBackend, pub kind: StorageIngestKind, pub content_hash: Option<String>, pub payload: Option<Value>, @@ -44,29 +47,30 @@ pub struct StorageIngestQueueStats { #[async_trait] pub trait StorageIngestQueue: Send + Sync { + #[allow(clippy::too_many_arguments)] async fn enqueue_event( &self, workspace_id: Uuid, user_id: Uuid, actor_id: Option<Uuid>, repo_path: &str, - backend: &str, + backend: StorageIngestBackend, kind: StorageIngestKind, content_hash: Option<&str>, payload: Option<Value>, permission_snapshot: &[String], - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn fetch_next_event(&self) -> anyhow::Result<Option<StorageIngestEvent>>; + async fn fetch_next_event(&self) -> PortResult<Option<StorageIngestEvent>>; - async fn complete_event(&self, event_id: i64, locked_at: DateTime<Utc>) -> anyhow::Result<()>; + async fn complete_event(&self, event_id: i64, locked_at: DateTime<Utc>) -> PortResult<()>; async fn fail_event( &self, event_id: i64, locked_at: DateTime<Utc>, error: &str, - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn stats(&self) -> anyhow::Result<StorageIngestQueueStats>; + async fn stats(&self) -> PortResult<StorageIngestQueueStats>; } diff --git a/api/src/application/ports/storage_port.rs b/api/crates/application/src/core/ports/storage/storage_port.rs similarity index 50% rename from api/src/application/ports/storage_port.rs rename to api/crates/application/src/core/ports/storage/storage_port.rs index 1510f0f3..2add6edd 100644 --- a/api/src/application/ports/storage_port.rs +++ b/api/crates/application/src/core/ports/storage/storage_port.rs @@ -2,6 +2,8 @@ use async_trait::async_trait; use
std::path::{Path, PathBuf}; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + #[derive(Debug, Clone)] pub struct StoredAttachment { pub filename: String, @@ -12,28 +14,28 @@ pub struct StoredAttachment { #[async_trait] pub trait StorageResolverPort: Send + Sync { - async fn build_doc_dir(&self, doc_id: Uuid) -> anyhow::Result<PathBuf>; - async fn build_doc_file_path(&self, doc_id: Uuid) -> anyhow::Result<PathBuf>; + async fn build_doc_dir(&self, doc_id: Uuid) -> PortResult<PathBuf>; + async fn build_doc_file_path(&self, doc_id: Uuid) -> PortResult<PathBuf>; fn relative_from_uploads(&self, abs: &Path) -> String; fn user_repo_dir(&self, user_id: Uuid) -> String; fn absolute_from_relative(&self, rel: &str) -> PathBuf; - async fn resolve_upload_path(&self, doc_id: Uuid, rest_path: &str) -> anyhow::Result<PathBuf>; - async fn read_bytes(&self, abs_path: &Path) -> anyhow::Result<Vec<u8>>; - async fn exists(&self, abs_path: &Path) -> anyhow::Result<bool>; - async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> anyhow::Result<()>; + async fn resolve_upload_path(&self, doc_id: Uuid, rest_path: &str) -> PortResult<PathBuf>; + async fn read_bytes(&self, abs_path: &Path) -> PortResult<Vec<u8>>; + async fn exists(&self, abs_path: &Path) -> PortResult<bool>; + async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> PortResult<()>; async fn store_doc_attachment( &self, doc_id: Uuid, original_filename: Option<&str>, bytes: &[u8], - ) -> anyhow::Result<StoredAttachment>; + ) -> PortResult<StoredAttachment>; } #[async_trait] pub trait StorageProjectionPort: Send + Sync { - async fn move_folder_subtree(&self, folder_id: Uuid) -> anyhow::Result; - async fn delete_doc_physical(&self, doc_id: Uuid) -> anyhow::Result<()>; - async fn delete_folder_physical(&self, folder_id: Uuid) -> anyhow::Result; - async fn sync_doc_paths(&self, doc_id: Uuid) -> anyhow::Result<()>; - async fn delete_relative_path(&self, rel: &str) -> anyhow::Result<()>; + async fn move_folder_subtree(&self, folder_id: Uuid) -> PortResult; + async fn delete_doc_physical(&self, doc_id: Uuid) -> PortResult<()>; + async fn delete_folder_physical(&self, folder_id: Uuid) -> PortResult; + async fn sync_doc_paths(&self, doc_id: Uuid) -> PortResult<()>; + async fn delete_relative_path(&self, rel: &str) -> PortResult<()>; } diff --git a/api/src/application/ports/storage_projection_queue.rs b/api/crates/application/src/core/ports/storage/storage_projection_queue.rs similarity index 77% rename from api/src/application/ports/storage_projection_queue.rs rename to api/crates/application/src/core/ports/storage/storage_projection_queue.rs index 2aa99d3a..cf8de429 100644 --- a/api/src/application/ports/storage_projection_queue.rs +++ b/api/crates/application/src/core/ports/storage/storage_projection_queue.rs @@ -1,9 +1,11 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; -use sqlx::{Postgres, Transaction}; use uuid::Uuid; +use crate::core::ports::errors::PortResult; +use domain::documents::doc_type::DocumentType; + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum StorageProjectionJobKind { DocSync, @@ -28,7 +30,7 @@ pub struct StorageProjectionJob { pub struct StorageDeleteJobMetadata { pub workspace_id: Uuid, pub repo_path: Option<String>, - pub doc_type: String, + pub doc_type: DocumentType, #[serde(default, skip_serializing_if = "Option::is_none")] pub attachment_paths: Option<Vec<String>>, #[serde(default)] @@ -56,45 +58,41 @@ pub trait StorageProjectionQueue: Send + Sync { doc_id: Uuid, kind: StorageProjectionJobKind, reason: Option<&str>, - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn
enqueue_doc_job_tx( + async fn enqueue_folder_job( &self, - tx: &mut Transaction<'_, Postgres>, workspace_id: Uuid, - doc_id: Uuid, + folder_id: Uuid, kind: StorageProjectionJobKind, reason: Option<&str>, - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn enqueue_folder_job( + async fn fetch_next_job( &self, + lock_timeout_secs: i64, + ) -> PortResult<Option<StorageProjectionJob>>; + + async fn complete_job(&self, job_id: i64, locked_at: DateTime<Utc>) -> PortResult<()>; + + async fn fail_job(&self, job_id: i64, locked_at: DateTime<Utc>, error: &str) -> PortResult<()>; +} + +#[async_trait] +pub trait StorageProjectionQueueTx: Send { + async fn enqueue_doc_job( + &mut self, workspace_id: Uuid, - folder_id: Uuid, + doc_id: Uuid, kind: StorageProjectionJobKind, reason: Option<&str>, - ) -> anyhow::Result<()>; - async fn enqueue_folder_job_tx( - &self, - tx: &mut Transaction<'_, Postgres>, + async fn enqueue_folder_job( + &mut self, workspace_id: Uuid, folder_id: Uuid, kind: StorageProjectionJobKind, reason: Option<&str>, - ) -> anyhow::Result<()>; - - async fn fetch_next_job( - &self, - lock_timeout_secs: i64, - ) -> anyhow::Result<Option<StorageProjectionJob>>; - - async fn complete_job(&self, job_id: i64, locked_at: DateTime<Utc>) -> anyhow::Result<()>; - - async fn fail_job( - &self, - job_id: i64, - locked_at: DateTime<Utc>, - error: &str, - ) -> anyhow::Result<()>; + ) -> PortResult<()>; } diff --git a/api/crates/application/src/core/ports/storage/storage_reconcile_backend.rs b/api/crates/application/src/core/ports/storage/storage_reconcile_backend.rs new file mode 100644 index 00000000..41caad2e --- /dev/null +++ b/api/crates/application/src/core/ports/storage/storage_reconcile_backend.rs @@ -0,0 +1,9 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; + +#[async_trait] +pub trait StorageReconcileBackend: Send + Sync { + async fn list_paths(&self, user_id: Uuid) -> PortResult<Vec<String>>; +} diff --git a/api/src/application/ports/storage_reconcile_jobs.rs b/api/crates/application/src/core/ports/storage/storage_reconcile_jobs.rs similarity index 52% rename from api/src/application/ports/storage_reconcile_jobs.rs rename to api/crates/application/src/core/ports/storage/storage_reconcile_jobs.rs index cd771afb..7defb8db 100644 --- a/api/src/application/ports/storage_reconcile_jobs.rs +++ b/api/crates/application/src/core/ports/storage/storage_reconcile_jobs.rs @@ -1,6 +1,8 @@ use async_trait::async_trait; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + #[derive(Debug, Clone)] pub struct StorageReconcileJob { pub id: i64, @@ -11,11 +13,8 @@ pub struct StorageReconcileJob { #[async_trait] pub trait StorageReconcileJobs: Send + Sync { - async fn enqueue(&self, workspace_id: Uuid, scope: &str) -> anyhow::Result<()>; - async fn fetch_next( - &self, - lock_timeout_secs: i64, - ) -> anyhow::Result<Option<StorageReconcileJob>>; - async fn complete(&self, job_id: i64) -> anyhow::Result<()>; - async fn fail(&self, job_id: i64, error: &str) -> anyhow::Result<()>; + async fn enqueue(&self, workspace_id: Uuid, scope: &str) -> PortResult<()>; + async fn fetch_next(&self, lock_timeout_secs: i64) -> PortResult<Option<StorageReconcileJob>>; + async fn complete(&self, job_id: i64) -> PortResult<()>; + async fn fail(&self, job_id: i64, error: &str) -> PortResult<()>; } diff --git a/api/crates/application/src/core/services/access.rs b/api/crates/application/src/core/services/access.rs new file mode 100644 index 00000000..3134e11d --- /dev/null +++ b/api/crates/application/src/core/services/access.rs @@ -0,0 +1,119 @@ +use uuid::Uuid; + +use
crate::core::services::errors::ServiceError; +use crate::documents::ports::access_repository::AccessRepository; +use crate::documents::ports::sharing::share_access_port::ShareAccessPort; +use domain::documents::access_policy; +use domain::documents::doc_type::DocumentType; + +pub use domain::documents::access_policy::Capability; + +#[derive(Debug, Clone)] +pub enum Actor { + User(Uuid), + ShareToken(String), + Public, +} + +// Presentation layer is responsible for building Actor from HTTP inputs. +// This module intentionally avoids depending on presentation types. + +pub async fn resolve_document<A, R>( + access_repo: &A, + shares_repo: &R, + actor: &Actor, + doc_id: Uuid, +) -> Result<Capability, ServiceError> +where + A: AccessRepository + ?Sized, + R: ShareAccessPort + ?Sized, +{ + match actor { + Actor::User(uid) => { + let access = access_repo + .resolve_user_document_access(doc_id, *uid) + .await + .map_err(ServiceError::from)?; + let Some(access) = access else { + return Ok(Capability::None); + }; + Ok(access_policy::capability_for_user_document( + &access.permissions, + access.is_archived, + )) + } + Actor::ShareToken(t) => { + // Resolve the token's target, then decide access when the document matches the token scope + let ctx = shares_repo + .resolve_share_by_token(t) + .await + .map_err(ServiceError::from)?; + let Some(ctx) = ctx else { + return Ok(Capability::None); + }; + let is_archived = access_repo + .is_document_archived(doc_id) + .await + .map_err(ServiceError::from)?; + let materialized_permission = if ctx.shared_type == DocumentType::Folder { + shares_repo + .get_materialized_permission(ctx.share_id, doc_id) + .await + .map_err(ServiceError::from)? + } else { + None + }; + Ok(access_policy::capability_for_share_token( + &ctx, + doc_id, + chrono::Utc::now(), + is_archived, + materialized_permission, + )) + } + Actor::Public => { + let is_public = access_repo + .is_document_public(doc_id) + .await + .map_err(ServiceError::from)?; + // Public documents remain view-only even when archived.
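+ // Capability is an ordered enum (None < View < Edit), which is what lets + // require_view/require_edit below gate access with plain >= comparisons.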
+ Ok(access_policy::capability_for_public_document(is_public)) + } + } +} + +pub async fn require_view<A, R>( + access_repo: &A, + shares_repo: &R, + actor: &Actor, + doc_id: Uuid, +) -> Result<Capability, ServiceError> +where + A: AccessRepository + ?Sized, + R: ShareAccessPort + ?Sized, +{ + let cap = resolve_document(access_repo, shares_repo, actor, doc_id).await?; + if cap >= Capability::View { + Ok(cap) + } else { + Err(ServiceError::Forbidden) + } +} + +pub async fn require_edit<A, R>( + access_repo: &A, + shares_repo: &R, + actor: &Actor, + doc_id: Uuid, +) -> Result<(), ServiceError> +where + A: AccessRepository + ?Sized, + R: ShareAccessPort + ?Sized, +{ + let cap = resolve_document(access_repo, shares_repo, actor, doc_id).await?; + if cap >= Capability::Edit { + Ok(()) + } else { + Err(ServiceError::Forbidden) + } +} diff --git a/api/crates/application/src/core/services/authorization.rs b/api/crates/application/src/core/services/authorization.rs new file mode 100644 index 00000000..34116be7 --- /dev/null +++ b/api/crates/application/src/core/services/authorization.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use uuid::Uuid; + +use crate::core::services::access::{self, Actor, Capability}; +use crate::core::services::errors::ServiceError; +use crate::documents::ports::access_repository::AccessRepository; +use crate::documents::ports::sharing::share_access_port::ShareAccessPort; +use async_trait::async_trait; + +#[derive(Clone)] +pub struct AuthorizationService { + access_repo: Arc<dyn AccessRepository>, + share_access: Arc<dyn ShareAccessPort>, +} + +#[async_trait] +pub trait AuthorizationServiceFacade: Send + Sync { + async fn resolve_document( + &self, + actor: &Actor, + doc_id: Uuid, + ) -> Result<Capability, ServiceError>; + + async fn require_view(&self, actor: &Actor, doc_id: Uuid) -> Result<Capability, ServiceError>; + + async fn require_edit(&self, actor: &Actor, doc_id: Uuid) -> Result<(), ServiceError>; +} + +#[async_trait] +impl AuthorizationServiceFacade for AuthorizationService { + async fn resolve_document( + &self, + actor: &Actor, + doc_id: Uuid, + ) -> Result<Capability, ServiceError> { + self.resolve_document(actor, doc_id).await + } + + async fn require_view(&self, actor: &Actor, doc_id: Uuid) -> Result<Capability, ServiceError> { + self.require_view(actor, doc_id).await + } + + async fn require_edit(&self, actor: &Actor, doc_id: Uuid) -> Result<(), ServiceError> { + self.require_edit(actor, doc_id).await + } +} + +impl AuthorizationService { + pub fn new( + access_repo: Arc<dyn AccessRepository>, + share_access: Arc<dyn ShareAccessPort>, + ) -> Self { + Self { + access_repo, + share_access, + } + } + + pub async fn resolve_document( + &self, + actor: &Actor, + doc_id: Uuid, + ) -> Result<Capability, ServiceError> { + access::resolve_document( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + } + + pub async fn require_view( + &self, + actor: &Actor, + doc_id: Uuid, + ) -> Result<Capability, ServiceError> { + access::require_view( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + } + + pub async fn require_edit(&self, actor: &Actor, doc_id: Uuid) -> Result<(), ServiceError> { + access::require_edit( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + } +} diff --git a/api/src/application/services/diff/mod.rs b/api/crates/application/src/core/services/diff/mod.rs similarity index 100% rename from api/src/application/services/diff/mod.rs rename to api/crates/application/src/core/services/diff/mod.rs diff --git a/api/src/application/services/diff/text_diff.rs b/api/crates/application/src/core/services/diff/text_diff.rs similarity index 95% rename from api/src/application/services/diff/text_diff.rs rename to
api/crates/application/src/core/services/diff/text_diff.rs index 35174a0b..3fe0764d 100644 --- a/api/src/application/services/diff/text_diff.rs +++ b/api/crates/application/src/core/services/diff/text_diff.rs @@ -1,6 +1,6 @@ use similar::{Algorithm, ChangeTag, TextDiff}; -use crate::application::dto::diff::{TextDiffLine, TextDiffLineType, TextDiffResult}; +use crate::core::dtos::{TextDiffLine, TextDiffLineType, TextDiffResult}; pub fn compute_text_diff(old: &str, new: &str, file_path: &str) -> TextDiffResult { let diff = TextDiff::configure() diff --git a/api/src/application/services/doc_events.rs b/api/crates/application/src/core/services/doc_events.rs similarity index 100% rename from api/src/application/services/doc_events.rs rename to api/crates/application/src/core/services/doc_events.rs diff --git a/api/src/application/services/errors.rs b/api/crates/application/src/core/services/errors.rs similarity index 74% rename from api/src/application/services/errors.rs rename to api/crates/application/src/core/services/errors.rs index 0582a9ab..56e5ceaf 100644 --- a/api/src/application/services/errors.rs +++ b/api/crates/application/src/core/services/errors.rs @@ -1,6 +1,8 @@ use anyhow::Error; use thiserror::Error; +use crate::core::ports::errors::PortError; + #[derive(Debug, Error)] pub enum ServiceError { #[error("unauthorized")] @@ -24,3 +26,9 @@ impl ServiceError { matches!(self, ServiceError::Unexpected(_)) } } + +impl From<PortError> for ServiceError { + fn from(err: PortError) -> Self { + ServiceError::Unexpected(err.into_anyhow()) + } +} diff --git a/api/src/application/services/health.rs b/api/crates/application/src/core/services/health.rs similarity index 58% rename from api/src/application/services/health.rs rename to api/crates/application/src/core/services/health.rs index 2d505aec..6365b32c 100644 --- a/api/src/application/services/health.rs +++ b/api/crates/application/src/core/services/health.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use crate::application::ports::health_probe::{HealthProbe, HealthStatus}; +use crate::core::ports::health_probe::{HealthProbe, HealthStatus}; +use async_trait::async_trait; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum OverallHealth { @@ -12,6 +13,18 @@ pub struct HealthService { probe: Arc<dyn HealthProbe>, } +#[async_trait] +pub trait HealthServiceFacade: Send + Sync { + async fn status(&self) -> anyhow::Result<OverallHealth>; +} + +#[async_trait] +impl HealthServiceFacade for HealthService { + async fn status(&self) -> anyhow::Result<OverallHealth> { + self.status().await + } +} + impl HealthService { pub fn new(probe: Arc<dyn HealthProbe>) -> Self { Self { probe } diff --git a/api/crates/application/src/core/services/markdown/mod.rs b/api/crates/application/src/core/services/markdown/mod.rs new file mode 100644 index 00000000..5797b04e --- /dev/null +++ b/api/crates/application/src/core/services/markdown/mod.rs @@ -0,0 +1 @@ +pub use crate::core::dtos::markdown::{PlaceholderItem, RenderOptions, RenderResponse}; diff --git a/api/src/application/services/markdown_render.rs b/api/crates/application/src/core/services/markdown_render.rs similarity index 89% rename from api/src/application/services/markdown_render.rs rename to api/crates/application/src/core/services/markdown_render.rs index 2495a0a8..b9860ef1 100644 --- a/api/src/application/services/markdown_render.rs +++ b/api/crates/application/src/core/services/markdown_render.rs @@ -1,6 +1,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use async_trait::async_trait; use base64::Engine; use base64::engine::general_purpose::STANDARD as
BASE64_STANDARD; use serde::Deserialize; @@ -8,14 +9,14 @@ use serde_json::{Value, json}; use tracing::warn; use uuid::Uuid; -use crate::application::ports::plugin_asset_store::PluginAssetStore; -use crate::application::ports::plugin_installation_repository::PluginInstallationRepository; -use crate::application::ports::plugin_runtime::PluginRuntime; -use crate::application::services::errors::ServiceError; -use crate::application::services::markdown::{ - PlaceholderItem, RenderOptions, RenderResponse, render, -}; -use crate::application::services::plugins::asset_signer::{AssetScope, AssetSigner}; +use crate::core::dtos::markdown::{PlaceholderItem, RenderOptions, RenderResponse}; +use crate::core::ports::markdown_renderer::MarkdownRenderer; +use crate::core::services::errors::ServiceError; +use crate::plugins::ports::plugin_asset_store::PluginAssetStore; +use crate::plugins::ports::plugin_installation_repository::PluginInstallationRepository; +use crate::plugins::ports::plugin_runtime::PluginRuntime; +use crate::plugins::services::asset_signer::{AssetScope, AssetSigner}; +use domain::plugins::scope::PluginInstallationStatus; #[derive(Clone, Debug)] pub struct MarkdownRenderTask { @@ -28,16 +29,52 @@ pub struct MarkdownRenderService { assets: Arc<dyn PluginAssetStore>, installations: Arc<dyn PluginInstallationRepository>, runtime: Arc<dyn PluginRuntime>, + renderer: Arc<dyn MarkdownRenderer>, asset_signer: Arc<AssetSigner>, asset_ttl_secs: u64, } +#[async_trait] +pub trait MarkdownRenderServiceFacade: Send + Sync { + async fn render_single( + &self, + text: String, + options: RenderOptions, + user_scope: Option<Uuid>, + ) -> Result<RenderResponse, ServiceError>; + + async fn render_many( + &self, + tasks: Vec<MarkdownRenderTask>, + ) -> Result<Vec<RenderResponse>, ServiceError>; +} + +#[async_trait] +impl MarkdownRenderServiceFacade for MarkdownRenderService { + async fn render_single( + &self, + text: String, + options: RenderOptions, + user_scope: Option<Uuid>, + ) -> Result<RenderResponse, ServiceError> { + self.render_single(text, options, user_scope).await + } + + async fn render_many( + &self, + tasks: Vec<MarkdownRenderTask>, + ) -> Result<Vec<RenderResponse>, ServiceError> { + self.render_many(tasks).await + } +} + impl MarkdownRenderService { #[allow(clippy::too_many_arguments)] pub fn new( assets: Arc<dyn PluginAssetStore>, installations: Arc<dyn PluginInstallationRepository>, runtime: Arc<dyn PluginRuntime>, + renderer: Arc<dyn MarkdownRenderer>, asset_signer: Arc<AssetSigner>, asset_ttl_secs: u64, ) -> Self { @@ -45,6 +82,7 @@ impl MarkdownRenderService { assets, installations, runtime, + renderer, asset_signer, asset_ttl_secs, } @@ -97,8 +135,10 @@ Some(&placeholder_kinds) }; - let mut response = - render(text, options.clone(), placeholder_kinds_ref).map_err(ServiceError::from)?; + let mut response = self + .renderer + .render(text, options.clone(), placeholder_kinds_ref) + .map_err(ServiceError::from)?; if !response.placeholders.is_empty() && !specs.is_empty() { self.apply_placeholder_renderers(&mut response, &options, specs) .await?; @@ -116,12 +156,12 @@ .list_latest_global_manifests() .await .map_err(ServiceError::from)?; - for (plugin_id, version, manifest) in manifests { + for item in manifests { push_renderers_from_manifest( + &mut specs, - &manifest, - &plugin_id, - &version, + &item.manifest, + &item.plugin_id, + &item.version, RendererScope::Global, ); } @@ -132,7 +172,10 @@ .list_for_workspace(workspace_id) .await .map_err(ServiceError::from)?; - for inst in installs.into_iter().filter(|i| i.status == "enabled") { + for inst in installs + .into_iter() + .filter(|i| i.status == PluginInstallationStatus::Enabled) + { match self .assets .load_user_manifest(&workspace_id, &inst.plugin_id, &inst.version) @@ -194,18 +237,18 @@
let hydrate = spec.hydrate.as_ref(); let Some(function) = spec.function.as_deref() else { - if let Some(hydrate) = hydrate { - if self.attach_hydrate_metadata( + if let Some(hydrate) = hydrate + && self.attach_hydrate_metadata( &mut html, &placeholder, &request, spec, hydrate, options.token.as_deref(), - ) { - handled = true; - break; - } + ) + { + handled = true; + break; } continue; }; @@ -379,10 +422,7 @@ impl MarkdownRenderService { let module_url = self .build_hydrate_module_url(spec, hydrate, token) .ok_or_else(|| { - serde_json::Error::io(std::io::Error::new( - std::io::ErrorKind::Other, - "invalid hydrate module path", - )) + serde_json::Error::io(std::io::Error::other("invalid hydrate module path")) })?; let export_name = hydrate.export.as_deref().unwrap_or("default"); let context = json!({ diff --git a/api/src/application/services/metrics.rs b/api/crates/application/src/core/services/metrics.rs similarity index 95% rename from api/src/application/services/metrics.rs rename to api/crates/application/src/core/services/metrics.rs index 67f4b320..e250a2bc 100644 --- a/api/src/application/services/metrics.rs +++ b/api/crates/application/src/core/services/metrics.rs @@ -1,5 +1,9 @@ use std::sync::atomic::{AtomicU64, Ordering}; +pub trait MetricsRegistryFacade: Send + Sync { + fn render(&self) -> String; +} + #[derive(Default)] pub struct MetricsRegistry { storage_projection_success: AtomicU64, @@ -112,3 +116,9 @@ impl MetricsRegistry { ) } } + +impl MetricsRegistryFacade for MetricsRegistry { + fn render(&self) -> String { + MetricsRegistry::render(self) + } +} diff --git a/api/crates/application/src/core/services/mod.rs b/api/crates/application/src/core/services/mod.rs new file mode 100644 index 00000000..084206c6 --- /dev/null +++ b/api/crates/application/src/core/services/mod.rs @@ -0,0 +1,13 @@ +pub mod access; +pub mod authorization; +pub mod diff; +pub mod doc_events; +pub mod errors; +pub mod health; +pub mod markdown; +pub mod markdown_render; +pub mod metrics; +pub mod storage; +pub mod tagging; +pub mod utils; +pub mod worker; diff --git a/api/crates/application/src/core/services/storage/ingest/attachments.rs b/api/crates/application/src/core/services/storage/ingest/attachments.rs new file mode 100644 index 00000000..d05ec35b --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/attachments.rs @@ -0,0 +1,90 @@ +use super::*; + +impl StorageIngestService { + pub(super) async fn handle_attachment_upsert( + &self, + file_id: Uuid, + doc_id: Uuid, + rel_path: &str, + repo_path: &str, + event: &StorageIngestEvent, + previous_repo_path: Option<&str>, + ) -> anyhow::Result<()> { + let abs = self.storage.absolute_from_relative(rel_path); + let bytes = match self.storage.read_bytes(abs.as_path()).await { + Ok(bytes) => bytes, + Err(err) if is_not_found_error(&err) => { + warn!( + file_id = %file_id, + doc_id = %doc_id, + repo_path = repo_path, + "storage_ingest_attachment_missing_skipped" + ); + self.storage_projection + .delete_relative_path(rel_path) + .await?; + return Ok(()); + } + Err(err) => return Err(err.into()), + }; + let size = bytes.len() as i64; + let hash = sha256_hex(&bytes); + self.files_repo + .update_hash_and_size(file_id, size, &hash) + .await?; + let mut payload_obj = serde_json::Map::new(); + payload_obj.insert("repo_path".into(), json!(repo_path)); + payload_obj.insert("storage_path".into(), json!(rel_path)); + payload_obj.insert("backend".into(), json!(event.backend.as_str())); + payload_obj.insert("size".into(), json!(size)); + 
payload_obj.insert("content_hash".into(), json!(hash)); + if let Some(prev) = previous_repo_path { + payload_obj.insert("previous_path".into(), json!(prev)); + } + self.events + .append( + event.workspace_id, + doc_id, + "attachment.ingest_upsert", + Some(Value::Object(payload_obj)), + ) + .await?; + info!( + doc_id = %doc_id, + file_id = %file_id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_attachment_upsert_applied" + ); + Ok(()) + } + + pub(super) async fn handle_attachment_delete( + &self, + file_id: Uuid, + doc_id: Uuid, + repo_path: &str, + event: &StorageIngestEvent, + ) -> anyhow::Result<()> { + self.files_repo.delete_by_id(file_id).await?; + self.events + .append( + event.workspace_id, + doc_id, + "attachment.ingest_delete", + Some(json!({ + "repo_path": repo_path, + "backend": event.backend.as_str(), + })), + ) + .await?; + info!( + doc_id = %doc_id, + file_id = %file_id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_attachment_deleted" + ); + Ok(()) + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/documents.rs b/api/crates/application/src/core/services/storage/ingest/documents.rs new file mode 100644 index 00000000..31c59f2e --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/documents.rs @@ -0,0 +1,196 @@ +use super::*; + +impl StorageIngestService { + pub(super) async fn handle_doc_upsert( + &self, + doc: &ResolvedDocument, + repo_path: &str, + event: &StorageIngestEvent, + payload: MarkdownIngestPayload, + previous_repo_path: Option<&str>, + ) -> anyhow::Result<()> { + if event.backend.is_fs_watcher() + && event.actor_id.is_none() + && self.recent_exports.is_recent_match( + event.workspace_id, + repo_path, + &payload.content_hash, + ) + { + debug!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_doc_upsert_skipped_recent_projection" + ); + return Ok(()); + } + let snapshot = snapshot_from_markdown(&payload.body); + self.realtime + .apply_snapshot(&doc.id.to_string(), snapshot.as_slice()) + .await?; + // Persist back to storage only for API/actor initiated ingests; fs_watcher/reconcile events + // originate from the filesystem itself and writing would re-trigger the watcher endlessly. 
+ if event.actor_id.is_some() + && let Err(err) = self.realtime.force_persist(&doc.id.to_string()).await + { + warn!( + error = ?err, + doc_id = %doc.id, + "storage_ingest_force_persist_failed" + ); + } + let mut payload_obj = serde_json::Map::new(); + payload_obj.insert("repo_path".into(), json!(repo_path)); + payload_obj.insert("backend".into(), json!(event.backend.as_str())); + payload_obj.insert("content_hash".into(), json!(payload.content_hash)); + payload_obj.insert("doc_type".into(), json!(doc.doc_type.as_str())); + if let Some(prev) = previous_repo_path { + payload_obj.insert("previous_path".into(), json!(prev)); + } + self.events + .append( + event.workspace_id, + doc.id, + "document.ingest_upsert", + Some(Value::Object(payload_obj)), + ) + .await?; + info!( + doc_id = %doc.id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_doc_upsert_applied" + ); + Ok(()) + } + + pub(super) async fn load_markdown_payload( + &self, + rel_path: &str, + ) -> anyhow::Result<MarkdownIngestPayload> { + let abs = self.storage.absolute_from_relative(rel_path); + let bytes = self.storage.read_bytes(abs.as_path()).await?; + parse_markdown_payload(bytes) + } + + pub(super) async fn resolve_doc_from_front_matter( + &self, + user_id: Uuid, + payload: &MarkdownIngestPayload, + ) -> anyhow::Result<Option<ResolvedDocument>> { + let Some(doc_id) = payload.doc_id_hint else { + return Ok(None); + }; + let Some(meta) = self + .document_repo + .get_meta_for_owner(doc_id, user_id) + .await? + else { + return Ok(None); + }; + Ok(Some(ResolvedDocument::new( + doc_id, + meta.doc_type, + meta.path, + meta.archived_at.is_some(), + ))) + } + + pub(super) async fn handle_doc_delete( + &self, + doc: &ResolvedDocument, + repo_path: &str, + event: &StorageIngestEvent, + permissions: &PermissionSet, + ) -> anyhow::Result<()> { + let actor_id = event.actor_id; + match self + .document_service + .delete_for_user(event.workspace_id, doc.id, actor_id, permissions) + .await + { + Ok(true) => { + info!( + doc_id = %doc.id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_doc_delete_applied" + ); + Ok(()) + } + Ok(false) => Ok(()), + Err(ServiceError::NotFound) => Ok(()), + Err(err) => Err(err.into()), + } + } + + pub(super) async fn handle_folder_upsert( + &self, + doc: &ResolvedDocument, + rel_path: &str, + repo_path: &str, + event: &StorageIngestEvent, + previous_repo_path: Option<&str>, + ) -> anyhow::Result<()> { + if !self + .reconcile_repo_path(doc, event.workspace_id, rel_path) + .await?
+ { + warn!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_folder_repo_path_rejected" + ); + return Ok(()); + } + let mut payload_obj = serde_json::Map::new(); + payload_obj.insert("repo_path".into(), json!(repo_path)); + payload_obj.insert("doc_type".into(), json!(doc.doc_type.as_str())); + payload_obj.insert("owner_id".into(), json!(event.workspace_id)); + payload_obj.insert("backend".into(), json!(event.backend.as_str())); + if let Some(prev) = previous_repo_path { + payload_obj.insert("previous_path".into(), json!(prev)); + } + self.events + .append( + event.workspace_id, + doc.id, + "document.metadata_updated", + Some(Value::Object(payload_obj)), + ) + .await?; + info!( + doc_id = %doc.id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_folder_upsert_applied" + ); + Ok(()) + } + + pub(super) async fn reconcile_repo_path( + &self, + doc: &ResolvedDocument, + owner_id: Uuid, + rel_path: &str, + ) -> anyhow::Result<bool> { + if doc.path.as_deref() == Some(rel_path) { + return Ok(true); + } + match self + .document_paths + .update_repo_path(doc.id, owner_id, rel_path) + .await + { + Ok(()) => Ok(true), + Err(err) => { + warn!( + doc_id = %doc.id, + error = ?err, + "storage_ingest_repo_path_update_failed" + ); + Ok(false) + } + } + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/handler.rs b/api/crates/application/src/core/services/storage/ingest/handler.rs new file mode 100644 index 00000000..3fda8bd3 --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/handler.rs @@ -0,0 +1,233 @@ +use super::*; + +#[async_trait] +impl StorageIngestHandler for StorageIngestService { + async fn handle_event(&self, event: &StorageIngestEvent) -> anyhow::Result<()> { + let Some(repo_path) = normalize_repo_path(&event.repo_path) else { + warn!( + user_id = %event.workspace_id, + repo_path = event.repo_path.as_str(), + "storage_ingest_invalid_repo_path" + ); + return Ok(()); + }; + let rel_path = Self::relative_path(event.workspace_id, &repo_path); + let payload_previous_repo_path = previous_path_from_payload(event.payload.as_ref()); + + let mut doc_previous_repo_path: Option<String> = None; + let mut doc = self + .document_paths + .get_by_owner_and_path(event.workspace_id, &rel_path) + .await? + .map(ResolvedDocument::from); + + if doc.is_none() + && let Some(prev_repo) = payload_previous_repo_path.as_deref() + { + let prev_rel = Self::relative_path(event.workspace_id, prev_repo); + if let Some(prev_doc) = self + .document_paths + .get_by_owner_and_path(event.workspace_id, &prev_rel) + .await?
.map(ResolvedDocument::from) + { + if let Err(err) = self + .document_paths + .update_repo_path(prev_doc.id, event.workspace_id, &rel_path) + .await + { + warn!( + doc_id = %prev_doc.id, + error = ?err, + "storage_ingest_repo_path_update_failed" + ); + } else { + doc_previous_repo_path = Some(prev_repo.to_string()); + let mut updated = prev_doc.clone(); + updated.path = Some(rel_path.clone()); + doc = Some(updated); + } + } + } + + if let Some(doc) = doc { + if doc.is_archived() { + warn!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_archived_doc_skipped" + ); + return Ok(()); + } + match event.kind { + StorageIngestKind::Upsert => { + if doc.is_folder() { + self.handle_folder_upsert( + &doc, + &rel_path, + &repo_path, + event, + doc_previous_repo_path.as_deref(), + ) + .await?; + } else { + let payload = match self.load_markdown_payload(&rel_path).await { + Ok(payload) => payload, + Err(err) if is_not_found_error(&err) => { + warn!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_doc_payload_missing" + ); + self.storage_projection + .delete_relative_path(&rel_path) + .await?; + return Ok(()); + } + Err(err) => return Err(err), + }; + self.handle_doc_upsert( + &doc, + &repo_path, + event, + payload, + doc_previous_repo_path.as_deref(), + ) + .await?; + } + } + StorageIngestKind::Delete => { + let permissions = self.permissions_for_event(event).await?; + self.handle_doc_delete(&doc, &repo_path, event, &permissions) + .await?; + } + } + return Ok(()); + } + + let mut attachment_previous_repo_path: Option<String> = None; + let mut attachment = self.files_repo.find_by_storage_path(&rel_path).await?; + + if attachment.is_none() + && let Some(prev_repo) = payload_previous_repo_path.as_deref() + { + let prev_rel = Self::relative_path(event.workspace_id, prev_repo); + if let Some(file) = self.files_repo.find_by_storage_path(&prev_rel).await? { + self.files_repo + .update_storage_path(file.file_id, &rel_path) + .await?; + attachment_previous_repo_path = Some(prev_repo.to_string()); + attachment = Some(file); + } + } + + if let Some(file) = attachment { + info!( + doc_id = %file.document_id, + owner_id = %file.workspace_id, + repo_path = repo_path, + "storage_ingest_attachment_detected" + ); + match event.kind { + StorageIngestKind::Upsert => { + self.handle_attachment_upsert( + file.file_id, + file.document_id, + &rel_path, + &repo_path, + event, + attachment_previous_repo_path.as_deref(), + ) + .await?; + } + StorageIngestKind::Delete => { + self.handle_attachment_delete( + file.file_id, + file.document_id, + &repo_path, + event, + ) + .await?; + } + } + return Ok(()); + } + + if event.kind == StorageIngestKind::Upsert && rel_path.ends_with(".md") { + let payload = match self.load_markdown_payload(&rel_path).await { + Ok(payload) => payload, + Err(err) if is_not_found_error(&err) => { + info!( + user_id = %event.workspace_id, + repo_path = repo_path, + "storage_ingest_missing_source_skipped" + ); + self.storage_projection + .delete_relative_path(&rel_path) + .await?; + return Ok(()); + } + Err(err) => return Err(err), + }; + if let Some(doc) = self + .resolve_doc_from_front_matter(event.workspace_id, &payload) + .await? + { + if doc.is_folder() { + warn!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_folder_event_skipped" + ); + } else if doc.is_archived() { + warn!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_archived_doc_skipped" + ); + } else { + if !self + .reconcile_repo_path(&doc, event.workspace_id, &rel_path) + .await?
+ { + warn!( + doc_id = %doc.id, + repo_path = repo_path, + "storage_ingest_repo_path_rejected" + ); + return Ok(()); + } + self.handle_doc_upsert( + &doc, + &repo_path, + event, + payload, + payload_previous_repo_path.as_deref(), + ) + .await?; + } + return Ok(()); + } + } + + if event.kind == StorageIngestKind::Delete { + self.storage_projection + .delete_relative_path(&rel_path) + .await?; + info!( + user_id = %event.workspace_id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_orphan_deleted" + ); + } else { + warn!( + user_id = %event.workspace_id, + repo_path = repo_path, + backend = event.backend.as_str(), + "storage_ingest_no_target_found" + ); + } + Ok(()) + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/markdown.rs b/api/crates/application/src/core/services/storage/ingest/markdown.rs new file mode 100644 index 00000000..e344fcad --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/markdown.rs @@ -0,0 +1,99 @@ +use super::*; + +#[derive(Debug, Clone)] +pub(super) struct MarkdownIngestPayload { + pub(super) doc_id_hint: Option<Uuid>, + pub(super) body: String, + pub(super) content_hash: String, +} + +#[derive(Debug, Deserialize)] +struct MarkdownFrontMatter { + id: Option<Uuid>, +} + +pub(super) fn parse_markdown_payload(bytes: Vec<u8>) -> anyhow::Result<MarkdownIngestPayload> { + let content_hash = sha256_hex(&bytes); + // Accept lossy UTF-8 to avoid retry storms on malformed files; non-UTF8 bytes become U+FFFD. + let text = String::from_utf8_lossy(&bytes).to_string(); + let trimmed = text.trim_start_matches('\u{feff}'); + if let Some((front, body)) = split_front_matter(trimmed) + && let Ok(front_matter) = serde_yaml::from_str::<MarkdownFrontMatter>(front) + && let Some(doc_id) = front_matter.id + { + return Ok(MarkdownIngestPayload { + doc_id_hint: Some(doc_id), + body: body.to_string(), + content_hash, + }); + } + Ok(MarkdownIngestPayload { + doc_id_hint: None, + body: trimmed.to_string(), + content_hash, + }) +} + +fn split_front_matter(input: &str) -> Option<(&str, &str)> { + let after_open = input + .strip_prefix("---\r\n") + .or_else(|| input.strip_prefix("---\n"))?; + if let Some((front_len, body_start)) = find_front_matter_end(after_open) { + let front = &after_open[..front_len]; + let body = &after_open[body_start..]; + return Some((front, body)); + } + None +} + +fn find_front_matter_end(s: &str) -> Option<(usize, usize)> { + let bytes = s.as_bytes(); + let mut idx = 0; + while idx < bytes.len() { + if bytes[idx] == b'\n' { + let after_newline = &s[idx + 1..]; + if after_newline.starts_with("---") { + let mut body_start = idx + 1 + 3; + let mut remainder = &s[body_start..]; + // Skip any trailing newlines so we don't feed extra blank lines + // back into the realtime layer when the projection re-imports.
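+ // e.g. "---\nid: ...\n---\n\nHello" yields a body starting at "Hello"; see the + // extracts_id_when_front_matter_is_valid test below.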
+ while remainder.starts_with("\r\n") || remainder.starts_with('\n') { + if remainder.starts_with("\r\n") { + body_start += 2; + let (_, rest) = remainder.split_at(2); + remainder = rest; + } else { + body_start += 1; + let (_, rest) = remainder.split_at(1); + remainder = rest; + } + } + return Some((idx, body_start)); + } + } + idx += 1; + } + None +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn preserves_body_when_front_matter_has_no_id() { + let markdown = "---\ntitle: Foo\n---\n\nBody".to_string(); + let payload = parse_markdown_payload(markdown.clone().into_bytes()).unwrap(); + assert!(payload.doc_id_hint.is_none()); + assert_eq!(payload.body, markdown); + } + + #[test] + fn extracts_id_when_front_matter_is_valid() { + let doc_id = Uuid::new_v4(); + let markdown = format!("---\nid: {}\n---\n\nHello", doc_id); + let payload = parse_markdown_payload(markdown.into_bytes()).unwrap(); + assert_eq!(payload.doc_id_hint, Some(doc_id)); + assert_eq!(payload.body.trim_start_matches('\n'), "Hello"); + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/mod.rs b/api/crates/application/src/core/services/storage/ingest/mod.rs new file mode 100644 index 00000000..b432b0af --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/mod.rs @@ -0,0 +1,94 @@ +use std::io; +use std::path::PathBuf; +use std::sync::Arc; + +use async_trait::async_trait; +use serde::Deserialize; +use serde_json::{Value, json}; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +use crate::core::ports::storage::storage_ingest_queue::{StorageIngestEvent, StorageIngestKind}; +use crate::core::ports::storage::storage_port::{StorageProjectionPort, StorageResolverPort}; +use crate::core::services::errors::ServiceError; +use crate::core::services::storage::projection_cache::RecentProjectionCache; +use crate::core::services::utils::hash::sha256_hex; +use crate::documents::ports::doc_event_log::DocEventLog; +use crate::documents::ports::document_path_repository::DocumentPathRepository; +use crate::documents::ports::document_repository::DocumentRepository; +use crate::documents::ports::files::files_repository::FilesRepository; +use crate::documents::ports::realtime::realtime_port::RealtimeEngine; +use crate::documents::services::DocumentService; +use crate::documents::services::realtime::snapshot::snapshot_from_markdown; +use crate::workspaces::services::{ WorkspacePermissionResolver, permission_snapshot::permission_set_from_snapshot, }; +use domain::access::permissions::PermissionSet; +use domain::documents::document::Document as DomainDocument; + +mod attachments; +mod documents; +mod handler; +mod markdown; +mod permissions; +mod resolved_document; +mod utils; + +pub use domain::documents::path::normalize_repo_path; + +use markdown::{MarkdownIngestPayload, parse_markdown_payload}; +use resolved_document::ResolvedDocument; +use utils::{is_not_found_error, previous_path_from_payload}; + +#[async_trait] +pub trait StorageIngestHandler: Send + Sync { + async fn handle_event(&self, event: &StorageIngestEvent) -> anyhow::Result<()>; +} + +pub struct StorageIngestService { + document_repo: Arc<dyn DocumentRepository>, + document_paths: Arc<dyn DocumentPathRepository>, + files_repo: Arc<dyn FilesRepository>, + realtime: Arc<dyn RealtimeEngine>, + storage: Arc<dyn StorageResolverPort>, + storage_projection: Arc<dyn StorageProjectionPort>, + events: Arc<dyn DocEventLog>, + document_service: Arc<DocumentService>, + permission_resolver: Arc<dyn WorkspacePermissionResolver>, + recent_exports: Arc<RecentProjectionCache>, +} + +impl StorageIngestService { + #[allow(clippy::too_many_arguments)] + pub fn new( + document_repo: Arc<dyn DocumentRepository>, + document_paths: Arc<dyn DocumentPathRepository>, + files_repo: Arc<dyn FilesRepository>, + realtime: Arc<dyn RealtimeEngine>, + storage: Arc<dyn StorageResolverPort>,
diff --git a/api/crates/application/src/core/services/storage/ingest/mod.rs b/api/crates/application/src/core/services/storage/ingest/mod.rs new file mode 100644 index 00000000..b432b0af --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/mod.rs @@ -0,0 +1,94 @@ +use std::io; +use std::path::PathBuf; +use std::sync::Arc; + +use async_trait::async_trait; +use serde::Deserialize; +use serde_json::{Value, json}; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +use crate::core::ports::storage::storage_ingest_queue::{StorageIngestEvent, StorageIngestKind}; +use crate::core::ports::storage::storage_port::{StorageProjectionPort, StorageResolverPort}; +use crate::core::services::errors::ServiceError; +use crate::core::services::storage::projection_cache::RecentProjectionCache; +use crate::core::services::utils::hash::sha256_hex; +use crate::documents::ports::doc_event_log::DocEventLog; +use crate::documents::ports::document_path_repository::DocumentPathRepository; +use crate::documents::ports::document_repository::DocumentRepository; +use crate::documents::ports::files::files_repository::FilesRepository; +use crate::documents::ports::realtime::realtime_port::RealtimeEngine; +use crate::documents::services::DocumentService; +use crate::documents::services::realtime::snapshot::snapshot_from_markdown; +use crate::workspaces::services::{ WorkspacePermissionResolver, permission_snapshot::permission_set_from_snapshot, }; +use domain::access::permissions::PermissionSet; +use domain::documents::document::Document as DomainDocument; + +mod attachments; +mod documents; +mod handler; +mod markdown; +mod permissions; +mod resolved_document; +mod utils; + +pub use domain::documents::path::normalize_repo_path; + +use markdown::{MarkdownIngestPayload, parse_markdown_payload}; +use resolved_document::ResolvedDocument; +use utils::{is_not_found_error, previous_path_from_payload}; + +#[async_trait] +pub trait StorageIngestHandler: Send + Sync { + async fn handle_event(&self, event: &StorageIngestEvent) -> anyhow::Result<()>; +} + +pub struct StorageIngestService { + document_repo: Arc<dyn DocumentRepository>, + document_paths: Arc<dyn DocumentPathRepository>, + files_repo: Arc<dyn FilesRepository>, + realtime: Arc<dyn RealtimeEngine>, + storage: Arc<dyn StorageResolverPort>, + storage_projection: Arc<dyn StorageProjectionPort>, + events: Arc<dyn DocEventLog>, + document_service: Arc<DocumentService>, + permission_resolver: Arc<WorkspacePermissionResolver>, + recent_exports: Arc<RecentProjectionCache>, +} + +impl StorageIngestService { + #[allow(clippy::too_many_arguments)] + pub fn new( + document_repo: Arc<dyn DocumentRepository>, + document_paths: Arc<dyn DocumentPathRepository>, + files_repo: Arc<dyn FilesRepository>, + realtime: Arc<dyn RealtimeEngine>, + storage: Arc<dyn StorageResolverPort>, + storage_projection: Arc<dyn StorageProjectionPort>, + events: Arc<dyn DocEventLog>, + document_service: Arc<DocumentService>, + permission_resolver: Arc<WorkspacePermissionResolver>, + recent_exports: Arc<RecentProjectionCache>, + ) -> Self { + Self { + document_repo, + document_paths, + files_repo, + realtime, + storage, + storage_projection, + events, + document_service, + permission_resolver, + recent_exports, + } + } + + fn relative_path(user_id: Uuid, repo_path: &str) -> String { + let mut path = PathBuf::from(user_id.to_string()); + path.push(repo_path); + path.to_string_lossy().replace('\\', "/") + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/permissions.rs b/api/crates/application/src/core/services/storage/ingest/permissions.rs new file mode 100644 index 00000000..08ea06bc --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/permissions.rs @@ -0,0 +1,67 @@ +use super::*; + +impl StorageIngestService { + pub(super) async fn permissions_for_event( + &self, + event: &StorageIngestEvent, + ) -> anyhow::Result<PermissionSet> { + let set = permission_set_from_snapshot(&event.permission_snapshot); + if !set.is_empty() { + return Ok(set); + } + let mut candidates = Vec::new(); + if let Some(actor_id) = event.actor_id { + candidates.push(("actor", actor_id, true)); + } + let warn_on_user_miss = event.user_id != event.workspace_id; + candidates.push(("user", event.user_id, warn_on_user_miss)); + for (source, user_id, warn_on_missing) in candidates { + match self + .permission_resolver + .load_permission_set(event.workspace_id, user_id) + .await + { + Ok(Some(resolved)) => { + info!( + workspace_id = %event.workspace_id, + user_id = %user_id, + source, + "storage_ingest_permissions_rehydrated" + ); + return Ok(resolved); + } + Ok(None) => { + if warn_on_missing { + warn!( + workspace_id = %event.workspace_id, + user_id = %user_id, + source, + "storage_ingest_member_missing_for_permissions" + ); + } else { + debug!( + workspace_id = %event.workspace_id, + user_id = %user_id, + source, + "storage_ingest_member_missing_for_permissions" + ); + } + } + Err(err) => { + warn!( + error = ?err, + workspace_id = %event.workspace_id, + user_id = %user_id, + source, + "storage_ingest_permission_resolve_failed" + ); + } + } + } + warn!( + workspace_id = %event.workspace_id, + "storage_ingest_permissions_fallback_all" + ); + Ok(PermissionSet::all()) + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/resolved_document.rs b/api/crates/application/src/core/services/storage/ingest/resolved_document.rs new file mode 100644 index 00000000..b8d01f8c --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/resolved_document.rs @@ -0,0 +1,45 @@ +use super::*; +use domain::documents::doc_type::DocumentType; + +#[derive(Debug, Clone)] +pub(super) struct ResolvedDocument { + pub(super) id: Uuid, + pub(super) doc_type: DocumentType, + pub(super) path: Option<String>, + pub(super) archived: bool, +} + +impl ResolvedDocument { + pub(super) fn new( + id: Uuid, + doc_type: DocumentType, + path: Option<String>, + archived: bool, + ) -> Self { + Self { + id, + doc_type, + path, + archived, + } + } + + pub(super) fn is_folder(&self) -> bool { + self.doc_type.is_folder() + } + + pub(super) fn is_archived(&self) -> bool { + self.archived + } +} + +impl From<DomainDocument> for ResolvedDocument { + fn from(value: DomainDocument) -> Self { + Self::new( + value.id(), + value.doc_type(), + value.path().map(str::to_string), + value.archived_at().is_some(), + ) + } +} diff --git a/api/crates/application/src/core/services/storage/ingest/utils.rs
b/api/crates/application/src/core/services/storage/ingest/utils.rs new file mode 100644 index 00000000..8d87984e --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest/utils.rs @@ -0,0 +1,16 @@ +use super::*; + +pub(super) fn previous_path_from_payload(payload: Option<&Value>) -> Option<String> { + payload + .and_then(|p| p.get("previous_path")) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) +} + +pub(super) fn is_not_found_error(err: &anyhow::Error) -> bool { + err.chain().any(|cause| { + cause + .downcast_ref::<io::Error>() + .is_some_and(|io_err| io_err.kind() == io::ErrorKind::NotFound) + }) +} diff --git a/api/crates/application/src/core/services/storage/ingest_enqueue.rs b/api/crates/application/src/core/services/storage/ingest_enqueue.rs new file mode 100644 index 00000000..28dcd3ac --- /dev/null +++ b/api/crates/application/src/core/services/storage/ingest_enqueue.rs @@ -0,0 +1,95 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::dtos::storage_ingest::IngestBatch; +use crate::core::ports::storage::storage_ingest_queue::StorageIngestQueue; +use crate::core::services::errors::ServiceError; +use domain::documents::path::normalize_repo_path; +use domain::storage::ingest_backend::StorageIngestBackend; + +pub struct StorageIngestEnqueueService { + queue: Arc<dyn StorageIngestQueue>, +} + +#[async_trait] +pub trait StorageIngestEnqueueServiceFacade: Send + Sync { + async fn enqueue_batch( + &self, + workspace_id: Uuid, + user_id: Uuid, + actor_id: Option<Uuid>, + permission_snapshot: &[String], + batch: IngestBatch, + ) -> Result<usize, ServiceError>; +} + +#[async_trait] +impl StorageIngestEnqueueServiceFacade for StorageIngestEnqueueService { + async fn enqueue_batch( + &self, + workspace_id: Uuid, + user_id: Uuid, + actor_id: Option<Uuid>, + permission_snapshot: &[String], + batch: IngestBatch, + ) -> Result<usize, ServiceError> { + self.enqueue_batch(workspace_id, user_id, actor_id, permission_snapshot, batch) + .await + } +} + +impl StorageIngestEnqueueService { + pub fn new(queue: Arc<dyn StorageIngestQueue>) -> Self { + Self { queue } + } + + pub async fn enqueue_batch( + &self, + workspace_id: Uuid, + user_id: Uuid, + actor_id: Option<Uuid>, + permission_snapshot: &[String], + batch: IngestBatch, + ) -> Result<usize, ServiceError> { + const MAX_EVENTS: usize = 1024; + if batch.events.is_empty() { + return Err(ServiceError::BadRequest("events_required")); + } + if batch.events.len() > MAX_EVENTS { + return Err(ServiceError::BadRequest("too_many_events")); + } + + let mut processed = 0usize; + for event in batch.events { + let repo_path = event.repo_path.trim(); + if repo_path.is_empty() { + return Err(ServiceError::BadRequest("repo_path_required")); + } + let Some(clean_repo) = normalize_repo_path(repo_path) else { + return Err(ServiceError::BadRequest("invalid_repo_path")); + }; + + let backend = StorageIngestBackend::parse(event.backend.as_deref().unwrap_or("api")); + + self.queue + .enqueue_event( + workspace_id, + user_id, + actor_id, + &clean_repo, + backend, + event.kind, + event.content_hash.as_deref(), + event.payload, + permission_snapshot, + ) + .await + .map_err(ServiceError::from)?; + processed += 1; + } + + Ok(processed) + } }
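For orientation, a hypothetical call site for StorageIngestEnqueueService; the event shape inside IngestBatch is inferred from the validation code above, so the struct and field names are assumptions, not the real DTO:

    // Sketch only: event struct name and fields are guessed from enqueue_batch's usage.
    let batch = IngestBatch {
        events: vec![IngestEventDto {
            repo_path: " notes/daily.md ".to_string(), // trimmed, then normalized
            backend: Some("git".to_string()),          // None defaults to "api"
            kind: StorageIngestKind::Upsert,
            content_hash: None,
            payload: None,
        }],
    };
    let processed = service
        .enqueue_batch(workspace_id, user_id, Some(actor_id), &permission_snapshot, batch)
        .await?;
    assert_eq!(processed, 1);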
diff --git a/api/crates/application/src/core/services/storage/mod.rs b/api/crates/application/src/core/services/storage/mod.rs new file mode 100644 index 00000000..d3906eb5 --- /dev/null +++ b/api/crates/application/src/core/services/storage/mod.rs @@ -0,0 +1,5 @@ +pub mod ingest; +pub mod ingest_enqueue; +pub mod projection_cache; +pub mod reconcile; +pub mod reconcile_scheduler; diff --git a/api/src/application/services/storage_projection_cache.rs b/api/crates/application/src/core/services/storage/projection_cache.rs similarity index 100% rename from api/src/application/services/storage_projection_cache.rs rename to api/crates/application/src/core/services/storage/projection_cache.rs diff --git a/api/src/application/services/storage_reconcile.rs b/api/crates/application/src/core/services/storage/reconcile/mod.rs similarity index 66% rename from api/src/application/services/storage_reconcile.rs rename to api/crates/application/src/core/services/storage/reconcile/mod.rs index b7a6d3d9..8fb1eaeb 100644 --- a/api/src/application/services/storage_reconcile.rs +++ b/api/crates/application/src/core/services/storage/reconcile/mod.rs @@ -1,28 +1,32 @@ use std::collections::HashSet; use std::sync::Arc; -use std::time::Duration; use serde_json::json; use tracing::{error, info, warn}; use uuid::Uuid; -use crate::application::ports::document_repository::DocumentRepository; -use crate::application::ports::files_repository::FilesRepository; -use crate::application::ports::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue}; -use crate::application::ports::storage_projection_queue::{ +use crate::core::ports::storage::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue}; +use crate::core::ports::storage::storage_projection_queue::{ StorageProjectionJobKind, StorageProjectionQueue, }; -use crate::application::ports::storage_reconcile_backend::StorageReconcileBackend; -use crate::application::ports::storage_reconcile_jobs::{ +use crate::core::ports::storage::storage_reconcile_backend::StorageReconcileBackend; +use crate::core::ports::storage::storage_reconcile_jobs::{ StorageReconcileJob, StorageReconcileJobs, }; -use crate::domain::workspaces::permissions::PermissionSet; +use crate::core::services::worker::WorkerTick; +use crate::documents::ports::document_path_repository::DocumentPathRepository; +use crate::documents::ports::files::files_repository::FilesRepository; +use domain::access::permissions::PermissionSet; +use domain::storage::ingest_backend::StorageIngestBackend; -const RESERVED_REPO_PATHS: &[&str] = &[".gitignore"]; // Files managed outside Document/Files repos +mod paths; +use paths::{ + is_attachment_repo_path, is_reserved_repo_path, normalize_repo_path, reserved_storage_paths, +}; pub struct StorageReconcileService { jobs: Arc<dyn StorageReconcileJobs>, - documents: Arc<dyn DocumentRepository>, + documents: Arc<dyn DocumentPathRepository>, files: Arc<dyn FilesRepository>, ingest_queue: Arc<dyn StorageIngestQueue>, storage_jobs: Arc<dyn StorageProjectionQueue>, @@ -33,7 +37,7 @@ pub struct StorageReconcileService { impl StorageReconcileService { pub fn new( jobs: Arc<dyn StorageReconcileJobs>, - documents: Arc<dyn DocumentRepository>, + documents: Arc<dyn DocumentPathRepository>, files: Arc<dyn FilesRepository>, ingest_queue: Arc<dyn StorageIngestQueue>, storage_jobs: Arc<dyn StorageProjectionQueue>, @@ -74,7 +78,10 @@ impl StorageReconcileService { } async fn enumerate_storage_paths(&self, workspace_id: Uuid) -> anyhow::Result<Vec<String>> { - self.backend.list_paths(workspace_id).await + self.backend + .list_paths(workspace_id) + .await + .map_err(Into::into) } fn repo_relative_path(workspace_id: Uuid, storage_path: &str) -> Option<String> { @@ -105,7 +112,7 @@ workspace_id, None, &repo_path, - "reconcile", + StorageIngestBackend::Reconcile, StorageIngestKind::Delete, None, Some(json!({ @@ -115,6 +122,7 @@ &permissions, ) .await + .map_err(Into::into) } async fn enqueue_upsert(&self, workspace_id: Uuid, storage_path: &str) -> anyhow::Result<()> { @@ -138,7 +146,7 @@ workspace_id, None, &repo_path, - "reconcile", + StorageIngestBackend::Reconcile, StorageIngestKind::Upsert, None,
Some(json!({ @@ -148,6 +156,7 @@ &permissions, ) .await + .map_err(Into::into) } async fn process_job(&self, job: &StorageReconcileJob) -> anyhow::Result<()> { @@ -214,14 +223,14 @@ Some(doc) => { info!( workspace_id = %workspace_id, - doc_id = %doc.id, + doc_id = %doc.id(), repo_path = repo_path, "storage_reconcile_missing_doc_enqueued" ); self.storage_jobs .enqueue_doc_job( - doc.workspace_id, - doc.id, + doc.workspace_id(), + doc.id(), StorageProjectionJobKind::DocSync, Some("storage_reconcile_missing_doc"), ) @@ -238,86 +247,19 @@ Ok(()) } - pub async fn run(self: Arc<Self>) { - loop { - match self.jobs.fetch_next(30).await { - Ok(Some(job)) => { - if let Err(err) = self.process_job(&job).await { - error!(error = ?err, job_id = job.id, "storage_reconcile_job_failed"); - let _ = self.jobs.fail(job.id, &format!("{err:#}")).await; - } else { - let _ = self.jobs.complete(job.id).await; - } - } - Ok(None) => tokio::time::sleep(Duration::from_secs(2)).await, - Err(err) => { - error!(error = ?err, "storage_reconcile_fetch_failed"); - tokio::time::sleep(Duration::from_secs(2)).await; + pub async fn tick(&self) -> anyhow::Result<WorkerTick> { + match self.jobs.fetch_next(30).await { + Ok(Some(job)) => { + if let Err(err) = self.process_job(&job).await { + error!(error = ?err, job_id = job.id, "storage_reconcile_job_failed"); + let _ = self.jobs.fail(job.id, &format!("{err:#}")).await; + } else { + let _ = self.jobs.complete(job.id).await; } + Ok(WorkerTick::Processed) } + Ok(None) => Ok(WorkerTick::Idle), + Err(err) => Err(err.into()), } } } - -fn reserved_storage_paths(workspace_id: Uuid) -> impl Iterator<Item = String> { - RESERVED_REPO_PATHS - .iter() - .map(move |rel| format!("{}/{}", workspace_id, rel.trim_start_matches('/'))) -} - -fn is_reserved_repo_path(repo_path: &str) -> bool { - let trimmed = repo_path.trim_start_matches('/'); - RESERVED_REPO_PATHS - .iter() - .any(|reserved| trimmed == reserved.trim_start_matches('/')) -} - -fn is_attachment_repo_path(repo_path: &str) -> bool { - repo_path.contains("/attachments/") -} - -fn normalize_repo_path(raw: &str) -> Option<String> { - let replaced = raw.replace('\\', "/"); - let trimmed = replaced.trim_start_matches('/'); - if trimmed.is_empty() { - None - } else { - Some(trimmed.to_string()) - } -} - -#[cfg(test)] -mod tests { - use super::{is_reserved_repo_path, normalize_repo_path, reserved_storage_paths}; - use uuid::Uuid; - - #[test] - fn reserved_paths_are_under_workspace_root() { - let workspace = Uuid::new_v4(); - let collected: Vec<String> = reserved_storage_paths(workspace).collect(); - assert_eq!(collected, vec![format!("{}/.gitignore", workspace)]); - } - - #[test] - fn normalize_handles_windows_paths() { - let user = Uuid::new_v4(); - let path = format!(r"{}\notes\foo.md", user); - assert_eq!( - normalize_repo_path(&path), - Some(format!("{}/notes/foo.md", user)) - ); - } - - #[test] - fn normalize_filters_empty() { - assert_eq!(normalize_repo_path(""), None); - assert_eq!(normalize_repo_path("/"), None); - } - - #[test] - fn detects_reserved_repo_path() { - assert!(is_reserved_repo_path(".gitignore")); - assert!(is_reserved_repo_path("/.gitignore")); - assert!(!is_reserved_repo_path("docs/foo.md")); - } -}
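The blocking run() loop is gone; tick() now reports a WorkerTick and a caller owns scheduling. A sketch of the driver loop the bootstrap crate presumably runs, mirroring the removed 2-second backoff (assumed, not shown in this diff):

    use std::time::Duration;
    use tokio::time::sleep;

    loop {
        match service.tick().await {
            Ok(WorkerTick::Processed) => {} // more work may be queued; poll again immediately
            Ok(WorkerTick::Idle) => sleep(Duration::from_secs(2)).await,
            Err(err) => {
                tracing::error!(error = ?err, "storage_reconcile_tick_failed");
                sleep(Duration::from_secs(2)).await;
            }
        }
    }

Splitting tick() from the loop keeps job handling testable without spawning a task, while the sleep policy stays a deployment concern.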
diff --git a/api/crates/application/src/core/services/storage/reconcile/paths.rs b/api/crates/application/src/core/services/storage/reconcile/paths.rs new file mode 100644 index 00000000..2e9c0671 --- /dev/null +++ b/api/crates/application/src/core/services/storage/reconcile/paths.rs @@ -0,0 +1,68 @@ +use uuid::Uuid; + +use domain::documents::path::normalize_repo_path as normalize_domain_repo_path; + +const RESERVED_REPO_PATHS: &[&str] = &[".gitignore"]; // Files managed outside Document/Files repos + +pub(super) fn reserved_storage_paths(workspace_id: Uuid) -> impl Iterator<Item = String> { + RESERVED_REPO_PATHS + .iter() + .map(move |rel| format!("{}/{}", workspace_id, rel.trim_start_matches('/'))) +} + +pub(super) fn is_reserved_repo_path(repo_path: &str) -> bool { + let trimmed = repo_path.trim_start_matches('/'); + RESERVED_REPO_PATHS + .iter() + .any(|reserved| trimmed == reserved.trim_start_matches('/')) +} + +pub(super) fn is_attachment_repo_path(repo_path: &str) -> bool { + repo_path.contains("/attachments/") +} + +pub(super) fn normalize_repo_path(raw: &str) -> Option<String> { + normalize_domain_repo_path(raw) +} + +#[cfg(test)] +mod tests { + use super::{is_reserved_repo_path, normalize_repo_path, reserved_storage_paths}; + use uuid::Uuid; + + #[test] + fn reserved_paths_are_under_workspace_root() { + let workspace = Uuid::new_v4(); + let collected: Vec<String> = reserved_storage_paths(workspace).collect(); + assert_eq!(collected, vec![format!("{}/.gitignore", workspace)]); + } + + #[test] + fn normalize_handles_windows_paths() { + let user = Uuid::new_v4(); + let path = format!(r"{}\notes\foo.md", user); + assert_eq!( + normalize_repo_path(&path), + Some(format!("{}/notes/foo.md", user)) + ); + } + + #[test] + fn normalize_filters_empty() { + assert_eq!(normalize_repo_path(""), None); + assert_eq!(normalize_repo_path("/"), None); + } + + #[test] + fn normalize_rejects_traversal() { + assert_eq!(normalize_repo_path("../secret"), None); + assert_eq!(normalize_repo_path("foo/../bar"), None); + } + + #[test] + fn detects_reserved_repo_path() { + assert!(is_reserved_repo_path(".gitignore")); + assert!(is_reserved_repo_path("/.gitignore")); + assert!(!is_reserved_repo_path("docs/foo.md")); + } +} diff --git a/api/crates/application/src/core/services/storage/reconcile_scheduler.rs b/api/crates/application/src/core/services/storage/reconcile_scheduler.rs new file mode 100644 index 00000000..45b23e78 --- /dev/null +++ b/api/crates/application/src/core/services/storage/reconcile_scheduler.rs @@ -0,0 +1,41 @@ +use std::sync::Arc; + +use crate::core::ports::storage::storage_reconcile_jobs::StorageReconcileJobs; +use crate::workspaces::ports::workspace_repository::WorkspaceRepository; +use tracing::{error, info}; + +pub struct StorageReconcileScheduler { + jobs: Arc<dyn StorageReconcileJobs>, + workspaces: Arc<dyn WorkspaceRepository>, +} + +impl StorageReconcileScheduler { + pub fn new( + jobs: Arc<dyn StorageReconcileJobs>, + workspaces: Arc<dyn WorkspaceRepository>, + ) -> Self { + Self { jobs, workspaces } + } + + pub async fn tick(&self) { + match self.workspaces.list_all_workspace_ids().await { + Ok(ids) => { + for id in ids { + if let Err(err) = self.jobs.enqueue(id, "full").await { + error!( + error = ?err, + workspace_id = %id, + "storage_reconcile_enqueue_failed" + ); + } else { + info!(workspace_id = %id, "storage_reconcile_job_enqueued"); + } + } + } + Err(err) => error!( + error = ?err, + "storage_reconcile_scheduler_workspace_list_failed" + ), + } + } +}
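The domain-level normalize_repo_path that paths.rs now delegates to is outside this diff; judging from the tests above (including the new traversal cases), a standalone equivalent would look roughly like this sketch:

    // Assumed behavior only: backslashes become slashes, leading slashes are
    // trimmed, and empty or ".."-containing paths are rejected.
    fn normalize_repo_path(raw: &str) -> Option<String> {
        let replaced = raw.replace('\\', "/");
        let trimmed = replaced.trim_start_matches('/');
        if trimmed.is_empty() || trimmed.split('/').any(|segment| segment == "..") {
            return None;
        }
        Some(trimmed.to_string())
    }

    assert_eq!(normalize_repo_path("/docs/foo.md"), Some("docs/foo.md".to_string()));
    assert_eq!(normalize_repo_path("foo/../bar"), None);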
diff --git a/api/src/application/services/tagging/mod.rs b/api/crates/application/src/core/services/tagging/mod.rs similarity index 94% rename from api/src/application/services/tagging/mod.rs rename to api/crates/application/src/core/services/tagging/mod.rs index f49812b9..569411fd 100644 --- a/api/src/application/services/tagging/mod.rs +++ b/api/crates/application/src/core/services/tagging/mod.rs @@ -1,4 +1,4 @@ -use crate::application::ports::tagging_repository::TaggingRepository; +use crate::documents::ports::tagging::tagging_repository::TaggingRepository; use once_cell::sync::Lazy; use regex::Regex; use uuid::Uuid; diff --git a/api/src/application/utils/hash.rs b/api/crates/application/src/core/services/utils/hash.rs similarity index 100% rename from api/src/application/utils/hash.rs rename to api/crates/application/src/core/services/utils/hash.rs diff --git a/api/src/application/utils/mod.rs b/api/crates/application/src/core/services/utils/mod.rs similarity index 100% rename from api/src/application/utils/mod.rs rename to api/crates/application/src/core/services/utils/mod.rs diff --git a/api/crates/application/src/core/services/worker.rs b/api/crates/application/src/core/services/worker.rs new file mode 100644 index 00000000..054897a2 --- /dev/null +++ b/api/crates/application/src/core/services/worker.rs @@ -0,0 +1,5 @@ +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WorkerTick { + Processed, + Idle, +} diff --git a/api/crates/application/src/core/use_cases/mod.rs b/api/crates/application/src/core/use_cases/mod.rs new file mode 100644 index 00000000..33573843 --- /dev/null +++ b/api/crates/application/src/core/use_cases/mod.rs @@ -0,0 +1 @@ +// Intentionally left empty for now. diff --git a/api/src/application/dto/document_export.rs b/api/crates/application/src/documents/dtos/document_export.rs similarity index 100% rename from api/src/application/dto/document_export.rs rename to api/crates/application/src/documents/dtos/document_export.rs diff --git a/api/src/application/dto/documents.rs b/api/crates/application/src/documents/dtos/documents.rs similarity index 90% rename from api/src/application/dto/documents.rs rename to api/crates/application/src/documents/dtos/documents.rs index 9cb500bf..cf32c788 100644 --- a/api/src/application/dto/documents.rs +++ b/api/crates/application/src/documents/dtos/documents.rs @@ -1,8 +1,8 @@ use chrono::{DateTime, Utc}; use uuid::Uuid; -use crate::application::dto::diff::TextDiffResult; -use crate::application::ports::document_snapshot_archive_repository::SnapshotArchiveRecord; +use crate::core::dtos::TextDiffResult; +use crate::documents::ports::document_snapshot_archive_repository::SnapshotArchiveRecord; #[derive(Debug, Clone, Copy, Default)] pub enum DocumentListFilter { diff --git a/api/crates/application/src/documents/dtos/mod.rs b/api/crates/application/src/documents/dtos/mod.rs new file mode 100644 index 00000000..480b3893 --- /dev/null +++ b/api/crates/application/src/documents/dtos/mod.rs @@ -0,0 +1,11 @@ +mod document_export; +mod documents; +mod public; +mod shares; +mod tags; + +pub use document_export::*; +pub use documents::*; +pub use public::*; +pub use shares::*; +pub use tags::*; diff --git a/api/src/application/dto/public.rs b/api/crates/application/src/documents/dtos/public.rs similarity index 100% rename from api/src/application/dto/public.rs rename to api/crates/application/src/documents/dtos/public.rs diff --git a/api/src/application/dto/shares.rs b/api/crates/application/src/documents/dtos/shares.rs similarity index 100% rename from api/src/application/dto/shares.rs rename to api/crates/application/src/documents/dtos/shares.rs diff --git a/api/src/application/dto/tags.rs b/api/crates/application/src/documents/dtos/tags.rs similarity index 100% rename from api/src/application/dto/tags.rs rename to api/crates/application/src/documents/dtos/tags.rs diff
--git a/api/crates/application/src/documents/mod.rs b/api/crates/application/src/documents/mod.rs new file mode 100644 index 00000000..2e8e16cf --- /dev/null +++ b/api/crates/application/src/documents/mod.rs @@ -0,0 +1,4 @@ +pub mod dtos; +pub mod ports; +pub mod services; +pub mod use_cases; diff --git a/api/src/application/ports/access_repository.rs b/api/crates/application/src/documents/ports/access_repository.rs similarity index 55% rename from api/src/application/ports/access_repository.rs rename to api/crates/application/src/documents/ports/access_repository.rs index a9209474..de6884b4 100644 --- a/api/src/application/ports/access_repository.rs +++ b/api/crates/application/src/documents/ports/access_repository.rs @@ -1,7 +1,8 @@ use async_trait::async_trait; use uuid::Uuid; -use crate::domain::workspaces::permissions::PermissionSet; +use crate::core::ports::errors::PortResult; +use domain::access::permissions::PermissionSet; #[derive(Debug, Clone)] pub struct DocumentUserAccess { @@ -16,7 +17,7 @@ pub trait AccessRepository: Send + Sync { &self, doc_id: Uuid, user_id: Uuid, - ) -> anyhow::Result<Option<DocumentUserAccess>>; - async fn is_document_public(&self, doc_id: Uuid) -> anyhow::Result<bool>; - async fn is_document_archived(&self, doc_id: Uuid) -> anyhow::Result<bool>; + ) -> PortResult<Option<DocumentUserAccess>>; + async fn is_document_public(&self, doc_id: Uuid) -> PortResult<bool>; + async fn is_document_archived(&self, doc_id: Uuid) -> PortResult<bool>; } diff --git a/api/crates/application/src/documents/ports/doc_event_log.rs b/api/crates/application/src/documents/ports/doc_event_log.rs new file mode 100644 index 00000000..eb5fbb54 --- /dev/null +++ b/api/crates/application/src/documents/ports/doc_event_log.rs @@ -0,0 +1,16 @@ +use async_trait::async_trait; +use serde_json::Value; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; + +#[async_trait] +pub trait DocEventLog: Send + Sync { + async fn append( + &self, + workspace_id: Uuid, + doc_id: Uuid, + event_type: &str, + payload: Option<Value>, + ) -> PortResult<()>; +} diff --git a/api/src/application/ports/document_exporter.rs b/api/crates/application/src/documents/ports/document_exporter.rs similarity index 77% rename from api/src/application/ports/document_exporter.rs rename to api/crates/application/src/documents/ports/document_exporter.rs index 2a72f61b..8e41a59b 100644 --- a/api/src/application/ports/document_exporter.rs +++ b/api/crates/application/src/documents/ports/document_exporter.rs @@ -1,6 +1,7 @@ use async_trait::async_trait; -use crate::application::dto::document_export::{DocumentDownload, DocumentDownloadFormat}; +use crate::core::ports::errors::PortResult; +use crate::documents::dtos::{DocumentDownload, DocumentDownloadFormat}; #[derive(Debug, Clone)] pub struct DocumentExportAttachment { @@ -22,5 +23,5 @@ pub trait DocumentExporter: Send + Sync { &self, assets: DocumentExportAssets, format: DocumentDownloadFormat, - ) -> anyhow::Result<DocumentDownload>; + ) -> PortResult<DocumentDownload>; } diff --git a/api/crates/application/src/documents/ports/document_path_repository.rs b/api/crates/application/src/documents/ports/document_path_repository.rs new file mode 100644 index 00000000..37ea8571 --- /dev/null +++ b/api/crates/application/src/documents/ports/document_path_repository.rs @@ -0,0 +1,23 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; +use domain::documents::document::Document as DomainDocument; + +#[async_trait] +pub trait DocumentPathRepository: Send + Sync { + async fn list_paths_for_user(&self, workspace_id: Uuid) -> PortResult<Vec<DomainDocument>>; + + async fn
get_by_owner_and_path( + &self, + workspace_id: Uuid, + relative_path: &str, + ) -> PortResult<Option<DomainDocument>>; + + async fn update_repo_path( + &self, + doc_id: Uuid, + workspace_id: Uuid, + relative_path: &str, + ) -> PortResult<()>; +}
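PortResult recurs across all of these reworked port traits, but its definition in crate::core::ports::errors is outside this diff. Presumably it is an alias in this spirit; the exact error type is an assumption:

    // Assumed shape of core::ports::errors; not shown in this diff.
    #[derive(Debug, thiserror::Error)]
    pub enum PortError {
        #[error(transparent)]
        Unexpected(#[from] anyhow::Error),
    }

    pub type PortResult<T> = Result<T, PortError>;

Whatever its exact shape, it gives every adapter a single error channel that services can map into ServiceError instead of threading anyhow::Error through each port.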
diff --git a/api/crates/application/src/documents/ports/document_repository.rs b/api/crates/application/src/documents/ports/document_repository.rs new file mode 100644 index 00000000..403bb5a4 --- /dev/null +++ b/api/crates/application/src/documents/ports/document_repository.rs @@ -0,0 +1,189 @@ +use async_trait::async_trait; +use thiserror::Error; +use uuid::Uuid; + +use domain::documents::doc_type::DocumentType; +use domain::documents::document::Document as DomainDocument; +use domain::documents::document::SearchHit; +pub use domain::documents::meta::DocMeta; +use domain::documents::path::{DesiredPath, Slug}; +use domain::documents::title::Title; + +#[derive(Debug, Error)] +pub enum DocumentRepositoryError { + #[error("document path conflict")] + PathConflict, + #[error(transparent)] + Unexpected(#[from] anyhow::Error), +} + +pub type DocumentRepoResult<T> = Result<T, DocumentRepositoryError>; + +impl From<DocumentRepositoryError> for crate::core::services::errors::ServiceError { + fn from(err: DocumentRepositoryError) -> Self { + match err { + DocumentRepositoryError::PathConflict => Self::Conflict, + DocumentRepositoryError::Unexpected(inner) => Self::Unexpected(inner), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum DocumentListState { + #[default] + Active, + Archived, + All, +} + +#[async_trait] +pub trait DocumentRepository: Send + Sync { + async fn list_for_user( + &self, + workspace_id: Uuid, + query: Option<String>, + tag: Option<String>, + state: DocumentListState, + ) -> DocumentRepoResult<Vec<DomainDocument>>; + + async fn list_ids_for_user(&self, workspace_id: Uuid) -> DocumentRepoResult<Vec<Uuid>>; + + async fn list_workspace_documents( + &self, + workspace_id: Uuid, + ) -> DocumentRepoResult<Vec<DomainDocument>>; + + async fn get_by_id(&self, id: Uuid) -> DocumentRepoResult<Option<DomainDocument>>; + + async fn search_for_user( + &self, + workspace_id: Uuid, + query: Option<String>, + limit: i64, + ) -> DocumentRepoResult<Vec<SearchHit>>; + + #[allow(clippy::too_many_arguments)] + async fn create_for_user( + &self, + workspace_id: Uuid, + created_by: Uuid, + title: &Title, + parent_id: Option<Uuid>, + doc_type: DocumentType, + created_by_plugin: Option<&str>, + slug: &Slug, + desired_path: &DesiredPath, + ) -> DocumentRepoResult<DomainDocument>; + + // parent_id: None => not provided; Some(None) => set NULL; Some(Some(uuid)) => set to value + #[allow(clippy::too_many_arguments)] + async fn update_title_and_parent_for_user( + &self, + id: Uuid, + workspace_id: Uuid, + title: &Title, + parent_id: Option<Option<Uuid>>, + slug: &Slug, + desired_path: &DesiredPath, + ) -> DocumentRepoResult<Option<DomainDocument>>; + + // Returns Some(type) if deleted, None if not found/unauthorized + async fn delete_owned( + &self, + id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult<Option<DocumentType>>; + + // Lightweight meta for ownership-scoped queries + async fn get_meta_for_owner( + &self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult<Option<DocMeta>>; + + async fn archive_subtree( + &self, + doc_id: Uuid, + workspace_id: Uuid, + archived_by: Uuid, + ) -> DocumentRepoResult<Option<DomainDocument>>; + + async fn unarchive_subtree( + &self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult<Option<DomainDocument>>; + + async fn list_owned_subtree_documents( + &self, + workspace_id: Uuid, + root_id: Uuid, + ) -> DocumentRepoResult<Vec<SubtreeDocument>>; +} + +#[async_trait] +pub trait DocumentRepositoryTx: Send { + #[allow(clippy::too_many_arguments)] + async fn create_for_user( + &mut self, + workspace_id: Uuid, + created_by: Uuid, + title: &Title, + parent_id: Option<Uuid>, + doc_type: DocumentType, + created_by_plugin: Option<&str>, + slug: &Slug, + desired_path: &DesiredPath, + ) -> DocumentRepoResult<DomainDocument>; + + // parent_id: None => not provided; Some(None) => set NULL; Some(Some(uuid)) => set to value + #[allow(clippy::too_many_arguments)] + async fn update_title_and_parent_for_user( + &mut self, + id: Uuid, + workspace_id: Uuid, + title: &Title, + parent_id: Option<Option<Uuid>>, + slug: &Slug, + desired_path: &DesiredPath, + ) -> DocumentRepoResult<Option<DomainDocument>>; + + // Returns Some(type) if deleted, None if not found/unauthorized + async fn delete_owned( + &mut self, + id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult<Option<DocumentType>>; + + // Lightweight meta for ownership-scoped queries + async fn get_meta_for_owner( + &mut self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult<Option<DocMeta>>; + + async fn archive_subtree( + &mut self, + doc_id: Uuid, + workspace_id: Uuid, + archived_by: Uuid, + ) -> DocumentRepoResult<Option<DomainDocument>>; + + async fn unarchive_subtree( + &mut self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult<Option<DomainDocument>>; + + async fn list_owned_subtree_documents( + &mut self, + workspace_id: Uuid, + root_id: Uuid, + ) -> DocumentRepoResult<Vec<SubtreeDocument>>; +} + +#[derive(Debug, Clone)] +pub struct SubtreeDocument { + pub id: Uuid, + pub doc_type: DocumentType, +}
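The Option<Option<Uuid>> parameter on update_title_and_parent_for_user encodes a tri-state, per the comment in the trait; spelled out (new_parent is a placeholder Uuid):

    let keep_parent: Option<Option<Uuid>> = None;                 // not provided: leave parent_id untouched
    let detach: Option<Option<Uuid>> = Some(None);                // provided as null: SET parent_id = NULL
    let reparent: Option<Option<Uuid>> = Some(Some(new_parent));  // provided: move under new_parent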
diff --git a/api/src/application/ports/document_snapshot_archive_repository.rs b/api/crates/application/src/documents/ports/document_snapshot_archive_repository.rs similarity index 68% rename from api/src/application/ports/document_snapshot_archive_repository.rs rename to api/crates/application/src/documents/ports/document_snapshot_archive_repository.rs index d2c1e701..3640c30f 100644 --- a/api/src/application/ports/document_snapshot_archive_repository.rs +++ b/api/crates/application/src/documents/ports/document_snapshot_archive_repository.rs @@ -2,6 +2,8 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + #[derive(Debug, Clone)] pub struct SnapshotArchiveInsert<'a> { pub document_id: &'a Uuid, @@ -29,26 +31,28 @@ pub struct SnapshotArchiveRecord { pub content_hash: String, } +#[derive(Debug, Clone)] +pub struct SnapshotArchiveEntry { + pub record: SnapshotArchiveRecord, + pub bytes: Vec<u8>, +} + #[async_trait] pub trait DocumentSnapshotArchiveRepository: Send + Sync { - async fn insert( - &self, - input: SnapshotArchiveInsert<'_>, - ) -> anyhow::Result<SnapshotArchiveRecord>; + async fn insert(&self, input: SnapshotArchiveInsert<'_>) -> PortResult<SnapshotArchiveRecord>; - async fn get_by_id(&self, id: Uuid) - -> anyhow::Result<Option<(SnapshotArchiveRecord, Vec<u8>)>>; + async fn get_by_id(&self, id: Uuid) -> PortResult<Option<SnapshotArchiveEntry>>; async fn list_for_document( &self, doc_id: Uuid, limit: i64, offset: i64, - ) -> anyhow::Result<Vec<SnapshotArchiveRecord>>; + ) -> PortResult<Vec<SnapshotArchiveRecord>>; async fn latest_before( &self, doc_id: Uuid, version: i64, - ) -> anyhow::Result<Option<(SnapshotArchiveRecord, Vec<u8>)>>; + ) -> PortResult<Option<SnapshotArchiveEntry>>; } diff --git a/api/crates/application/src/documents/ports/files/files_repository.rs b/api/crates/application/src/documents/ports/files/files_repository.rs new file mode 100644 index 00000000..ddede632 --- /dev/null +++ b/api/crates/application/src/documents/ports/files/files_repository.rs @@ -0,0 +1,81 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; + +#[derive(Debug, Clone)] +pub struct FileMeta { + pub storage_path: String, + pub content_type: Option<String>, + pub document_id: Uuid, + pub workspace_id: Uuid, +} + +#[derive(Debug, Clone)] +pub struct FilePathMeta { + pub storage_path: String, + pub content_type: Option<String>, +} + +#[derive(Debug, Clone)] +pub struct StoredFileScope { + pub file_id: Uuid, + pub document_id: Uuid, + pub workspace_id: Uuid, +} + +#[async_trait] +pub trait FilesRepository: Send + Sync { + async fn is_workspace_document(&self, doc_id: Uuid, workspace_id: Uuid) -> PortResult<bool>; + async fn insert_file( + &self, + doc_id: Uuid, + filename: &str, + content_type: Option<&str>, + size: i64, + storage_path: &str, + content_hash: &str, + ) -> PortResult<Uuid>; + async fn get_file_meta(&self, file_id: Uuid) -> PortResult<Option<FileMeta>>; + async fn get_file_path_by_doc_and_name( + &self, + doc_id: Uuid, + filename: &str, + ) -> PortResult<Option<FilePathMeta>>; + + async fn list_storage_paths_for_document(&self, doc_id: Uuid) -> PortResult<Vec<String>>; + + async fn list_files_for_document(&self, doc_id: Uuid) -> PortResult<Vec<FileRecord>>; + + async fn list_storage_paths_for_workspace(&self, workspace_id: Uuid) + -> PortResult<Vec<String>>; + + async fn find_by_storage_path(&self, storage_path: &str) + -> PortResult<Option<StoredFileScope>>; + + async fn update_storage_path(&self, file_id: Uuid, storage_path: &str) -> PortResult<()>; + + async fn update_hash_and_size( + &self, + file_id: Uuid, + size: i64, + content_hash: &str, + ) -> PortResult<()>; + + async fn delete_by_id(&self, file_id: Uuid) -> PortResult<()>; +} + +#[async_trait] +pub trait FilesRepositoryTx: Send { + async fn list_storage_paths_for_document(&mut self, doc_id: Uuid) -> PortResult<Vec<String>>; +} + +#[derive(Debug, Clone)] +pub struct FileRecord { + pub id: Uuid, + pub filename: String, + pub content_type: Option<String>, + pub size: i64, + pub storage_path: String, + pub content_hash: String, +} diff --git a/api/crates/application/src/documents/ports/files/mod.rs b/api/crates/application/src/documents/ports/files/mod.rs new file mode 100644 index 00000000..c356d3be --- /dev/null +++ b/api/crates/application/src/documents/ports/files/mod.rs @@ -0,0 +1 @@ +pub mod files_repository; diff --git a/api/crates/application/src/documents/ports/linkgraph_repository.rs b/api/crates/application/src/documents/ports/linkgraph_repository.rs new file mode 100644 index 00000000..e3bced88 --- /dev/null +++ b/api/crates/application/src/documents/ports/linkgraph_repository.rs @@ -0,0 +1,37 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; +use domain::documents::document::{BacklinkInfo, OutgoingLink}; + +#[async_trait] +pub trait LinkGraphRepository: Send + Sync { + async fn clear_links_for_source(&self, source_id: Uuid) -> PortResult<()>; + async fn exists_doc_for_owner(&self, doc_id: Uuid, owner_id: Uuid) -> PortResult<bool>; + async fn find_doc_id_by_owner_and_title( + &self, + owner_id: Uuid, + title: &str, + ) -> PortResult<Option<Uuid>>; + async fn upsert_link( + &self, + source_id: Uuid, + target_id: Uuid, + link_type: &str, + link_text: Option<String>, + position_start: i32, + position_end: i32, + ) -> PortResult<()>; + + async fn backlinks_for( + &self, + workspace_id: Uuid, + target_id: Uuid, + ) -> PortResult<Vec<BacklinkInfo>>; + + async fn outgoing_links_for( + &self, + workspace_id: Uuid, + source_id: Uuid, + ) -> PortResult<Vec<OutgoingLink>>; +} diff --git a/api/crates/application/src/documents/ports/mod.rs b/api/crates/application/src/documents/ports/mod.rs new file mode 100644 index 00000000..09fd2b4b --- /dev/null +++ b/api/crates/application/src/documents/ports/mod.rs @@ -0,0 +1,13 @@ +pub mod access_repository; +pub mod doc_event_log; +pub mod document_exporter; +pub mod document_path_repository; +pub mod document_repository; +pub mod document_snapshot_archive_repository; +pub mod files; +pub mod linkgraph_repository; +pub mod publishing; +pub
mod realtime; +pub mod sharing; +pub mod tagging; +pub mod tx_runner; diff --git a/api/crates/application/src/documents/ports/publishing/mod.rs b/api/crates/application/src/documents/ports/publishing/mod.rs new file mode 100644 index 00000000..648c8e88 --- /dev/null +++ b/api/crates/application/src/documents/ports/publishing/mod.rs @@ -0,0 +1 @@ +pub mod public_repository; diff --git a/api/crates/application/src/documents/ports/publishing/public_repository.rs b/api/crates/application/src/documents/ports/publishing/public_repository.rs new file mode 100644 index 00000000..5c8d8cfc --- /dev/null +++ b/api/crates/application/src/documents/ports/publishing/public_repository.rs @@ -0,0 +1,57 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; +use domain::documents::document::Document; + +#[derive(Debug, Clone)] +pub struct WorkspaceTitleAndSlug { + pub title: String, + pub workspace_slug: String, +} + +#[derive(Debug, Clone)] +pub struct PublishStatusRow { + pub slug: String, + pub workspace_slug: String, +} + +#[derive(Debug, Clone)] +pub struct PublicDocumentSummaryRow { + pub id: Uuid, + pub title: String, + pub updated_at: chrono::DateTime<chrono::Utc>, + pub published_at: chrono::DateTime<chrono::Utc>, +} + +#[async_trait] +pub trait PublicRepository: Send + Sync { + async fn ensure_workspace_title_and_slug( + &self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> PortResult<Option<WorkspaceTitleAndSlug>>; + async fn upsert_public_document(&self, doc_id: Uuid, slug: &str) -> PortResult<()>; + async fn slug_exists(&self, slug: &str) -> PortResult<bool>; + async fn is_workspace_document(&self, doc_id: Uuid, workspace_id: Uuid) -> PortResult<bool>; + async fn delete_public_document(&self, doc_id: Uuid) -> PortResult<bool>; + async fn get_publish_status( + &self, + workspace_id: Uuid, + doc_id: Uuid, + ) -> PortResult<Option<PublishStatusRow>>; + async fn list_workspace_public_documents( + &self, + workspace_slug: &str, + ) -> PortResult<Vec<PublicDocumentSummaryRow>>; + async fn get_public_meta_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> PortResult<Option<Document>>; + async fn public_exists_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> PortResult<bool>; } diff --git a/api/src/application/ports/awareness_port.rs b/api/crates/application/src/documents/ports/realtime/awareness_port.rs similarity index 69% rename from api/src/application/ports/awareness_port.rs rename to api/crates/application/src/documents/ports/realtime/awareness_port.rs index 0261f2b5..cc40771c 100644 --- a/api/src/application/ports/awareness_port.rs +++ b/api/crates/application/src/documents/ports/realtime/awareness_port.rs @@ -1,6 +1,8 @@ use async_trait::async_trait; +use crate::core::ports::errors::PortResult; + #[async_trait] pub trait AwarenessPublisher: Send + Sync { - async fn publish_awareness(&self, doc_id: &str, frame: Vec<u8>) -> anyhow::Result<()>; + async fn publish_awareness(&self, doc_id: &str, frame: Vec<u8>) -> PortResult<()>; } diff --git a/api/crates/application/src/documents/ports/realtime/mod.rs b/api/crates/application/src/documents/ports/realtime/mod.rs new file mode 100644 index 00000000..8a518ac2 --- /dev/null +++ b/api/crates/application/src/documents/ports/realtime/mod.rs @@ -0,0 +1,5 @@ +pub mod awareness_port; +pub mod realtime_hydration_port; +pub mod realtime_persistence_port; +pub mod realtime_port; +pub mod realtime_types; diff --git a/api/src/application/ports/realtime_hydration_port.rs b/api/crates/application/src/documents/ports/realtime/realtime_hydration_port.rs similarity index 69% rename from
api/src/application/ports/realtime_hydration_port.rs rename to api/crates/application/src/documents/ports/realtime/realtime_hydration_port.rs index 5fb7c254..e513a414 100644 --- a/api/src/application/ports/realtime_hydration_port.rs +++ b/api/crates/application/src/documents/ports/realtime/realtime_hydration_port.rs @@ -1,6 +1,9 @@ use async_trait::async_trait; use uuid::Uuid; +use crate::core::ports::errors::PortResult; +use domain::documents::doc_type::DocumentType; + #[derive(Debug, Clone)] pub struct DocSnapshot { pub version: i64, @@ -21,7 +24,7 @@ pub struct StreamFrame { #[derive(Debug, Clone)] pub struct DocumentRecord { - pub doc_type: String, + pub doc_type: DocumentType, pub path: Option<String>, pub desired_path: Option<String>, pub title: String, @@ -31,11 +34,11 @@ #[async_trait] pub trait DocStateReader: Send + Sync { - async fn latest_snapshot(&self, doc_id: &Uuid) -> anyhow::Result<Option<DocSnapshot>>; + async fn latest_snapshot(&self, doc_id: &Uuid) -> PortResult<Option<DocSnapshot>>; - async fn updates_since(&self, doc_id: &Uuid, from_seq: i64) -> anyhow::Result<Vec<DocUpdate>>; + async fn updates_since(&self, doc_id: &Uuid, from_seq: i64) -> PortResult<Vec<DocUpdate>>; - async fn document_record(&self, doc_id: &Uuid) -> anyhow::Result<Option<DocumentRecord>>; + async fn document_record(&self, doc_id: &Uuid) -> PortResult<Option<DocumentRecord>>; } #[async_trait] @@ -44,11 +47,11 @@ pub trait RealtimeBacklogReader: Send + Sync { &self, doc_id: &str, last_stream_id: Option<&str>, - ) -> anyhow::Result<Vec<StreamFrame>>; + ) -> PortResult<Vec<StreamFrame>>; async fn read_awareness_backlog( &self, doc_id: &str, last_stream_id: Option<&str>, - ) -> anyhow::Result<Vec<StreamFrame>>; + ) -> PortResult<Vec<StreamFrame>>; }
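DocStateReader implies the usual snapshot-plus-log hydration: take the newest snapshot, then replay every update with a higher sequence number. A rough consumer sketch; DocUpdate's fields and the decode/apply helpers are assumptions, since this hunk only shows DocSnapshot's version field:

    // Assumed hydration flow over DocStateReader (names beyond the trait are guesses).
    let (mut doc, from_seq) = match reader.latest_snapshot(&doc_id).await? {
        Some(snapshot) => (decode_snapshot(&snapshot)?, snapshot.version),
        None => (Doc::new(), 0),
    };
    for update in reader.updates_since(&doc_id, from_seq).await? {
        apply_update(&mut doc, &update)?;
    }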
diff --git a/api/src/application/ports/realtime_persistence_port.rs b/api/crates/application/src/documents/ports/realtime/realtime_persistence_port.rs similarity index 56% rename from api/src/application/ports/realtime_persistence_port.rs rename to api/crates/application/src/documents/ports/realtime/realtime_persistence_port.rs index 05dffad0..86bb797b 100644 --- a/api/src/application/ports/realtime_persistence_port.rs +++ b/api/crates/application/src/documents/ports/realtime/realtime_persistence_port.rs @@ -3,6 +3,8 @@ use futures_util::stream::BoxStream; use thiserror::Error; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + #[derive(Debug, Error)] #[error("document_missing")] pub struct DocumentMissingError { @@ -15,6 +17,12 @@ pub struct PersistenceTask { pub document_id: Uuid, } +#[derive(Debug, Clone)] +pub struct SnapshotEntry { + pub version: i64, + pub bytes: Vec<u8>, +} + #[async_trait] pub trait DocPersistencePort: Send + Sync { async fn append_update_with_seq( @@ -22,26 +30,26 @@ doc_id: &Uuid, seq: i64, update: &[u8], - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn latest_update_seq(&self, doc_id: &Uuid) -> anyhow::Result<Option<i64>>; + async fn latest_update_seq(&self, doc_id: &Uuid) -> PortResult<Option<i64>>; async fn persist_snapshot( &self, doc_id: &Uuid, version: i64, snapshot: &[u8], - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn latest_snapshot_entry(&self, doc_id: &Uuid) -> anyhow::Result<Option<(i64, Vec<u8>)>>; + async fn latest_snapshot_entry(&self, doc_id: &Uuid) -> PortResult<Option<SnapshotEntry>>; - async fn latest_snapshot_version(&self, doc_id: &Uuid) -> anyhow::Result<Option<i64>>; + async fn latest_snapshot_version(&self, doc_id: &Uuid) -> PortResult<Option<i64>>; - async fn prune_snapshots(&self, doc_id: &Uuid, keep_latest: i64) -> anyhow::Result<()>; + async fn prune_snapshots(&self, doc_id: &Uuid, keep_latest: i64) -> PortResult<()>; - async fn prune_updates_before(&self, doc_id: &Uuid, seq_inclusive: i64) -> anyhow::Result<()>; + async fn prune_updates_before(&self, doc_id: &Uuid, seq_inclusive: i64) -> PortResult<()>; - async fn clear_updates(&self, doc_id: &Uuid) -> anyhow::Result<()>; + async fn clear_updates(&self, doc_id: &Uuid) -> PortResult<()>; } #[async_trait] @@ -49,7 +57,7 @@ pub trait PersistenceTaskConsumerPort: Send + Sync { async fn subscribe_tasks( &self, start_id: Option<String>, - ) -> anyhow::Result<BoxStream<'static, anyhow::Result<PersistenceTask>>>; + ) -> PortResult<BoxStream<'static, anyhow::Result<PersistenceTask>>>; - async fn ack_task(&self, entry_id: &str) -> anyhow::Result<()>; + async fn ack_task(&self, entry_id: &str) -> PortResult<()>; } diff --git a/api/src/application/ports/realtime_port.rs b/api/crates/application/src/documents/ports/realtime/realtime_port.rs similarity index 74% rename from api/src/application/ports/realtime_port.rs rename to api/crates/application/src/documents/ports/realtime/realtime_port.rs index e3fad58a..d41a0feb 100644 --- a/api/src/application/ports/realtime_port.rs +++ b/api/crates/application/src/documents/ports/realtime/realtime_port.rs @@ -1,6 +1,8 @@ use async_trait::async_trait; use std::fmt; +use crate::core::ports::errors::PortResult; + #[derive(Debug)] pub struct RealtimeError(Box<dyn std::error::Error + Send + Sync>); @@ -31,19 +33,19 @@ pub trait RealtimeEngine: Send + Sync { sink: DynRealtimeSink, stream: DynRealtimeStream, can_edit: bool, - ) -> anyhow::Result<()>; + ) -> PortResult<()>; - async fn get_content(&self, doc_id: &str) -> anyhow::Result<Option<String>>; + async fn get_content(&self, doc_id: &str) -> PortResult<Option<String>>; - async fn force_persist(&self, doc_id: &str) -> anyhow::Result<()>; + async fn force_persist(&self, doc_id: &str) -> PortResult<()>; - async fn force_save_to_fs(&self, doc_id: &str) -> anyhow::Result<()> { + async fn force_save_to_fs(&self, doc_id: &str) -> PortResult<()> { self.force_persist(doc_id).await } - async fn apply_snapshot(&self, doc_id: &str, snapshot: &[u8]) -> anyhow::Result<()>; + async fn apply_snapshot(&self, doc_id: &str, snapshot: &[u8]) -> PortResult<()>; - async fn set_document_editable(&self, _doc_id: &str, _editable: bool) -> anyhow::Result<()> { + async fn set_document_editable(&self, _doc_id: &str, _editable: bool) -> PortResult<()> { Ok(()) } } diff --git a/api/src/application/ports/realtime_types.rs b/api/crates/application/src/documents/ports/realtime/realtime_types.rs similarity index 64% rename from api/src/application/ports/realtime_types.rs rename to api/crates/application/src/documents/ports/realtime/realtime_types.rs index 9894e937..d974de89 100644 --- a/api/src/application/ports/realtime_types.rs +++ b/api/crates/application/src/documents/ports/realtime/realtime_types.rs @@ -1,12 +1,10 @@ use std::pin::Pin; -use std::sync::Arc; use futures_util::{Sink, Stream}; -use tokio::sync::Mutex; use super::realtime_port::RealtimeError; pub type DynRealtimeSink = - Arc<Mutex<Pin<Box<dyn Sink<Vec<u8>, Error = RealtimeError> + Send + Sync + 'static>>>>; + Pin<Box<dyn Sink<Vec<u8>, Error = RealtimeError> + Send + Sync + 'static>>; pub type DynRealtimeStream = Pin<Box<dyn Stream<Item = Result<Vec<u8>, RealtimeError>> + Send + Sync + 'static>>; diff --git a/api/crates/application/src/documents/ports/sharing/mod.rs b/api/crates/application/src/documents/ports/sharing/mod.rs new file mode 100644 index 00000000..2943b858 --- /dev/null +++ b/api/crates/application/src/documents/ports/sharing/mod.rs @@ -0,0 +1,2 @@ +pub mod share_access_port; +pub mod shares_repository; diff --git a/api/crates/application/src/documents/ports/sharing/share_access_port.rs b/api/crates/application/src/documents/ports/sharing/share_access_port.rs new file mode 100644 index 00000000..387fa712 --- /dev/null +++
b/api/crates/application/src/documents/ports/sharing/share_access_port.rs @@ -0,0 +1,16 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; +use domain::documents::share::{ShareContext, SharePermission}; + +#[async_trait] +pub trait ShareAccessPort: Send + Sync { + async fn resolve_share_by_token(&self, token: &str) -> PortResult<Option<ShareContext>>; + + async fn get_materialized_permission( + &self, + parent_share_id: Uuid, + doc_id: Uuid, + ) -> PortResult<Option<SharePermission>>; +} diff --git a/api/crates/application/src/documents/ports/sharing/shares_repository.rs b/api/crates/application/src/documents/ports/sharing/shares_repository.rs new file mode 100644 index 00000000..03108e54 --- /dev/null +++ b/api/crates/application/src/documents/ports/sharing/shares_repository.rs @@ -0,0 +1,136 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; +use chrono::{DateTime, Utc}; +use domain::documents::doc_type::DocumentType; +use domain::documents::share::{ShareContext, SharePermission}; +use domain::documents::title::Title; + +#[derive(Debug, Clone)] +pub struct ShareRow { + pub id: Uuid, + pub token: String, + pub permission: SharePermission, + pub expires_at: Option<DateTime<Utc>>, + pub parent_share_id: Option<Uuid>, + pub document_id: Uuid, + pub document_type: DocumentType, + pub document_title: Title, + pub created_at: chrono::DateTime<Utc>, +} + +#[derive(Debug, Clone)] +pub struct ShareMountRow { + pub id: Uuid, + pub token: String, + pub target_document_id: Uuid, + pub target_document_type: DocumentType, + pub target_title: Title, + pub permission: SharePermission, + pub parent_folder_id: Option<Uuid>, + pub created_at: chrono::DateTime<Utc>, +} + +#[derive(Debug, Clone)] +pub struct CreatedShare { + pub token: String, + pub share_id: Uuid, + pub document_type: DocumentType, +} + +#[derive(Debug, Clone)] +pub struct ShareTokenValidation { + pub document_id: Uuid, + pub permission: SharePermission, + pub expires_at: Option<DateTime<Utc>>, + pub title: Title, +} + +#[derive(Debug, Clone)] +pub struct ApplicableShareRow { + pub token: String, + pub permission: SharePermission, + pub expires_at: Option<DateTime<Utc>>, +} + +#[derive(Debug, Clone)] +pub struct ShareDocumentMeta { + pub document_id: Uuid, + pub owner_id: Uuid, + pub workspace_id: Uuid, +} + +#[derive(Debug, Clone)] +pub struct ShareSubtreeNode { + pub id: Uuid, + pub title: Title, + pub document_type: DocumentType, + pub parent_id: Option<Uuid>, + pub created_at: DateTime<Utc>, + pub updated_at: DateTime<Utc>, +} + +#[async_trait] +pub trait SharesRepository: Send + Sync { + async fn create_share( + &self, + workspace_id: Uuid, + actor_id: Uuid, + document_id: Uuid, + permission: SharePermission, + expires_at: Option<DateTime<Utc>>, + ) -> PortResult<CreatedShare>; + + async fn list_document_shares( + &self, + workspace_id: Uuid, + document_id: Uuid, + ) -> PortResult<Vec<ShareRow>>; + + async fn delete_share(&self, workspace_id: Uuid, token: &str) -> PortResult<bool>; + + async fn validate_share_token(&self, token: &str) -> PortResult<Option<ShareTokenValidation>>; + + async fn list_applicable_shares_for_doc( + &self, + workspace_id: Uuid, + doc_id: Uuid, + ) -> PortResult<Vec<ApplicableShareRow>>; + + async fn list_active_shares(&self, workspace_id: Uuid) -> PortResult<Vec<ShareRow>>; + + async fn resolve_share_by_token(&self, token: &str) -> PortResult<Option<ShareContext>>; + + async fn list_share_mounts(&self, workspace_id: Uuid) -> PortResult<Vec<ShareMountRow>>; + + #[allow(clippy::too_many_arguments)] + async fn create_share_mount( + &self, + workspace_id: Uuid, + actor_id: Uuid, + token: &str, + target_document_id: Uuid, + target_document_type: DocumentType, + target_title: Title,
+ permission: SharePermission, + parent_folder_id: Option<Uuid>, + ) -> PortResult<ShareMountRow>; + + async fn delete_share_mount(&self, workspace_id: Uuid, mount_id: Uuid) -> PortResult<bool>; + + async fn get_share_document_meta(&self, token: &str) -> PortResult<Option<ShareDocumentMeta>>; + + async fn list_subtree_nodes(&self, root_id: Uuid) -> PortResult<Vec<ShareSubtreeNode>>; + + async fn list_materialized_children(&self, parent_share_id: Uuid) -> PortResult<Vec<ShareSubtreeNode>>; + + async fn materialize_folder_share( + &self, + workspace_id: Uuid, + actor_id: Uuid, + token: &str, + ) -> PortResult<u64>; + + async fn revoke_subtree_shares(&self, workspace_id: Uuid, root_id: Uuid) -> PortResult<u64>; +} diff --git a/api/crates/application/src/documents/ports/tagging/mod.rs b/api/crates/application/src/documents/ports/tagging/mod.rs new file mode 100644 index 00000000..5a5457fe --- /dev/null +++ b/api/crates/application/src/documents/ports/tagging/mod.rs @@ -0,0 +1,2 @@ +pub mod tag_repository; +pub mod tagging_repository; diff --git a/api/src/application/ports/tag_repository.rs b/api/crates/application/src/documents/ports/tagging/tag_repository.rs similarity index 52% rename from api/src/application/ports/tag_repository.rs rename to api/crates/application/src/documents/ports/tagging/tag_repository.rs index 71dce57f..18dbd37c 100644 --- a/api/src/application/ports/tag_repository.rs +++ b/api/crates/application/src/documents/ports/tagging/tag_repository.rs @@ -1,11 +1,19 @@ use async_trait::async_trait; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + +#[derive(Debug, Clone)] +pub struct TagSummary { + pub name: String, + pub count: i64, +} + #[async_trait] pub trait TagRepository: Send + Sync { async fn list_tags( &self, owner_id: Uuid, filter: Option<String>, - ) -> anyhow::Result<Vec<(String, i64)>>; + ) -> PortResult<Vec<TagSummary>>; } diff --git a/api/crates/application/src/documents/ports/tagging/tagging_repository.rs b/api/crates/application/src/documents/ports/tagging/tagging_repository.rs new file mode 100644 index 00000000..4748355f --- /dev/null +++ b/api/crates/application/src/documents/ports/tagging/tagging_repository.rs @@ -0,0 +1,12 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; + +#[async_trait] +pub trait TaggingRepository: Send + Sync { + async fn clear_document_tags(&self, doc_id: Uuid) -> PortResult<()>; + async fn upsert_tag_return_id(&self, name: &str) -> PortResult<i64>; + async fn owner_doc_exists(&self, doc_id: Uuid, owner_id: Uuid) -> PortResult<bool>; + async fn associate_document_tag(&self, doc_id: Uuid, tag_id: i64) -> PortResult<()>; +} diff --git a/api/crates/application/src/documents/ports/tx_runner.rs b/api/crates/application/src/documents/ports/tx_runner.rs new file mode 100644 index 00000000..d22dc8cb --- /dev/null +++ b/api/crates/application/src/documents/ports/tx_runner.rs @@ -0,0 +1,53 @@ +use std::any::Any; +use std::future::Future; +use std::pin::Pin; + +use anyhow::anyhow; +use async_trait::async_trait; + +use crate::core::ports::storage::storage_projection_queue::StorageProjectionQueueTx; +use crate::documents::ports::document_repository::DocumentRepositoryTx; +use crate::documents::ports::files::files_repository::FilesRepositoryTx; + +pub type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>; +pub type BoxedTxResult = Box<dyn Any + Send>; +pub type DocumentsTxFuture<'tx> = BoxFuture<'tx, anyhow::Result<BoxedTxResult>>; +pub type DocumentsTxFn = + Box<dyn for<'tx> FnOnce(&'tx mut dyn DocumentsTx) -> DocumentsTxFuture<'tx> + Send>; + +pub trait DocumentsTx: Send { + fn documents(&mut self) -> &mut dyn DocumentRepositoryTx; + fn files(&mut self) -> &mut dyn FilesRepositoryTx; + fn storage_jobs(&mut self)
-> &mut dyn StorageProjectionQueueTx; +} + +#[async_trait] +pub trait DocumentsTxRunner: Send + Sync { + async fn run_boxed(&self, f: DocumentsTxFn) -> anyhow::Result<BoxedTxResult>; +} + +pub async fn run_in_tx<T, F>(runner: &dyn DocumentsTxRunner, f: F) -> anyhow::Result<T> +where + T: Send + 'static, + F: for<'tx> FnOnce(&'tx mut dyn DocumentsTx) -> BoxFuture<'tx, anyhow::Result<T>> + + Send + + 'static, +{ + let mut f = Some(f); + let result = runner + .run_boxed(Box::new(move |tx| { + let f = f + .take() + .expect("DocumentsTx closure must be called exactly once"); + Box::pin(async move { + let out = f(tx).await?; + Ok(Box::new(out) as BoxedTxResult) + }) + })) + .await?; + + result + .downcast::<T>() + .map(|v| *v) + .map_err(|_| anyhow!("documents tx runner output type mismatch")) +}
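run_in_tx erases the closure's result to Box<dyn Any + Send> so DocumentsTxRunner stays object-safe, then downcasts back to the caller's concrete type. A minimal calling sketch, mirroring the signature above (assumes a tx_runner: Arc<dyn DocumentsTxRunner> and the ids in scope):

    // The closure gets exclusive access to the tx-scoped repositories and
    // must return a boxed future, matching the BoxFuture alias above.
    let meta = run_in_tx(tx_runner.as_ref(), move |tx| {
        Box::pin(async move {
            let meta = tx.documents().get_meta_for_owner(doc_id, workspace_id).await?;
            Ok(meta)
        })
    })
    .await?;

The Any round-trip trades one downcast at the boundary for a runner trait that infrastructure can implement over a concrete sqlx transaction.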
diff --git a/api/crates/application/src/documents/services/attachments.rs b/api/crates/application/src/documents/services/attachments.rs new file mode 100644 index 00000000..2b97c0c6 --- /dev/null +++ b/api/crates/application/src/documents/services/attachments.rs @@ -0,0 +1,131 @@ +use serde_json::json; +use tracing::warn; +use uuid::Uuid; + +use domain::documents::document::Document as DomainDocument; +use domain::documents::path as doc_path; + +use crate::core::services::errors::ServiceError; +use crate::core::services::utils::hash::sha256_hex; + +use super::DocumentService; + +#[derive(Debug, Clone)] +pub(super) struct AttachmentSnapshot { + filename: String, + content_type: Option<String>, + bytes: Vec<u8>, + content_hash: String, +} + +impl DocumentService { + pub(super) async fn snapshot_attachments( + &self, + doc_id: Uuid, + ) -> Result<Vec<AttachmentSnapshot>, ServiceError> { + let files = self + .files_repo + .list_files_for_document(doc_id) + .await + .map_err(ServiceError::from)?; + let mut snapshots = Vec::new(); + for file in files { + let abs_path = self.storage.absolute_from_relative(&file.storage_path); + let exists = self + .storage + .exists(&abs_path) + .await + .map_err(ServiceError::from)?; + if !exists { + warn!( + document_id = %doc_id, + storage_path = %file.storage_path, + "duplicate_attachment_missing" + ); + continue; + } + let bytes = self + .storage + .read_bytes(&abs_path) + .await + .map_err(ServiceError::from)?; + let content_hash = hash_bytes(&bytes); + snapshots.push(AttachmentSnapshot { + filename: file.filename, + content_type: file.content_type, + bytes, + content_hash, + }); + } + Ok(snapshots) + } + + pub(super) async fn copy_attachments( + &self, + target_doc: &DomainDocument, + attachments: &[AttachmentSnapshot], + actor_id: Uuid, + ) -> Result<(), ServiceError> { + if attachments.is_empty() { + return Ok(()); + } + let base_dir = self + .storage + .build_doc_dir(target_doc.id()) + .await + .map_err(ServiceError::from)?; + for attachment in attachments { + let filename = std::path::Path::new(&attachment.filename) + .file_name() + .and_then(|f| f.to_str()) + .map(str::to_string) + .filter(|f| !f.is_empty()) + .unwrap_or_else(|| attachment.filename.clone()); + let target_path = base_dir.join("attachments").join(&filename); + self.storage + .write_bytes(&target_path, &attachment.bytes) + .await + .map_err(ServiceError::from)?; + let storage_path = self + .storage + .relative_from_uploads(&target_path) + .replace('\\', "/"); + self.files_repo + .insert_file( + target_doc.id(), + &filename, + attachment.content_type.as_deref(), + attachment.bytes.len() as i64, + &storage_path, + &attachment.content_hash, + ) + .await + .map_err(ServiceError::from)?; + if let Some(repo_path) = + doc_path::repo_relative_from_storage(target_doc.workspace_id(), &storage_path) + { + let payload = json!({ + "repo_path": repo_path.as_str(), + "storage_path": storage_path, + "backend": "api", + "size": attachment.bytes.len() as i64, + "content_hash": attachment.content_hash, + "workspace_id": target_doc.workspace_id().to_string(), + "actor_id": actor_id.to_string(), + }); + self.record_event( + target_doc.workspace_id(), + target_doc.id(), + "attachment.ingest_upsert", + Some(payload), + ) + .await; + } + } + Ok(()) + } +} + +fn hash_bytes(bytes: &[u8]) -> String { + sha256_hex(bytes) +} diff --git a/api/crates/application/src/documents/services/content.rs b/api/crates/application/src/documents/services/content.rs new file mode 100644 index 00000000..a9aebf3a --- /dev/null +++ b/api/crates/application/src/documents/services/content.rs @@ -0,0 +1,139 @@ +use serde_json::json; +use tracing::warn; +use uuid::Uuid; + +use domain::documents::document::Document as DomainDocument; + +use crate::core::services::access::{self, Actor}; +use crate::core::services::errors::ServiceError; +use crate::documents::ports::tx_runner::run_in_tx; +use crate::documents::services::realtime::snapshot::snapshot_from_markdown; + +use super::DocumentService; +use super::patch::{DocumentPatchOperation, apply_patch_operations}; +use super::util::map_tx_error; + +impl DocumentService { + pub async fn get_content(&self, actor: &Actor, doc_id: Uuid) -> Result<String, ServiceError> { + access::require_view( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + .map_err(|err| match err { + ServiceError::Forbidden => ServiceError::NotFound, + other => other, + })?; + + let content = self + .realtime + .get_content(&doc_id.to_string()) + .await + .map_err(ServiceError::from)? + .unwrap_or_default(); + Ok(content) + } + + pub async fn update_content( + &self, + actor: &Actor, + doc_id: Uuid, + content: &str, + ) -> Result<DomainDocument, ServiceError> { + access::require_edit( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + .map_err(|err| match err { + ServiceError::Forbidden => ServiceError::Unauthorized, + other => other, + })?; + + let snapshot_bytes = snapshot_from_markdown(content); + self.realtime + .apply_snapshot(&doc_id.to_string(), snapshot_bytes.as_slice()) + .await + .map_err(ServiceError::from)?; + + if let Err(err) = self.realtime.force_persist(&doc_id.to_string()).await { + warn!(document_id = %doc_id, error = ?err, "document_force_persist_after_update_failed"); + } + + let doc = self + .document_repo + .get_by_id(doc_id) + .await + .map_err(ServiceError::from)?
+            .ok_or(ServiceError::NotFound)?;
+        let workspace_id = doc.workspace_id();
+        let doc_id = doc.id();
+        run_in_tx(self.tx_runner.as_ref(), move |tx| {
+            Box::pin(async move {
+                Self::enqueue_doc_sync_tx(
+                    tx.storage_jobs(),
+                    workspace_id,
+                    doc_id,
+                    "update_content",
+                )
+                .await?;
+                Ok(())
+            })
+        })
+        .await
+        .map_err(map_tx_error)?;
+        let repo_path = doc.desired_path().as_str().to_string();
+        let event_payload = json!({
+            "repo_path": repo_path,
+            "desired_path": doc.desired_path().as_str(),
+            "slug": doc.slug().as_str(),
+            "doc_type": doc.doc_type().as_str(),
+            "owner_id": doc.workspace_id(),
+        });
+        self.record_event(
+            doc.workspace_id(),
+            doc.id(),
+            "document.content_updated",
+            Some(event_payload),
+        )
+        .await;
+        Ok(doc)
+    }
+
+    pub async fn patch_content(
+        &self,
+        actor: &Actor,
+        doc_id: Uuid,
+        operations: &[DocumentPatchOperation],
+    ) -> Result<DomainDocument, ServiceError> {
+        if operations.is_empty() {
+            return Err(ServiceError::BadRequest("patch_operations_required"));
+        }
+
+        access::require_edit(
+            self.access_repo.as_ref(),
+            self.share_access.as_ref(),
+            actor,
+            doc_id,
+        )
+        .await
+        .map_err(|err| match err {
+            ServiceError::Forbidden => ServiceError::Unauthorized,
+            other => other,
+        })?;
+
+        let current = self
+            .realtime
+            .get_content(&doc_id.to_string())
+            .await
+            .map_err(ServiceError::from)?
+            .unwrap_or_default();
+        let updated = apply_patch_operations(&current, operations)?;
+
+        self.update_content(actor, doc_id, &updated).await
+    }
+}
diff --git a/api/crates/application/src/documents/services/crud.rs b/api/crates/application/src/documents/services/crud.rs
new file mode 100644
index 00000000..6f0f7725
--- /dev/null
+++ b/api/crates/application/src/documents/services/crud.rs
@@ -0,0 +1,430 @@
+use serde_json::json;
+use tracing::{error, warn};
+use uuid::Uuid;
+
+use domain::access::permissions::PermissionSet;
+use domain::documents::document::{Document as DomainDocument, SearchHit};
+use domain::documents::permissions as doc_permissions;
+use domain::documents::policy::DocumentState;
+use domain::documents::{hierarchy, path as doc_path, policy as doc_policy, title};
+
+use crate::core::services::access::{self, Actor};
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::DocumentListFilter;
+use crate::documents::ports::tx_runner::run_in_tx;
+use crate::documents::use_cases::create_document::CreateDocument;
+use crate::documents::use_cases::delete_document::DeleteDocument;
+use crate::documents::use_cases::get_document::GetDocument;
+use crate::documents::use_cases::list_documents::ListDocuments;
+use crate::documents::use_cases::search_documents::SearchDocuments;
+use crate::documents::use_cases::update_document::UpdateDocument;
+
+use super::DocumentService;
+use super::util::{map_parent_error, map_policy_error, map_tx_error, to_repo_state};
+
+impl DocumentService {
+    pub async fn list_for_user(
+        &self,
+        workspace_id: Uuid,
+        query: Option<String>,
+        tag: Option<String>,
+        state: DocumentListFilter,
+    ) -> Result<Vec<DomainDocument>, ServiceError> {
+        let uc = ListDocuments {
+            repo: self.document_repo.as_ref(),
+        };
+        uc.execute(workspace_id, query, tag, to_repo_state(state))
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    pub async fn create_for_user(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        title: &str,
+        parent_id: Option<Uuid>,
+        doc_type: domain::documents::doc_type::DocumentType,
+        created_by_plugin: Option<&str>,
+    ) -> Result<DomainDocument, ServiceError> {
+        doc_permissions::ensure_can_create(permissions, doc_type)
+            .map_err(|_|
ServiceError::Forbidden)?; + let created_by_plugin = created_by_plugin.map(ToOwned::to_owned); + let title = domain::documents::title::Title::from_user_input(title); + let parent_desired_path = if let Some(parent_id) = parent_id { + let meta = self.load_owner_meta(workspace_id, parent_id).await?; + hierarchy::ensure_active_parent(Some(hierarchy::ParentMeta { + archived_at: meta.archived_at, + })) + .map_err(map_parent_error)?; + Some(meta.desired_path) + } else { + None + }; + let doc = match run_in_tx(self.tx_runner.as_ref(), move |tx| { + Box::pin(async move { + let doc = { + let mut uc = CreateDocument { + repo: tx.documents(), + }; + uc.execute( + workspace_id, + actor_id, + &title, + parent_id, + parent_desired_path.as_ref(), + doc_type, + created_by_plugin.as_deref(), + ) + .await? + }; + Self::enqueue_projection_for_document_tx( + tx.storage_jobs(), + &doc, + "create_document", + ) + .await?; + Ok(doc) + }) + }) + .await + { + Ok(doc) => doc, + Err(err) => { + let service_err = map_tx_error(err); + if service_err.is_internal() { + error!(error = ?service_err, "document_create_repo_failed"); + } + return Err(service_err); + } + }; + let repo_path = doc.desired_path().as_str().to_string(); + let event_payload = json!({ + "title": doc.title().as_str(), + "parent_id": doc.parent_id(), + "doc_type": doc.doc_type().as_str(), + "repo_path": repo_path, + "slug": doc.slug().as_str(), + "desired_path": doc.desired_path().as_str(), + "owner_id": doc.workspace_id(), + "actor_id": actor_id, + }); + self.record_event( + doc.workspace_id(), + doc.id(), + "document.created", + Some(event_payload), + ) + .await; + Ok(doc) + } + + pub async fn duplicate_document( + &self, + workspace_id: Uuid, + source_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: Option, + parent_id: Option>, + ) -> Result { + let actor = Actor::User(actor_id); + access::require_view( + self.access_repo.as_ref(), + self.share_access.as_ref(), + &actor, + source_id, + ) + .await?; + + let source = self + .document_repo + .get_by_id(source_id) + .await + .map_err(ServiceError::from)? + .ok_or(ServiceError::NotFound)?; + if source.workspace_id() != workspace_id { + return Err(ServiceError::NotFound); + } + let state = DocumentState::new(source.doc_type(), source.archived_at()); + if doc_policy::ensure_duplicate_allowed(state).is_err() { + return Err(ServiceError::BadRequest("cannot_duplicate_folder")); + } + + let target_parent = match parent_id { + Some(explicit) => explicit, + None => source.parent_id().or(source.archived_parent_id()), + }; + + let source_content = self + .realtime + .get_content(&source_id.to_string()) + .await + .map_err(ServiceError::from)? 
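The `run_in_tx` call sites in this file (`update_content`, `create_for_user`) lean on the type-erasure trick from `tx_runner.rs`: the closure's typed result is boxed as `BoxedTxResult` so the runner trait stays object-safe, and `run_in_tx` downcasts it back at the call site. A minimal caller sketch within that module, assuming `BoxedTxResult` is a `Box<dyn Any + Send>`-style alias (its definition sits above this hunk) and with a stand-in body instead of real repository calls:

```rust
// Sketch only: the run_in_tx shape is from tx_runner.rs; the closure body
// here is a placeholder for real work against `tx.documents()` etc.
async fn count_in_tx(runner: &dyn DocumentsTxRunner) -> anyhow::Result<u64> {
    run_in_tx(runner, move |_tx| {
        Box::pin(async move {
            let n: u64 = 42; // e.g. a repository query inside the transaction
            Ok(n) // boxed as BoxedTxResult by the helper, downcast back to u64
        })
    })
    .await
}
```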
+ .unwrap_or_default(); + + let attachments = self.snapshot_attachments(source.id()).await?; + let new_title = title::duplicate_title(source.title(), title); + let new_doc = self + .create_for_user( + workspace_id, + actor_id, + permissions, + new_title.as_str(), + target_parent, + source.doc_type(), + source.created_by_plugin(), + ) + .await?; + + let result = async { + let updated_doc = self + .update_content(&actor, new_doc.id(), &source_content) + .await?; + + self.copy_attachments(&updated_doc, &attachments, actor_id) + .await?; + + Ok::<_, ServiceError>(updated_doc) + } + .await; + + match result { + Ok(doc) => Ok(doc), + Err(err) => { + if let Err(clean_err) = self + .delete_for_user_internal( + workspace_id, + new_doc.id(), + Some(actor_id), + permissions, + false, + ) + .await + { + warn!( + document_id = %new_doc.id(), + error = ?clean_err, + "duplicate_cleanup_failed" + ); + } + Err(err) + } + } + } + + pub async fn get_for_actor( + &self, + actor: &Actor, + doc_id: Uuid, + ) -> Result { + let uc = GetDocument { + repo: self.document_repo.as_ref(), + shares: self.share_access.as_ref(), + access: self.access_repo.as_ref(), + }; + uc.execute(actor, doc_id) + .await + .map_err(ServiceError::from)? + .ok_or(ServiceError::NotFound) + } + + pub async fn delete_for_user( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Option, + permissions: &PermissionSet, + ) -> Result { + self.delete_for_user_internal(workspace_id, doc_id, actor_id, permissions, true) + .await + } + + pub(super) async fn delete_for_user_internal( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Option, + permissions: &PermissionSet, + enforce_permissions: bool, + ) -> Result { + let root_meta = self.load_owner_meta(workspace_id, doc_id).await?; + if enforce_permissions { + doc_permissions::ensure_can_delete(permissions, root_meta.doc_type) + .map_err(|_| ServiceError::Forbidden)?; + } + let permission_snapshot = if enforce_permissions { + permissions.to_vec() + } else { + // Cleanup flows (e.g., duplicate rollback) bypass user permissions so storage delete + // jobs always have authority to remove docs and attachments. + PermissionSet::all().to_vec() + }; + let (deleted, delete_events) = run_in_tx(self.tx_runner.as_ref(), move |tx| { + Box::pin(async move { + let delete_plan = + Self::build_delete_plan(tx, doc_id, workspace_id, root_meta.clone()).await?; + if delete_plan.is_empty() { + return Ok((false, Vec::new())); + } + + let mut deleted = false; + let mut delete_events = Vec::new(); + for entry in delete_plan { + let deleted_type = { + let mut uc = DeleteDocument { + repo: tx.documents(), + }; + uc.execute(entry.doc_id, workspace_id).await? 
+                };
+                if deleted_type.is_some() {
+                    deleted = true;
+                    Self::enqueue_delete_job_for_entry(
+                        tx,
+                        workspace_id,
+                        &entry,
+                        &permission_snapshot,
+                        actor_id,
+                    )
+                    .await?;
+                    delete_events.push(entry.clone());
+                }
+            }
+            Ok((deleted, delete_events))
+            })
+        })
+        .await
+        .map_err(map_tx_error)?;
+
+        if deleted {
+            for entry in delete_events {
+                self.record_delete_event(workspace_id, &entry, actor_id)
+                    .await;
+            }
+        }
+        Ok(deleted)
+    }
+
+    pub async fn update_metadata(
+        &self,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        title: Option<String>,
+        parent_id: Option<Option<Uuid>>,
+    ) -> Result<DomainDocument, ServiceError> {
+        let meta = self.load_owner_meta(workspace_id, doc_id).await?;
+        let state = DocumentState::new(meta.doc_type, meta.archived_at);
+        let requested_title = title
+            .as_deref()
+            .map(domain::documents::title::Title::from_user_input);
+        let rename_requested = title.is_some();
+        let move_requested = parent_id.is_some();
+        if rename_requested {
+            doc_policy::ensure_editable(state, permissions).map_err(map_policy_error)?;
+        }
+        if move_requested {
+            doc_policy::ensure_movable(state, permissions).map_err(map_policy_error)?;
+        }
+        let parent_desired_path = match parent_id {
+            Some(Some(parent)) => {
+                let meta = self.load_owner_meta(workspace_id, parent).await?;
+                hierarchy::ensure_active_parent(Some(hierarchy::ParentMeta {
+                    archived_at: meta.archived_at,
+                }))
+                .map_err(map_parent_error)?;
+                Some(meta.desired_path)
+            }
+            Some(None) => None,
+            None => doc_path::parent_desired_path(&meta.desired_path),
+        };
+        let previous_repo_path =
+            doc_path::workspace_repo_relative(workspace_id, meta.path.as_deref())
+                .map(|p| p.into_string());
+        let current_title = meta.title.clone();
+        let current_slug = meta.slug.clone();
+        let current_desired_path = meta.desired_path.clone();
+        let current_doc_type = meta.doc_type;
+        let previous_desired_path = meta.desired_path.as_str().to_string();
+        let doc = match run_in_tx(self.tx_runner.as_ref(), move |tx| {
+            Box::pin(async move {
+                let doc = {
+                    let mut uc = UpdateDocument {
+                        repo: tx.documents(),
+                    };
+                    uc.execute(
+                        doc_id,
+                        workspace_id,
+                        &current_title,
+                        &current_slug,
+                        &current_desired_path,
+                        current_doc_type,
+                        requested_title.as_ref(),
+                        parent_id,
+                        parent_desired_path.as_ref(),
+                    )
+                    .await?
+ }; + let Some(doc) = doc else { + return Err(ServiceError::NotFound.into()); + }; + Self::enqueue_projection_for_document_tx( + tx.storage_jobs(), + &doc, + "update_metadata", + ) + .await?; + Ok(doc) + }) + }) + .await + { + Ok(doc) => doc, + Err(err) => { + let service_err = map_tx_error(err); + if service_err.is_internal() { + error!(error = ?service_err, "document_update_repo_failed"); + } + return Err(service_err); + } + }; + let repo_path = doc.desired_path().as_str().to_string(); + let event_payload = json!({ + "title": doc.title().as_str(), + "parent_id": doc.parent_id(), + "repo_path": repo_path, + "doc_type": doc.doc_type().as_str(), + "slug": doc.slug().as_str(), + "desired_path": doc.desired_path().as_str(), + "owner_id": doc.workspace_id(), + "actor_id": actor_id, + "previous_path": previous_repo_path, + "previous_desired_path": previous_desired_path, + }); + self.record_event( + doc.workspace_id(), + doc.id(), + "document.metadata_updated", + Some(event_payload), + ) + .await; + Ok(doc) + } + + pub async fn search_for_user( + &self, + workspace_id: Uuid, + query: Option, + limit: i64, + ) -> Result, ServiceError> { + let uc = SearchDocuments { + repo: self.document_repo.as_ref(), + }; + uc.execute(workspace_id, query, limit) + .await + .map_err(ServiceError::from) + } +} diff --git a/api/crates/application/src/documents/services/deletion.rs b/api/crates/application/src/documents/services/deletion.rs new file mode 100644 index 00000000..ff0f6fc3 --- /dev/null +++ b/api/crates/application/src/documents/services/deletion.rs @@ -0,0 +1,133 @@ +use serde_json::json; +use uuid::Uuid; + +use domain::documents::doc_type::DocumentType; +use domain::documents::{delete_plan, path as doc_path}; + +use crate::core::ports::storage::storage_projection_queue::StorageDeleteJobMetadata; +use crate::core::services::errors::ServiceError; +use crate::documents::ports::document_repository::DocMeta; +use crate::documents::ports::tx_runner::DocumentsTx; + +use super::DocumentService; + +impl DocumentService { + pub(super) async fn build_delete_plan( + tx: &mut dyn DocumentsTx, + doc_id: Uuid, + workspace_id: Uuid, + root_meta: DocMeta, + ) -> Result, ServiceError> { + let subtree = tx + .documents() + .list_owned_subtree_documents(workspace_id, doc_id) + .await + .map_err(ServiceError::from)?; + + let mut nodes = Vec::new(); + for node in subtree { + let meta = if node.id == doc_id { + root_meta.clone() + } else { + tx.documents() + .get_meta_for_owner(node.id, workspace_id) + .await + .map_err(ServiceError::from)? + .ok_or(ServiceError::NotFound)? + }; + let attachments = if node.doc_type != DocumentType::Folder { + tx.files() + .list_storage_paths_for_document(node.id) + .await + .map_err(ServiceError::from)? 
+ } else { + Vec::new() + }; + nodes.push(delete_plan::DeleteNode { + id: node.id, + doc_type: node.doc_type, + meta, + attachments, + }); + } + + Ok(delete_plan::build_delete_plan(doc_id, root_meta, nodes)) + } + + pub(super) async fn enqueue_delete_job_for_entry( + tx: &mut dyn DocumentsTx, + workspace_id: Uuid, + entry: &delete_plan::DeleteEntry, + permission_snapshot: &[String], + actor_id: Option, + ) -> Result<(), ServiceError> { + let repo_path = doc_path::workspace_repo_relative(workspace_id, entry.meta.path.as_deref()) + .map(|p| p.into_string()) + .unwrap_or_else(|| entry.meta.desired_path.as_str().to_string()); + let metadata = StorageDeleteJobMetadata { + workspace_id, + repo_path: Some(repo_path), + doc_type: entry.doc_type, + attachment_paths: if entry.attachments.is_empty() { + None + } else { + Some(entry.attachments.clone()) + }, + permission_snapshot: permission_snapshot.to_vec(), + actor_id, + }; + if entry.doc_type == DocumentType::Folder { + Self::enqueue_folder_delete_tx( + tx.storage_jobs(), + workspace_id, + entry.doc_id, + entry.reason, + Some(metadata), + ) + .await + } else { + Self::enqueue_doc_delete_tx( + tx.storage_jobs(), + workspace_id, + entry.doc_id, + entry.reason, + Some(metadata), + ) + .await + } + } + + pub(super) async fn record_delete_event( + &self, + workspace_id: Uuid, + entry: &delete_plan::DeleteEntry, + actor_id: Option, + ) { + let repo_path = doc_path::workspace_repo_relative(workspace_id, entry.meta.path.as_deref()) + .map(|p| p.into_string()) + .unwrap_or_else(|| entry.meta.desired_path.as_str().to_string()); + let previous_repo_path = + doc_path::workspace_repo_relative(workspace_id, entry.meta.path.as_deref()) + .map(|p| p.into_string()); + let mut payload = json!({ + "doc_type": entry.doc_type.as_str(), + "repo_path": repo_path, + "slug": entry.meta.slug.as_str(), + "desired_path": entry.meta.desired_path.as_str(), + "owner_id": workspace_id, + "previous_path": previous_repo_path, + }); + if let Some(actor) = actor_id + && let serde_json::Value::Object(ref mut map) = payload + { + map.insert("actor_id".into(), json!(actor)); + } + self.record_event( + workspace_id, + entry.doc_id, + "document.deleted", + Some(payload), + ) + .await; + } +} diff --git a/api/crates/application/src/documents/services/downloads.rs b/api/crates/application/src/documents/services/downloads.rs new file mode 100644 index 00000000..71f4dfc2 --- /dev/null +++ b/api/crates/application/src/documents/services/downloads.rs @@ -0,0 +1,73 @@ +use uuid::Uuid; + +use crate::core::services::access::Actor; +use crate::core::services::errors::ServiceError; +use crate::documents::dtos::{DocumentDownload, DocumentDownloadFormat}; +use crate::documents::use_cases::download_document::{ + DownloadDocument as DownloadDocumentUseCase, FolderDownloadUnsupportedFormat, +}; + +use super::DocumentService; + +impl DocumentService { + pub async fn download_document( + &self, + actor: &Actor, + doc_id: Uuid, + format: DocumentDownloadFormat, + ) -> Result { + let uc = DownloadDocumentUseCase { + documents: self.document_repo.as_ref(), + files: self.files_repo.as_ref(), + storage: self.storage.as_ref(), + access: self.access_repo.as_ref(), + shares: self.share_access.as_ref(), + snapshot: self.snapshot_service.as_ref(), + exporter: self.exporter.as_ref(), + }; + uc.execute(actor, doc_id, format) + .await + .map_err(|err| { + if err + .downcast_ref::() + .is_some() + { + ServiceError::BadRequest("folder_archive_only") + } else { + ServiceError::from(err) + } + })? 
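`record_delete_event` above grafts the optional actor onto the prebuilt JSON payload with a let-chain (`if let Some(actor) = actor_id && let serde_json::Value::Object(ref mut map) = payload`), which is available since this workspace targets edition 2024. The equivalent nested form, as a self-contained sketch:

```rust
use serde_json::{Value, json};
use uuid::Uuid;

// Nested-if equivalent of the let-chain in record_delete_event; json!(actor)
// relies on uuid's `serde` feature, which this workspace enables.
fn attach_actor(mut payload: Value, actor_id: Option<Uuid>) -> Value {
    if let Some(actor) = actor_id {
        if let Value::Object(ref mut map) = payload {
            map.insert("actor_id".into(), json!(actor));
        }
    }
    payload
}
```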
+ .ok_or(ServiceError::NotFound) + } + + pub async fn download_workspace_root( + &self, + actor: &Actor, + workspace_id: Uuid, + workspace_name: &str, + format: DocumentDownloadFormat, + ) -> Result { + let uc = DownloadDocumentUseCase { + documents: self.document_repo.as_ref(), + files: self.files_repo.as_ref(), + storage: self.storage.as_ref(), + access: self.access_repo.as_ref(), + shares: self.share_access.as_ref(), + snapshot: self.snapshot_service.as_ref(), + exporter: self.exporter.as_ref(), + }; + uc.download_workspace_root(actor, workspace_id, workspace_name, format) + .await + .map_err(|err| { + if err + .downcast_ref::() + .is_some() + { + ServiceError::BadRequest("folder_archive_only") + } else { + ServiceError::from(err) + } + })? + .ok_or(ServiceError::NotFound) + } +} diff --git a/api/crates/application/src/documents/services/events.rs b/api/crates/application/src/documents/services/events.rs new file mode 100644 index 00000000..5f36d7fb --- /dev/null +++ b/api/crates/application/src/documents/services/events.rs @@ -0,0 +1,27 @@ +use tracing::warn; +use uuid::Uuid; + +use super::DocumentService; + +impl DocumentService { + pub(super) async fn record_event( + &self, + workspace_id: Uuid, + doc_id: Uuid, + event_type: &'static str, + payload: Option, + ) { + if let Err(err) = self + .events + .append(workspace_id, doc_id, event_type, payload) + .await + { + warn!( + error = ?err, + doc_id = %doc_id, + event_type, + "doc_event_log_append_failed" + ); + } + } +} diff --git a/api/src/application/services/files.rs b/api/crates/application/src/documents/services/files/mod.rs similarity index 57% rename from api/src/application/services/files.rs rename to api/crates/application/src/documents/services/files/mod.rs index b8ef490f..dc65039e 100644 --- a/api/src/application/services/files.rs +++ b/api/crates/application/src/documents/services/files/mod.rs @@ -5,20 +5,111 @@ use serde_json::json; use tracing::warn; use uuid::Uuid; -use crate::application::access::{self, Actor}; -use crate::application::ports::access_repository::AccessRepository; -use crate::application::ports::doc_event_log::DocEventLog; -use crate::application::ports::files_repository::FilesRepository; -use crate::application::ports::share_access_port::ShareAccessPort; -use crate::application::ports::storage_port::StorageResolverPort; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::files::upload_file::{UploadFile, UploadedFile}; +use crate::core::ports::storage::storage_port::StorageResolverPort; +use crate::core::services::access::{self, Actor}; +use crate::core::services::errors::ServiceError; +use crate::documents::ports::access_repository::AccessRepository; +use crate::documents::ports::doc_event_log::DocEventLog; +use crate::documents::ports::files::files_repository::FilesRepository; +use crate::documents::ports::sharing::share_access_port::ShareAccessPort; +use crate::documents::use_cases::files::upload_file::{UploadFile, UploadedFile}; +use async_trait::async_trait; +use domain::documents::path as doc_path; pub struct FilePayload { pub bytes: Vec, pub content_type: Option, } +#[async_trait] +pub trait FileServiceFacade: Send + Sync { + #[allow(clippy::too_many_arguments)] + async fn upload_file( + &self, + workspace_id: Uuid, + actor_id: Uuid, + doc_id: Uuid, + bytes: Vec, + orig_filename: Option, + content_type: Option, + public_base_url: Option, + ) -> Result; + + async fn download_owned_file( + &self, + actor: &Actor, + workspace_id: Uuid, + file_id: Uuid, + ) -> 
Result; + + async fn get_file_by_name( + &self, + actor: &Actor, + doc_id: Uuid, + filename: &str, + ) -> Result; + + async fn serve_upload( + &self, + actor: &Actor, + doc_id: Uuid, + attachment_path: &str, + ) -> Result; +} + +#[async_trait] +impl FileServiceFacade for FileService { + #[allow(clippy::too_many_arguments)] + async fn upload_file( + &self, + workspace_id: Uuid, + actor_id: Uuid, + doc_id: Uuid, + bytes: Vec, + orig_filename: Option, + content_type: Option, + public_base_url: Option, + ) -> Result { + self.upload_file( + workspace_id, + actor_id, + doc_id, + bytes, + orig_filename, + content_type, + public_base_url, + ) + .await + } + + async fn download_owned_file( + &self, + actor: &Actor, + workspace_id: Uuid, + file_id: Uuid, + ) -> Result { + self.download_owned_file(actor, workspace_id, file_id).await + } + + async fn get_file_by_name( + &self, + actor: &Actor, + doc_id: Uuid, + filename: &str, + ) -> Result { + self.get_file_by_name(actor, doc_id, filename).await + } + + async fn serve_upload( + &self, + actor: &Actor, + doc_id: Uuid, + attachment_path: &str, + ) -> Result { + self.serve_upload(actor, doc_id, attachment_path).await + } +} + pub struct FileService { files_repo: Arc, storage: Arc, @@ -44,6 +135,7 @@ impl FileService { } } + #[allow(clippy::too_many_arguments)] pub async fn upload_file( &self, workspace_id: Uuid, @@ -71,6 +163,7 @@ impl FileService { pub async fn download_owned_file( &self, + actor: &Actor, workspace_id: Uuid, file_id: Uuid, ) -> Result { @@ -80,11 +173,17 @@ impl FileService { .await .map_err(ServiceError::from)? .ok_or(ServiceError::NotFound)?; - let (path, content_type, stored_workspace) = meta; - if stored_workspace != workspace_id { + if meta.workspace_id != workspace_id { return Err(ServiceError::Forbidden); } - let abs_path = self.storage.absolute_from_relative(&path); + access::require_view( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + meta.document_id, + ) + .await?; + let abs_path = self.storage.absolute_from_relative(&meta.storage_path); let bytes = self .storage .read_bytes(&abs_path) @@ -92,7 +191,7 @@ impl FileService { .map_err(ServiceError::from)?; Ok(FilePayload { bytes, - content_type, + content_type: meta.content_type, }) } @@ -108,16 +207,15 @@ impl FileService { actor, doc_id, ) - .await - .map_err(|_| ServiceError::Forbidden)?; + .await?; - let (path, ct) = self + let meta = self .files_repo .get_file_path_by_doc_and_name(doc_id, filename) .await .map_err(ServiceError::from)? 
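The `FileServiceFacade` impl above is pure delegation; its point is that the presentation crate can hold an `Arc<dyn FileServiceFacade>` and tests can substitute a stub without constructing the full `FileService` graph. A reduced illustration of that pattern (the trait and stub below are invented for the example; the real facade has the four methods shown above):

```rust
use async_trait::async_trait;

// Invented mini-facade to illustrate the test-double pattern.
#[async_trait]
trait ContentReader: Send + Sync {
    async fn read(&self, doc_id: uuid::Uuid) -> anyhow::Result<String>;
}

// A canned stub: handlers under test see fixed content, no storage needed.
struct FixedContent(String);

#[async_trait]
impl ContentReader for FixedContent {
    async fn read(&self, _doc_id: uuid::Uuid) -> anyhow::Result<String> {
        Ok(self.0.clone())
    }
}
```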
.ok_or(ServiceError::NotFound)?; - let abs_path = self.storage.absolute_from_relative(&path); + let abs_path = self.storage.absolute_from_relative(&meta.storage_path); let bytes = self .storage .read_bytes(&abs_path) @@ -125,7 +223,7 @@ impl FileService { .map_err(ServiceError::from)?; Ok(FilePayload { bytes, - content_type: ct, + content_type: meta.content_type, }) } @@ -141,8 +239,7 @@ impl FileService { actor, doc_id, ) - .await - .map_err(|_| ServiceError::Unauthorized)?; + .await?; let file_path = self .storage @@ -169,7 +266,9 @@ impl FileService { doc_id: Uuid, file: &UploadedFile, ) { - let Some(repo_path) = repo_relative_from_storage(workspace_id, &file.storage_path) else { + let Some(repo_path) = + doc_path::repo_relative_from_storage(workspace_id, &file.storage_path) + else { return; }; if let Err(err) = self @@ -179,7 +278,7 @@ impl FileService { doc_id, "attachment.ingest_upsert", Some(json!({ - "repo_path": repo_path, + "repo_path": repo_path.as_str(), "storage_path": file.storage_path, "backend": "api", "size": file.size, @@ -198,17 +297,3 @@ impl FileService { } } } - -fn repo_relative_from_storage(workspace_id: Uuid, storage_path: &str) -> Option { - let trimmed = storage_path.trim_start_matches('/'); - let owner_prefix = workspace_id.to_string(); - let remainder = trimmed - .strip_prefix(&owner_prefix) - .map(|rest| rest.trim_start_matches('/')) - .unwrap_or(trimmed); - if remainder.is_empty() { - None - } else { - Some(remainder.to_string()) - } -} diff --git a/api/crates/application/src/documents/services/jobs.rs b/api/crates/application/src/documents/services/jobs.rs new file mode 100644 index 00000000..45317432 --- /dev/null +++ b/api/crates/application/src/documents/services/jobs.rs @@ -0,0 +1,146 @@ +use tracing::warn; +use uuid::Uuid; + +use domain::documents::doc_type::DocumentType; +use domain::documents::document::Document as DomainDocument; + +use crate::core::ports::storage::storage_projection_queue::StorageProjectionQueueTx; +use crate::core::ports::storage::storage_projection_queue::{ + StorageDeleteJobMetadata, StorageJobReason, StorageProjectionJobKind, WorkspaceJobMetadata, +}; +use crate::core::services::errors::ServiceError; + +use super::DocumentService; + +impl DocumentService { + pub(super) async fn enqueue_projection_for_document_tx( + storage_jobs: &mut dyn StorageProjectionQueueTx, + doc: &DomainDocument, + reason: &'static str, + ) -> Result<(), ServiceError> { + if doc.doc_type() == DocumentType::Folder { + Self::enqueue_folder_sync_tx(storage_jobs, doc.workspace_id(), doc.id(), reason).await + } else { + Self::enqueue_doc_sync_tx(storage_jobs, doc.workspace_id(), doc.id(), reason).await + } + } + + pub(super) async fn enqueue_doc_sync_tx( + storage_jobs: &mut dyn StorageProjectionQueueTx, + workspace_id: Uuid, + doc_id: Uuid, + reason: &'static str, + ) -> Result<(), ServiceError> { + let encoded_reason = serde_json::to_string(&StorageJobReason { + reason: reason.to_string(), + metadata: Some(WorkspaceJobMetadata { workspace_id }), + }) + .ok(); + storage_jobs + .enqueue_doc_job( + workspace_id, + doc_id, + StorageProjectionJobKind::DocSync, + encoded_reason.as_deref(), + ) + .await + .map_err(|err| { + warn!( + error = ?err, + doc_id = %doc_id, + "storage_projection_enqueue_failed" + ); + ServiceError::Unexpected(err.into()) + }) + } + + pub(super) async fn enqueue_doc_delete_tx( + storage_jobs: &mut dyn StorageProjectionQueueTx, + workspace_id: Uuid, + doc_id: Uuid, + reason: &'static str, + metadata: Option, + ) -> Result<(), ServiceError> { 
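Each enqueue helper in `jobs.rs` wraps the human-readable reason plus structured metadata into a `StorageJobReason` envelope and JSON-encodes it into the queue's free-form reason column, quietly falling back (via `.ok()` / `unwrap_or`) to the bare reason string when serialization fails. A round-trip sketch with stand-in types (field names follow this file; the real definitions live in `storage_projection_queue`, and `Deserialize` on the envelope is an assumption about the consumer side):

```rust
use serde::{Deserialize, Serialize};
use uuid::Uuid;

// Stand-ins mirroring jobs.rs; only Serialize is visible in this hunk.
#[derive(Serialize, Deserialize)]
struct WorkspaceJobMetadata {
    workspace_id: Uuid,
}

#[derive(Serialize, Deserialize)]
struct StorageJobReason {
    reason: String,
    metadata: Option<WorkspaceJobMetadata>,
}

fn encode_reason(reason: &str, workspace_id: Uuid) -> Option<String> {
    serde_json::to_string(&StorageJobReason {
        reason: reason.to_string(),
        metadata: Some(WorkspaceJobMetadata { workspace_id }),
    })
    .ok() // mirrors the fallback in enqueue_doc_sync_tx
}
```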
+ let encoded_reason = metadata.and_then(|meta| { + serde_json::to_string(&StorageJobReason { + reason: reason.to_string(), + metadata: Some(meta), + }) + .ok() + }); + let reason_str = encoded_reason.as_deref().unwrap_or(reason); + storage_jobs + .enqueue_doc_job( + workspace_id, + doc_id, + StorageProjectionJobKind::DeleteDoc, + Some(reason_str), + ) + .await + .map_err(|err| { + warn!( + error = ?err, + doc_id = %doc_id, + "storage_projection_enqueue_failed" + ); + ServiceError::Unexpected(err.into()) + }) + } + + pub(super) async fn enqueue_folder_sync_tx( + storage_jobs: &mut dyn StorageProjectionQueueTx, + workspace_id: Uuid, + folder_id: Uuid, + reason: &'static str, + ) -> Result<(), ServiceError> { + storage_jobs + .enqueue_folder_job( + workspace_id, + folder_id, + StorageProjectionJobKind::FolderSync, + Some(reason), + ) + .await + .map_err(|err| { + warn!( + error = ?err, + folder_id = %folder_id, + "storage_projection_enqueue_failed" + ); + ServiceError::Unexpected(err.into()) + }) + } + + pub(super) async fn enqueue_folder_delete_tx( + storage_jobs: &mut dyn StorageProjectionQueueTx, + workspace_id: Uuid, + folder_id: Uuid, + reason: &'static str, + metadata: Option, + ) -> Result<(), ServiceError> { + let encoded_reason = metadata.and_then(|meta| { + serde_json::to_string(&StorageJobReason { + reason: reason.to_string(), + metadata: Some(meta), + }) + .ok() + }); + let reason_str = encoded_reason.as_deref().unwrap_or(reason); + storage_jobs + .enqueue_folder_job( + workspace_id, + folder_id, + StorageProjectionJobKind::DeleteFolder, + Some(reason_str), + ) + .await + .map_err(|err| { + warn!( + error = ?err, + folder_id = %folder_id, + "storage_projection_enqueue_failed" + ); + ServiceError::Unexpected(err.into()) + }) + } +} diff --git a/api/crates/application/src/documents/services/lifecycle.rs b/api/crates/application/src/documents/services/lifecycle.rs new file mode 100644 index 00000000..51973dc4 --- /dev/null +++ b/api/crates/application/src/documents/services/lifecycle.rs @@ -0,0 +1,158 @@ +use serde_json::json; +use uuid::Uuid; + +use domain::access::permissions::PermissionSet; +use domain::documents::doc_type::DocumentType; +use domain::documents::document::Document as DomainDocument; +use domain::documents::policy::DocumentState; +use domain::documents::{path as doc_path, policy as doc_policy}; + +use crate::core::services::errors::ServiceError; +use crate::documents::ports::tx_runner::run_in_tx; + +use super::DocumentService; +use super::util::{map_policy_error, map_tx_error}; + +impl DocumentService { + pub async fn archive_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + ) -> Result { + let meta = self.load_owner_meta(workspace_id, doc_id).await?; + let state = DocumentState::new(meta.doc_type, meta.archived_at); + doc_policy::ensure_archivable(state, permissions).map_err(map_policy_error)?; + let previous_repo_path = + doc_path::workspace_repo_relative(workspace_id, meta.path.as_deref()) + .map(|p| p.into_string()); + let subtree = self + .document_repo + .list_owned_subtree_documents(workspace_id, doc_id) + .await + .map_err(ServiceError::from)?; + for node in &subtree { + if node.doc_type != DocumentType::Folder { + self.realtime.force_persist(&node.id.to_string()).await?; + } + } + + let doc = run_in_tx(self.tx_runner.as_ref(), move |tx| { + Box::pin(async move { + let doc = tx + .documents() + .archive_subtree(doc_id, workspace_id, actor_id) + .await?; + let Some(doc) = doc else { + return 
Err(ServiceError::NotFound.into()); + }; + Self::enqueue_projection_for_document_tx( + tx.storage_jobs(), + &doc, + "archive_document", + ) + .await?; + Ok(doc) + }) + }) + .await + .map_err(map_tx_error)?; + + for node in &subtree { + self.realtime + .set_document_editable(&node.id.to_string(), false) + .await?; + } + let repo_path = doc.desired_path().as_str().to_string(); + let event_payload = json!({ + "repo_path": repo_path, + "doc_type": doc.doc_type().as_str(), + "slug": doc.slug().as_str(), + "desired_path": doc.desired_path().as_str(), + "owner_id": doc.workspace_id(), + "actor_id": actor_id, + "previous_path": previous_repo_path, + "previous_desired_path": meta.desired_path.as_str(), + }); + self.record_event( + doc.workspace_id(), + doc.id(), + "document.archived", + Some(event_payload), + ) + .await; + Ok(doc) + } + + pub async fn unarchive_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + ) -> Result { + let meta = self.load_owner_meta(workspace_id, doc_id).await?; + let state = DocumentState::new(meta.doc_type, meta.archived_at); + doc_policy::ensure_unarchivable(state, permissions).map_err(map_policy_error)?; + let previous_repo_path = + doc_path::workspace_repo_relative(workspace_id, meta.path.as_deref()) + .map(|p| p.into_string()); + let subtree = self + .document_repo + .list_owned_subtree_documents(workspace_id, doc_id) + .await + .map_err(ServiceError::from)?; + + let doc = run_in_tx(self.tx_runner.as_ref(), move |tx| { + Box::pin(async move { + let doc = tx + .documents() + .unarchive_subtree(doc_id, workspace_id) + .await?; + let Some(doc) = doc else { + return Err(ServiceError::NotFound.into()); + }; + Self::enqueue_projection_for_document_tx( + tx.storage_jobs(), + &doc, + "unarchive_document", + ) + .await?; + Ok(doc) + }) + }) + .await + .map_err(map_tx_error)?; + + for node in &subtree { + self.realtime + .set_document_editable(&node.id.to_string(), true) + .await?; + } + for node in &subtree { + if node.doc_type != DocumentType::Folder { + self.realtime.force_persist(&node.id.to_string()).await?; + } + } + let repo_path = doc.desired_path().as_str().to_string(); + let event_payload = json!({ + "repo_path": repo_path, + "doc_type": doc.doc_type().as_str(), + "slug": doc.slug().as_str(), + "desired_path": doc.desired_path().as_str(), + "owner_id": doc.workspace_id(), + "actor_id": actor_id, + "previous_path": previous_repo_path, + "previous_desired_path": meta.desired_path.as_str(), + }); + self.record_event( + doc.workspace_id(), + doc.id(), + "document.unarchived", + Some(event_payload), + ) + .await; + Ok(doc) + } +} diff --git a/api/src/application/linkgraph/mod.rs b/api/crates/application/src/documents/services/linkgraph.rs similarity index 98% rename from api/src/application/linkgraph/mod.rs rename to api/crates/application/src/documents/services/linkgraph.rs index ee701927..76130860 100644 --- a/api/src/application/linkgraph/mod.rs +++ b/api/crates/application/src/documents/services/linkgraph.rs @@ -1,4 +1,4 @@ -use crate::application::ports::linkgraph_repository::LinkGraphRepository; +use crate::documents::ports::linkgraph_repository::LinkGraphRepository; use once_cell::sync::Lazy; use regex::Regex; use uuid::Uuid; diff --git a/api/crates/application/src/documents/services/links.rs b/api/crates/application/src/documents/services/links.rs new file mode 100644 index 00000000..541216a5 --- /dev/null +++ b/api/crates/application/src/documents/services/links.rs @@ -0,0 +1,66 @@ +use uuid::Uuid; + +use 
domain::documents::document::{ + BacklinkInfo as DomainBacklink, OutgoingLink as DomainOutgoingLink, +}; + +use crate::core::services::access::{self, Actor}; +use crate::core::services::errors::ServiceError; +use crate::documents::use_cases::get_backlinks::GetBacklinks; +use crate::documents::use_cases::get_outgoing_links::GetOutgoingLinks; + +use super::DocumentService; + +impl DocumentService { + pub async fn backlinks( + &self, + actor: &Actor, + workspace_id: Uuid, + doc_id: Uuid, + ) -> Result, ServiceError> { + access::require_view( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + .map_err(|err| match err { + ServiceError::Forbidden => ServiceError::NotFound, + other => other, + })?; + + let uc = GetBacklinks { + repo: self.linkgraph_repo.as_ref(), + }; + uc.execute(workspace_id, doc_id) + .await + .map_err(ServiceError::from) + } + + pub async fn outgoing_links( + &self, + actor: &Actor, + workspace_id: Uuid, + doc_id: Uuid, + ) -> Result, ServiceError> { + access::require_view( + self.access_repo.as_ref(), + self.share_access.as_ref(), + actor, + doc_id, + ) + .await + .map_err(|err| match err { + ServiceError::Forbidden => ServiceError::NotFound, + other => other, + })?; + + let uc = GetOutgoingLinks { + repo: self.linkgraph_repo.as_ref(), + }; + uc.execute(workspace_id, doc_id) + .await + .map_err(ServiceError::from) + } +} diff --git a/api/crates/application/src/documents/services/mod.rs b/api/crates/application/src/documents/services/mod.rs new file mode 100644 index 00000000..74d21ea7 --- /dev/null +++ b/api/crates/application/src/documents/services/mod.rs @@ -0,0 +1,493 @@ +use std::sync::Arc; + +use uuid::Uuid; + +use crate::core::ports::storage::storage_port::StorageResolverPort; +use crate::core::services::errors::ServiceError; +use crate::documents::dtos::{DocumentDownload, DocumentDownloadFormat, DocumentListFilter}; +use crate::documents::ports::access_repository::AccessRepository; +use crate::documents::ports::doc_event_log::DocEventLog; +use crate::documents::ports::document_exporter::DocumentExporter; +use crate::documents::ports::document_repository::{DocMeta, DocumentRepository}; +use crate::documents::ports::files::files_repository::FilesRepository; +use crate::documents::ports::linkgraph_repository::LinkGraphRepository; +use crate::documents::ports::realtime::realtime_port::RealtimeEngine; +use crate::documents::ports::sharing::share_access_port::ShareAccessPort; +use crate::documents::ports::tx_runner::DocumentsTxRunner; +use crate::documents::services::realtime::snapshot::SnapshotService; +use async_trait::async_trait; +use domain::access::permissions::PermissionSet; +use domain::documents::doc_type::DocumentType; +use domain::documents::document::Document as DomainDocument; +use domain::documents::document::{ + BacklinkInfo as DomainBacklink, OutgoingLink as DomainOutgoingLink, SearchHit, +}; + +mod attachments; +mod content; +mod crud; +mod deletion; +mod downloads; +mod events; +pub mod files; +mod jobs; +mod lifecycle; +pub mod linkgraph; +mod links; +mod patch; +pub mod publishing; +pub mod realtime; +pub mod sharing; +mod snapshot_dto; +mod snapshots; +pub mod tagging; +mod util; + +pub use patch::DocumentPatchOperation; + +#[async_trait] +pub trait DocumentServiceFacade: Send + Sync { + async fn list_for_user( + &self, + workspace_id: Uuid, + query: Option, + tag: Option, + state: DocumentListFilter, + ) -> Result, ServiceError>; + + async fn search_for_user( + &self, + workspace_id: Uuid, + query: Option, + 
limit: i64, + ) -> Result, ServiceError>; + + #[allow(clippy::too_many_arguments)] + async fn create_for_user( + &self, + workspace_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: &str, + parent_id: Option, + doc_type: DocumentType, + created_by_plugin: Option<&str>, + ) -> Result; + + async fn duplicate_document( + &self, + workspace_id: Uuid, + source_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: Option, + parent_id: Option>, + ) -> Result; + + async fn get_for_actor( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + ) -> Result; + + async fn delete_for_user( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Option, + permissions: &PermissionSet, + ) -> Result; + + async fn update_metadata( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: Option, + parent_id: Option>, + ) -> Result; + + async fn archive_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + ) -> Result; + + async fn unarchive_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + ) -> Result; + + async fn get_content( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + ) -> Result; + + async fn update_content( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + content: &str, + ) -> Result; + + async fn patch_content( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + operations: &[DocumentPatchOperation], + ) -> Result; + + async fn download_document( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + format: DocumentDownloadFormat, + ) -> Result; + + async fn download_workspace_root( + &self, + actor: &crate::core::services::access::Actor, + workspace_id: Uuid, + workspace_name: &str, + format: DocumentDownloadFormat, + ) -> Result; + + async fn list_snapshots( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + limit: i64, + offset: i64, + ) -> Result, ServiceError>; + + async fn snapshot_diff( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + snapshot_id: Uuid, + compare: Option, + base_mode: crate::documents::dtos::SnapshotDiffBaseMode, + ) -> Result; + + async fn restore_snapshot( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + snapshot_id: Uuid, + ) -> Result; + + async fn download_snapshot( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + snapshot_id: Uuid, + ) -> Result; + + async fn backlinks( + &self, + actor: &crate::core::services::access::Actor, + workspace_id: Uuid, + doc_id: Uuid, + ) -> Result, ServiceError>; + + async fn outgoing_links( + &self, + actor: &crate::core::services::access::Actor, + workspace_id: Uuid, + doc_id: Uuid, + ) -> Result, ServiceError>; +} + +#[async_trait] +impl DocumentServiceFacade for DocumentService { + async fn list_for_user( + &self, + workspace_id: Uuid, + query: Option, + tag: Option, + state: DocumentListFilter, + ) -> Result, ServiceError> { + self.list_for_user(workspace_id, query, tag, state).await + } + + async fn search_for_user( + &self, + workspace_id: Uuid, + query: Option, + limit: i64, + ) -> Result, ServiceError> { + self.search_for_user(workspace_id, query, limit).await + } + + async fn create_for_user( + &self, + workspace_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: &str, + parent_id: Option, + doc_type: 
DocumentType, + created_by_plugin: Option<&str>, + ) -> Result { + self.create_for_user( + workspace_id, + actor_id, + permissions, + title, + parent_id, + doc_type, + created_by_plugin, + ) + .await + } + + async fn duplicate_document( + &self, + workspace_id: Uuid, + source_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: Option, + parent_id: Option>, + ) -> Result { + self.duplicate_document( + workspace_id, + source_id, + actor_id, + permissions, + title, + parent_id, + ) + .await + } + + async fn get_for_actor( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + ) -> Result { + self.get_for_actor(actor, doc_id).await + } + + async fn delete_for_user( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Option, + permissions: &PermissionSet, + ) -> Result { + self.delete_for_user(workspace_id, doc_id, actor_id, permissions) + .await + } + + async fn update_metadata( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + title: Option, + parent_id: Option>, + ) -> Result { + self.update_metadata( + workspace_id, + doc_id, + actor_id, + permissions, + title, + parent_id, + ) + .await + } + + async fn archive_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + ) -> Result { + self.archive_document(workspace_id, doc_id, actor_id, permissions) + .await + } + + async fn unarchive_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + actor_id: Uuid, + permissions: &PermissionSet, + ) -> Result { + self.unarchive_document(workspace_id, doc_id, actor_id, permissions) + .await + } + + async fn get_content( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + ) -> Result { + self.get_content(actor, doc_id).await + } + + async fn update_content( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + content: &str, + ) -> Result { + self.update_content(actor, doc_id, content).await + } + + async fn patch_content( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + operations: &[DocumentPatchOperation], + ) -> Result { + self.patch_content(actor, doc_id, operations).await + } + + async fn download_document( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + format: DocumentDownloadFormat, + ) -> Result { + self.download_document(actor, doc_id, format).await + } + + async fn download_workspace_root( + &self, + actor: &crate::core::services::access::Actor, + workspace_id: Uuid, + workspace_name: &str, + format: DocumentDownloadFormat, + ) -> Result { + self.download_workspace_root(actor, workspace_id, workspace_name, format) + .await + } + + async fn list_snapshots( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + limit: i64, + offset: i64, + ) -> Result, ServiceError> { + self.list_snapshots(actor, doc_id, limit, offset).await + } + + async fn snapshot_diff( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + snapshot_id: Uuid, + compare: Option, + base_mode: crate::documents::dtos::SnapshotDiffBaseMode, + ) -> Result { + self.snapshot_diff(actor, doc_id, snapshot_id, compare, base_mode) + .await + } + + async fn restore_snapshot( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + snapshot_id: Uuid, + ) -> Result { + self.restore_snapshot(actor, doc_id, snapshot_id).await + } + + async fn download_snapshot( + &self, + actor: &crate::core::services::access::Actor, + doc_id: Uuid, + 
snapshot_id: Uuid,
+    ) -> Result<DocumentDownload, ServiceError> {
+        self.download_snapshot(actor, doc_id, snapshot_id).await
+    }
+
+    async fn backlinks(
+        &self,
+        actor: &crate::core::services::access::Actor,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+    ) -> Result<Vec<DomainBacklink>, ServiceError> {
+        self.backlinks(actor, workspace_id, doc_id).await
+    }
+
+    async fn outgoing_links(
+        &self,
+        actor: &crate::core::services::access::Actor,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+    ) -> Result<Vec<DomainOutgoingLink>, ServiceError> {
+        self.outgoing_links(actor, workspace_id, doc_id).await
+    }
+}
+
+pub struct DocumentService {
+    tx_runner: Arc<dyn DocumentsTxRunner>,
+    document_repo: Arc<dyn DocumentRepository>,
+    files_repo: Arc<dyn FilesRepository>,
+    access_repo: Arc<dyn AccessRepository>,
+    share_access: Arc<dyn ShareAccessPort>,
+    linkgraph_repo: Arc<dyn LinkGraphRepository>,
+    storage: Arc<dyn StorageResolverPort>,
+    events: Arc<dyn DocEventLog>,
+    realtime: Arc<dyn RealtimeEngine>,
+    snapshot_service: Arc<SnapshotService>,
+    exporter: Arc<dyn DocumentExporter>,
+}
+
+impl DocumentService {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        tx_runner: Arc<dyn DocumentsTxRunner>,
+        document_repo: Arc<dyn DocumentRepository>,
+        files_repo: Arc<dyn FilesRepository>,
+        access_repo: Arc<dyn AccessRepository>,
+        share_access: Arc<dyn ShareAccessPort>,
+        linkgraph_repo: Arc<dyn LinkGraphRepository>,
+        storage: Arc<dyn StorageResolverPort>,
+        events: Arc<dyn DocEventLog>,
+        realtime: Arc<dyn RealtimeEngine>,
+        snapshot_service: Arc<SnapshotService>,
+        exporter: Arc<dyn DocumentExporter>,
+    ) -> Self {
+        Self {
+            tx_runner,
+            document_repo,
+            files_repo,
+            access_repo,
+            share_access,
+            linkgraph_repo,
+            storage,
+            events,
+            realtime,
+            snapshot_service,
+            exporter,
+        }
+    }
+
+    async fn load_owner_meta(
+        &self,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+    ) -> Result<DocMeta, ServiceError> {
+        self.document_repo
+            .get_meta_for_owner(doc_id, workspace_id)
+            .await
+            .map_err(ServiceError::from)?
+            .ok_or(ServiceError::NotFound)
+    }
+}
diff --git a/api/crates/application/src/documents/services/patch.rs b/api/crates/application/src/documents/services/patch.rs
new file mode 100644
index 00000000..fc061adc
--- /dev/null
+++ b/api/crates/application/src/documents/services/patch.rs
@@ -0,0 +1,62 @@
+use crate::core::services::errors::ServiceError;
+
+#[derive(Debug, Clone)]
+pub enum DocumentPatchOperation {
+    Insert {
+        offset: usize,
+        text: String,
+    },
+    Delete {
+        offset: usize,
+        length: usize,
+    },
+    Replace {
+        offset: usize,
+        length: usize,
+        text: String,
+    },
+}
+
+pub(super) fn apply_patch_operations(
+    initial: &str,
+    operations: &[DocumentPatchOperation],
+) -> Result<String, ServiceError> {
+    let mut chars: Vec<char> = initial.chars().collect();
+    for operation in operations {
+        match operation {
+            DocumentPatchOperation::Insert { offset, text } => {
+                splice_chars(&mut chars, *offset, 0, text)?;
+            }
+            DocumentPatchOperation::Delete { offset, length } => {
+                splice_chars(&mut chars, *offset, *length, "")?;
+            }
+            DocumentPatchOperation::Replace {
+                offset,
+                length,
+                text,
+            } => {
+                splice_chars(&mut chars, *offset, *length, text)?;
+            }
+        }
+    }
+    Ok(chars.into_iter().collect())
+}
+
+fn splice_chars(
+    chars: &mut Vec<char>,
+    offset: usize,
+    length: usize,
+    replacement: &str,
+) -> Result<(), ServiceError> {
+    if offset > chars.len() {
+        return Err(ServiceError::BadRequest("patch_offset_out_of_bounds"));
+    }
+    let end = offset
+        .checked_add(length)
+        .ok_or(ServiceError::BadRequest("patch_length_overflow"))?;
+    if end > chars.len() {
+        return Err(ServiceError::BadRequest("patch_range_out_of_bounds"));
+    }
+    chars.splice(offset..end, replacement.chars());
+    Ok(())
+}
diff --git a/api/crates/application/src/documents/services/publishing/mod.rs b/api/crates/application/src/documents/services/publishing/mod.rs
new file mode 100644
index 00000000..6aeae1f2
--- /dev/null
+++ b/api/crates/application/src/documents/services/publishing/mod.rs
@@ -0,0 +1,228 @@
+use std::sync::Arc;
+
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use
crate::documents::dtos::PublicDocumentSummaryDto; +use crate::documents::ports::publishing::public_repository::PublicRepository; +use crate::documents::ports::realtime::realtime_port::RealtimeEngine; +use crate::documents::use_cases::publishing::get_public::GetPublicByWorkspaceAndId; +use crate::documents::use_cases::publishing::get_status::{GetPublishStatus, PublishStatusDto}; +use crate::documents::use_cases::publishing::list_workspace::ListWorkspacePublic; +use crate::documents::use_cases::publishing::publish::{PublishDocument, PublishResponseDto}; +use crate::documents::use_cases::publishing::unpublish::UnpublishDocument; +use async_trait::async_trait; +use domain::access::permissions::PermissionSet; +use domain::documents::document::Document; +use domain::documents::public_policy; + +pub struct PublicService { + repo: Arc, + realtime: Arc, +} + +#[async_trait] +pub trait PublicServiceFacade: Send + Sync { + async fn publish_document( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result; + + async fn unpublish_document( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result; + + async fn get_publish_status( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result; + + async fn list_workspace_public_documents( + &self, + workspace_slug: &str, + ) -> Result, ServiceError>; + + async fn get_public_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> Result; + + async fn get_public_content_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> Result; +} + +#[async_trait] +impl PublicServiceFacade for PublicService { + async fn publish_document( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result { + self.publish_document(workspace_id, permissions, doc_id) + .await + } + + async fn unpublish_document( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result { + self.unpublish_document(workspace_id, permissions, doc_id) + .await + } + + async fn get_publish_status( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result { + self.get_publish_status(workspace_id, permissions, doc_id) + .await + } + + async fn list_workspace_public_documents( + &self, + workspace_slug: &str, + ) -> Result, ServiceError> { + self.list_workspace_public_documents(workspace_slug).await + } + + async fn get_public_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> Result { + self.get_public_by_workspace_and_id(workspace_slug, doc_id) + .await + } + + async fn get_public_content_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> Result { + self.get_public_content_by_workspace_and_id(workspace_slug, doc_id) + .await + } +} + +impl PublicService { + pub fn new(repo: Arc, realtime: Arc) -> Self { + Self { repo, realtime } + } + + pub async fn publish_document( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result { + public_policy::ensure_public_publish_allowed(permissions) + .map_err(|_| ServiceError::Forbidden)?; + let uc = PublishDocument { + repo: self.repo.as_ref(), + }; + uc.execute(workspace_id, doc_id) + .await + .map_err(ServiceError::from)? 
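A note on `patch.rs` from earlier in this diff: offsets and lengths index characters, not bytes, because the buffer is materialized as a `Vec<char>`, so a client counting UTF-8 bytes will splice in the wrong place on non-ASCII text. A worked sketch of the semantics:

```rust
// Character-indexed splice matching splice_chars in patch.rs (bounds checks
// elided here; the real function returns BadRequest on out-of-range input).
fn splice(text: &str, offset: usize, length: usize, replacement: &str) -> String {
    let mut chars: Vec<char> = text.chars().collect();
    chars.splice(offset..offset + length, replacement.chars());
    chars.into_iter().collect()
}

fn main() {
    // 'é' is two bytes but one char: char offset 2 hits the first 'l'.
    assert_eq!(splice("héllo", 2, 2, "y"), "héyo");
}
```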
+ .ok_or(ServiceError::NotFound) + } + + pub async fn unpublish_document( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result { + public_policy::ensure_public_unpublish_allowed(permissions) + .map_err(|_| ServiceError::Forbidden)?; + let uc = UnpublishDocument { + repo: self.repo.as_ref(), + }; + uc.execute(workspace_id, doc_id) + .await + .map_err(ServiceError::from) + } + + pub async fn get_publish_status( + &self, + workspace_id: Uuid, + permissions: &PermissionSet, + doc_id: Uuid, + ) -> Result { + public_policy::ensure_public_publish_allowed(permissions) + .map_err(|_| ServiceError::Forbidden)?; + let uc = GetPublishStatus { + repo: self.repo.as_ref(), + }; + let status: PublishStatusDto = uc + .execute(workspace_id, doc_id) + .await + .map_err(ServiceError::from)? + .ok_or(ServiceError::NotFound)?; + Ok(PublishResponseDto { + slug: status.slug, + public_url: status.public_url, + }) + } + + pub async fn list_workspace_public_documents( + &self, + workspace_slug: &str, + ) -> Result, ServiceError> { + let uc = ListWorkspacePublic { + repo: self.repo.as_ref(), + }; + uc.execute(workspace_slug).await.map_err(ServiceError::from) + } + + pub async fn get_public_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> Result { + let uc = GetPublicByWorkspaceAndId { + repo: self.repo.as_ref(), + }; + uc.execute(workspace_slug, doc_id) + .await + .map_err(ServiceError::from)? + .ok_or(ServiceError::NotFound) + } + + pub async fn get_public_content_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> Result { + let exists = self + .repo + .public_exists_by_workspace_and_id(workspace_slug, doc_id) + .await + .map_err(ServiceError::from)?; + if !exists { + return Err(ServiceError::NotFound); + } + let content = self + .realtime + .get_content(&doc_id.to_string()) + .await + .map_err(ServiceError::from)? + .unwrap_or_default(); + Ok(content) + } +} diff --git a/api/src/application/services/realtime/doc_hydration.rs b/api/crates/application/src/documents/services/realtime/doc_hydration.rs similarity index 74% rename from api/src/application/services/realtime/doc_hydration.rs rename to api/crates/application/src/documents/services/realtime/doc_hydration.rs index 84f32855..51b459be 100644 --- a/api/src/application/services/realtime/doc_hydration.rs +++ b/api/crates/application/src/documents/services/realtime/doc_hydration.rs @@ -6,8 +6,10 @@ use yrs::sync::{Message, MessageReader, SyncMessage}; use yrs::updates::decoder::{Decode, DecoderV1}; use yrs::{Doc, Transact, Update}; -use crate::application::ports::realtime_hydration_port::{DocStateReader, RealtimeBacklogReader}; -use crate::application::ports::storage_port::StorageResolverPort; +use crate::core::ports::storage::storage_port::StorageResolverPort; +use crate::documents::ports::realtime::realtime_hydration_port::{ + DocStateReader, RealtimeBacklogReader, +}; pub struct DocHydrationService { state_reader: Arc, @@ -105,19 +107,16 @@ impl DocHydrationService { let is_empty = yrs::Text::len(&txt, &txn) == 0; drop(txn); - if is_empty { - if let Some(record) = self.state_reader.document_record(doc_id).await? 
{ - if let Some(path) = record.path { - let absolute = self.storage.absolute_from_relative(&path); - if let Ok(bytes) = self.storage.read_bytes(absolute.as_path()).await { - if let Ok(content) = String::from_utf8(bytes) { - let body = strip_frontmatter(&content); - let mut txn = doc.transact_mut(); - yrs::Text::insert(&txt, &mut txn, 0, body); - } - } - } - } + if is_empty + && let Some(record) = self.state_reader.document_record(doc_id).await? + && let Some(path) = record.path + && let absolute = self.storage.absolute_from_relative(&path) + && let Ok(bytes) = self.storage.read_bytes(absolute.as_path()).await + && let Ok(content) = String::from_utf8(bytes) + { + let body = strip_frontmatter(&content); + let mut txn = doc.transact_mut(); + yrs::Text::insert(&txt, &mut txn, 0, body); } } @@ -148,9 +147,9 @@ fn apply_update_bytes(doc: &Doc, bytes: &[u8]) -> anyhow::Result<()> { fn extract_updates(frame: &[u8]) -> anyhow::Result> { let mut decoder = DecoderV1::new(Cursor::new(frame)); - let mut reader = MessageReader::new(&mut decoder); + let reader = MessageReader::new(&mut decoder); let mut updates = Vec::new(); - while let Some(message) = reader.next() { + for message in reader { match message? { Message::Sync(SyncMessage::Update(bin)) | Message::Sync(SyncMessage::SyncStep2(bin)) => { @@ -164,20 +163,15 @@ fn extract_updates(frame: &[u8]) -> anyhow::Result> { } fn strip_frontmatter(content: &str) -> &str { - if content.starts_with("---\n") { - if let Some(idx) = content[4..].find("\n---\n") { - let start = 4 + idx + 5; - let mut body = &content[start..]; - if let Some(stripped) = body.strip_prefix("\r\n") { - body = stripped; - } else if let Some(stripped) = body.strip_prefix('\n') { - body = stripped; - } - body - } else { - content - } - } else { - content - } + let Some(after_open) = content.strip_prefix("---\n") else { + return content; + }; + let Some(idx) = after_open.find("\n---\n") else { + return content; + }; + let start = idx + "\n---\n".len(); + let body = &after_open[start..]; + body.strip_prefix("\r\n") + .or_else(|| body.strip_prefix('\n')) + .unwrap_or(body) } diff --git a/api/src/application/services/realtime/mod.rs b/api/crates/application/src/documents/services/realtime/mod.rs similarity index 100% rename from api/src/application/services/realtime/mod.rs rename to api/crates/application/src/documents/services/realtime/mod.rs diff --git a/api/src/application/services/realtime/snapshot.rs b/api/crates/application/src/documents/services/realtime/snapshot.rs similarity index 82% rename from api/src/application/services/realtime/snapshot.rs rename to api/crates/application/src/documents/services/realtime/snapshot.rs index 7a5db483..b0dd7b18 100644 --- a/api/src/application/services/realtime/snapshot.rs +++ b/api/crates/application/src/documents/services/realtime/snapshot.rs @@ -6,19 +6,21 @@ use uuid::Uuid; use yrs::updates::decoder::Decode; use yrs::{Doc, GetString, ReadTxn, StateVector, Text, Transact, Update}; -use crate::application::linkgraph; -use crate::application::ports::document_snapshot_archive_repository::{ - DocumentSnapshotArchiveRepository, SnapshotArchiveInsert, SnapshotArchiveRecord, -}; -use crate::application::ports::linkgraph_repository::LinkGraphRepository; -use crate::application::ports::realtime_hydration_port::DocStateReader; -use crate::application::ports::realtime_persistence_port::DocPersistencePort; -use crate::application::ports::storage_projection_queue::{ +use crate::core::ports::storage::storage_projection_queue::{ StorageProjectionJobKind, 
diff --git a/api/src/application/services/realtime/snapshot.rs b/api/crates/application/src/documents/services/realtime/snapshot.rs
similarity index 82%
rename from api/src/application/services/realtime/snapshot.rs
rename to api/crates/application/src/documents/services/realtime/snapshot.rs
index 7a5db483..b0dd7b18 100644
--- a/api/src/application/services/realtime/snapshot.rs
+++ b/api/crates/application/src/documents/services/realtime/snapshot.rs
@@ -6,19 +6,21 @@ use uuid::Uuid;
 use yrs::updates::decoder::Decode;
 use yrs::{Doc, GetString, ReadTxn, StateVector, Text, Transact, Update};
 
-use crate::application::linkgraph;
-use crate::application::ports::document_snapshot_archive_repository::{
-    DocumentSnapshotArchiveRepository, SnapshotArchiveInsert, SnapshotArchiveRecord,
-};
-use crate::application::ports::linkgraph_repository::LinkGraphRepository;
-use crate::application::ports::realtime_hydration_port::DocStateReader;
-use crate::application::ports::realtime_persistence_port::DocPersistencePort;
-use crate::application::ports::storage_projection_queue::{
+use crate::core::ports::storage::storage_projection_queue::{
     StorageProjectionJobKind, StorageProjectionQueue,
 };
-use crate::application::ports::tagging_repository::TaggingRepository;
-use crate::application::services::tagging;
-use crate::application::utils::hash::sha256_hex;
+use crate::core::services::tagging;
+use crate::core::services::utils::hash::sha256_hex;
+use crate::documents::ports::document_snapshot_archive_repository::{
+    DocumentSnapshotArchiveRepository, SnapshotArchiveInsert, SnapshotArchiveRecord,
+};
+use crate::documents::ports::linkgraph_repository::LinkGraphRepository;
+use crate::documents::ports::realtime::realtime_hydration_port::DocStateReader;
+use crate::documents::ports::realtime::realtime_persistence_port::DocPersistencePort;
+use crate::documents::ports::realtime::realtime_persistence_port::SnapshotEntry;
+use crate::documents::ports::tagging::tagging_repository::TaggingRepository;
+use crate::documents::services::linkgraph;
+use domain::documents::doc_type::DocumentType;
 
 pub struct SnapshotService {
     state_reader: Arc<dyn DocStateReader>,
@@ -29,6 +31,7 @@ pub struct SnapshotService {
     storage_jobs: Arc<dyn StorageProjectionQueue>,
 }
 
+#[derive(Default)]
 pub struct SnapshotPersistOptions {
     pub clear_updates: bool,
     pub skip_if_unchanged: bool,
@@ -36,17 +39,6 @@ pub struct SnapshotPersistOptions {
     pub prune_snapshots: Option<usize>,
     pub prune_updates_before: Option<chrono::DateTime<chrono::Utc>>,
 }
 
-impl Default for SnapshotPersistOptions {
-    fn default() -> Self {
-        Self {
-            clear_updates: false,
-            skip_if_unchanged: false,
-            prune_snapshots: None,
-            prune_updates_before: None,
-        }
-    }
-}
-
 pub struct SnapshotPersistResult {
     pub version: i64,
     pub snapshot_bytes: Vec<u8>,
     pub persisted: bool,
@@ -137,7 +129,7 @@ impl SnapshotService {
         let snapshot_bin = encode_doc_snapshot(doc);
         let (current_version, previous_snapshot) = if options.skip_if_unchanged {
             match self.persistence.latest_snapshot_entry(doc_id).await? {
-                Some((version, bytes)) => (version, Some(bytes)),
+                Some(SnapshotEntry { version, bytes }) => (version, Some(bytes)),
                 None => (0, None),
             }
         } else {
@@ -150,27 +142,26 @@ impl SnapshotService {
             )
         };
 
-        if options.skip_if_unchanged {
-            if let Some(prev) = previous_snapshot.as_ref() {
-                if prev.as_slice() == snapshot_bin.as_slice() {
-                    if options.clear_updates {
-                        self.persistence.clear_updates(doc_id).await?;
-                    }
-                    if let Some(keep) = options.prune_snapshots {
-                        self.persistence.prune_snapshots(doc_id, keep).await?;
-                    }
-                    if let Some(cutoff) = options.prune_updates_before {
-                        self.persistence
-                            .prune_updates_before(doc_id, cutoff)
-                            .await?;
-                    }
-                    return Ok(SnapshotPersistResult {
-                        version: current_version,
-                        snapshot_bytes: snapshot_bin,
-                        persisted: false,
-                    });
-                }
+        if options.skip_if_unchanged
+            && let Some(prev) = previous_snapshot.as_ref()
+            && prev.as_slice() == snapshot_bin.as_slice()
+        {
+            if options.clear_updates {
+                self.persistence.clear_updates(doc_id).await?;
+            }
+            if let Some(keep) = options.prune_snapshots {
+                self.persistence.prune_snapshots(doc_id, keep).await?;
             }
+            if let Some(cutoff) = options.prune_updates_before {
+                self.persistence
+                    .prune_updates_before(doc_id, cutoff)
+                    .await?;
+            }
+            return Ok(SnapshotPersistResult {
+                version: current_version,
+                snapshot_bytes: snapshot_bin,
+                persisted: false,
+            });
         }
         let next_version = current_version + 1;
         self.persistence
@@ -203,7 +194,7 @@ impl SnapshotService {
             Some(r) => r,
             None => return Ok(MarkdownPersistResult { written: false }),
         };
-        if record.doc_type == "folder" {
+        if record.doc_type == DocumentType::Folder {
             return Ok(MarkdownPersistResult { written: false });
         }
         let contents = extract_markdown(doc);
@@ -245,7 +236,7 @@ impl SnapshotService {
         let Some(record) = self.state_reader.document_record(doc_id).await? else {
             return Ok(None);
         };
-        if record.doc_type == "folder" {
+        if record.doc_type == DocumentType::Folder {
             return Ok(None);
         }
         let doc = self.hydrate_doc_from_state(doc_id).await?;
@@ -297,18 +288,19 @@ impl SnapshotService {
         self.archive_repo
             .list_for_document(doc_id, limit, offset)
             .await
+            .map_err(Into::into)
     }
 
     pub async fn load_archive_doc(
         &self,
         archive_id: Uuid,
     ) -> anyhow::Result<Option<(SnapshotArchiveRecord, Doc)>> {
-        let Some((record, bytes)) = self.archive_repo.get_by_id(archive_id).await? else {
+        let Some(entry) = self.archive_repo.get_by_id(archive_id).await? else {
             return Ok(None);
         };
         let doc = Doc::new();
-        apply_update_bytes(&doc, &bytes)?;
-        Ok(Some((record, doc)))
+        apply_update_bytes(&doc, &entry.bytes)?;
+        Ok(Some((entry.record, doc)))
     }
 
     pub async fn load_archive_markdown(
@@ -327,11 +319,11 @@ impl SnapshotService {
         doc_id: Uuid,
         version: i64,
     ) -> anyhow::Result<Option<(SnapshotArchiveRecord, String)>> {
-        if let Some((record, bytes)) = self.archive_repo.latest_before(doc_id, version).await? {
+        if let Some(entry) = self.archive_repo.latest_before(doc_id, version).await? {
             let doc = Doc::new();
-            apply_update_bytes(&doc, &bytes)?;
+            apply_update_bytes(&doc, &entry.bytes)?;
             let markdown = extract_markdown(&doc);
-            return Ok(Some((record, markdown)));
+            return Ok(Some((entry.record, markdown)));
         }
         Ok(None)
     }
@@ -340,8 +332,7 @@ impl SnapshotService {
 fn extract_markdown(doc: &Doc) -> String {
     let txt = doc.get_or_insert_text("content");
     let txn = doc.transact();
-    let contents = txt.get_string(&txn);
-    contents
+    txt.get_string(&txn)
 }
 
 fn render_markdown_bytes(doc_id: &Uuid, title: &str, contents: &str) -> Vec<u8> {
@@ -402,7 +393,7 @@ impl SnapshotService {
 }
 
 fn repo_path_from_record(
-    record: &crate::application::ports::realtime_hydration_port::DocumentRecord,
+    record: &crate::documents::ports::realtime::realtime_hydration_port::DocumentRecord,
 ) -> Option<String> {
     if let Some(path) = record.desired_path.as_deref() {
         return Some(normalize_repo_path(path));
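// Editorial sketch (not part of the patch): the #[derive(Default)] added above
// replaces the hand-written Default impl with identical semantics (false/None
// everywhere), so call sites can use struct-update syntax. The field values
// below are illustrative only.
fn snapshot_options_demo() -> SnapshotPersistOptions {
    SnapshotPersistOptions {
        skip_if_unchanged: true,
        prune_snapshots: Some(10),
        ..Default::default()
    }
}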
diff --git a/api/crates/application/src/documents/services/sharing/browse.rs b/api/crates/application/src/documents/services/sharing/browse.rs
new file mode 100644
index 00000000..2368ed07
--- /dev/null
+++ b/api/crates/application/src/documents/services/sharing/browse.rs
@@ -0,0 +1,57 @@
+use domain::documents::share;
+
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::{ShareBrowseResponseDto, ShareDocumentDto};
+use crate::documents::use_cases::sharing::browse_share::BrowseShare;
+use crate::documents::use_cases::sharing::validate_share::ValidateShare;
+
+use super::{ShareDocumentMeta, ShareService};
+
+impl ShareService {
+    pub async fn validate_token(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareDocumentDto>, ServiceError> {
+        let uc = ValidateShare {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(token).await.map_err(ServiceError::from)
+    }
+
+    pub async fn resolve_share_context(
+        &self,
+        token: &str,
+    ) -> Result<Option<share::ResolvedShare>, ServiceError> {
+        self.repo
+            .resolve_share_by_token(token)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn browse_share(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareBrowseResponseDto>, ServiceError> {
+        let uc = BrowseShare {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(token).await.map_err(ServiceError::from)
+    }
+
+    pub async fn share_document_meta(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareDocumentMeta>, ServiceError> {
+        let meta = self
+            .repo
+            .get_share_document_meta(token)
+            .await
+            .map_err(ServiceError::from)?
+            .map(|m| ShareDocumentMeta {
+                document_id: m.document_id,
+                owner_id: m.owner_id,
+                workspace_id: m.workspace_id,
+            });
+        Ok(meta)
+    }
+}
diff --git a/api/crates/application/src/documents/services/sharing/crud.rs b/api/crates/application/src/documents/services/sharing/crud.rs
new file mode 100644
index 00000000..0c9ae731
--- /dev/null
+++ b/api/crates/application/src/documents/services/sharing/crud.rs
@@ -0,0 +1,102 @@
+use uuid::Uuid;
+
+use domain::access::permissions::PermissionSet;
+use domain::documents::share;
+
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::{
+    ActiveShareItemDto, ApplicableShareDto, CreatedShareDto, ShareItemDto,
+};
+use crate::documents::use_cases::sharing::create_share::CreateShare;
+use crate::documents::use_cases::sharing::delete_share::DeleteShare;
+use crate::documents::use_cases::sharing::list_active::ListActiveShares;
+use crate::documents::use_cases::sharing::list_applicable::ListApplicableShares;
+use crate::documents::use_cases::sharing::list_document_shares::ListDocumentShares;
+
+use super::ShareService;
+
+use super::guards::{ensure_share_create_permission, ensure_share_delete_permission};
+
+impl ShareService {
+    pub async fn create_share(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        document_id: Uuid,
+        permission: &str,
+        expires_at: Option<chrono::DateTime<chrono::Utc>>,
+    ) -> Result<CreatedShareDto, ServiceError> {
+        ensure_share_create_permission(permissions)?;
+        let permission = share::SharePermission::parse(permission)
+            .ok_or(ServiceError::BadRequest("invalid_share_permission"))?;
+        let uc = CreateShare {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id, actor_id, document_id, permission, expires_at)
+            .await
+            .map(|res| CreatedShareDto {
+                token: res.token,
+                document_id: res.document_id,
+                document_type: res.document_type.as_str().to_string(),
+            })
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn list_document_shares(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        document_id: Uuid,
+    ) -> Result<Vec<ShareItemDto>, ServiceError> {
+        ensure_share_create_permission(permissions)?;
+        let uc = ListDocumentShares {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id, document_id)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn delete_share(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+    ) -> Result<bool, ServiceError> {
+        ensure_share_delete_permission(permissions)?;
+        let uc = DeleteShare {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id, token)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn list_applicable(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        doc_id: Uuid,
+    ) -> Result<Vec<ApplicableShareDto>, ServiceError> {
+        ensure_share_create_permission(permissions)?;
+        let uc = ListApplicableShares {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id, doc_id)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn list_active(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ActiveShareItemDto>, ServiceError> {
+        ensure_share_create_permission(permissions)?;
+        let uc = ListActiveShares {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id).await.map_err(ServiceError::from)
+    }
+}
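// Editorial sketch (not part of the patch): create_share above now parses the
// raw permission string at the application boundary instead of passing it
// through as a String. This restates that step in isolation; the accepted
// input values of SharePermission::parse are not shown in the patch.
fn parse_permission_demo(raw: &str) -> Result<share::SharePermission, ServiceError> {
    // None from parse becomes a typed 400, exactly as in create_share above.
    share::SharePermission::parse(raw)
        .ok_or(ServiceError::BadRequest("invalid_share_permission"))
}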
diff --git a/api/crates/application/src/documents/services/sharing/guards.rs b/api/crates/application/src/documents/services/sharing/guards.rs
new file mode 100644
index 00000000..f5455a0f
--- /dev/null
+++ b/api/crates/application/src/documents/services/sharing/guards.rs
@@ -0,0 +1,20 @@
+use domain::access::permissions::PermissionSet;
+use domain::documents::sharing_policy;
+
+use crate::core::services::errors::ServiceError;
+
+pub(super) fn ensure_share_create_permission(
+    permissions: &PermissionSet,
+) -> Result<(), ServiceError> {
+    sharing_policy::ensure_share_create_allowed(permissions).map_err(|_| ServiceError::Forbidden)
+}
+
+pub(super) fn ensure_share_delete_permission(
+    permissions: &PermissionSet,
+) -> Result<(), ServiceError> {
+    sharing_policy::ensure_share_delete_allowed(permissions).map_err(|_| ServiceError::Forbidden)
+}
+
+pub(super) fn ensure_doc_view_permission(permissions: &PermissionSet) -> Result<(), ServiceError> {
+    sharing_policy::ensure_document_view_allowed(permissions).map_err(|_| ServiceError::Forbidden)
+}
diff --git a/api/crates/application/src/documents/services/sharing/materialize.rs b/api/crates/application/src/documents/services/sharing/materialize.rs
new file mode 100644
index 00000000..2f55996b
--- /dev/null
+++ b/api/crates/application/src/documents/services/sharing/materialize.rs
@@ -0,0 +1,29 @@
+use uuid::Uuid;
+
+use domain::access::permissions::PermissionSet;
+
+use crate::core::services::errors::ServiceError;
+
+use super::ShareService;
+use super::guards::ensure_share_create_permission;
+
+impl ShareService {
+    pub async fn materialize_folder_share(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+    ) -> Result<u64, ServiceError> {
+        ensure_share_create_permission(permissions)?;
+        self.repo
+            .materialize_folder_share(workspace_id, actor_id, token)
+            .await
+            .map_err(|err| match err.to_string().as_str() {
+                "not_found" => ServiceError::NotFound,
+                "forbidden" => ServiceError::Forbidden,
+                "bad_request" => ServiceError::BadRequest("invalid_share_scope"),
+                _ => ServiceError::Unexpected(err.into()),
+            })
+    }
+}
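// Editorial sketch (not part of the patch): materialize_folder_share above maps
// sentinel error strings from the repository onto typed ServiceError variants.
// The helper below restates that mapping on its own; only the sentinel values
// come from the code above, the helper itself does not exist in the repository.
fn map_materialize_err(err: anyhow::Error) -> ServiceError {
    match err.to_string().as_str() {
        "not_found" => ServiceError::NotFound,
        "forbidden" => ServiceError::Forbidden,
        "bad_request" => ServiceError::BadRequest("invalid_share_scope"),
        // Anything unrecognized is surfaced as an unexpected failure.
        _ => ServiceError::Unexpected(err.into()),
    }
}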
diff --git a/api/crates/application/src/documents/services/sharing/mod.rs b/api/crates/application/src/documents/services/sharing/mod.rs
new file mode 100644
index 00000000..c0566e06
--- /dev/null
+++ b/api/crates/application/src/documents/services/sharing/mod.rs
@@ -0,0 +1,248 @@
+use std::sync::Arc;
+
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::{
+    ActiveShareItemDto, ApplicableShareDto, CreatedShareDto, ShareBrowseResponseDto,
+    ShareDocumentDto, ShareItemDto, ShareMountDto,
+};
+use crate::documents::ports::sharing::shares_repository::SharesRepository;
+use async_trait::async_trait;
+use domain::access::permissions::PermissionSet;
+use domain::documents::share;
+
+mod browse;
+mod crud;
+mod guards;
+mod materialize;
+mod mounts;
+
+pub struct ShareService {
+    repo: Arc<dyn SharesRepository>,
+}
+
+pub struct ShareDocumentMeta {
+    pub document_id: Uuid,
+    pub owner_id: Uuid,
+    pub workspace_id: Uuid,
+}
+
+#[async_trait]
+pub trait ShareServiceFacade: Send + Sync {
+    async fn create_share(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        document_id: Uuid,
+        permission: &str,
+        expires_at: Option<chrono::DateTime<chrono::Utc>>,
+    ) -> Result<CreatedShareDto, ServiceError>;
+
+    async fn list_document_shares(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        document_id: Uuid,
+    ) -> Result<Vec<ShareItemDto>, ServiceError>;
+
+    async fn delete_share(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+    ) -> Result<bool, ServiceError>;
+
+    async fn list_applicable(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        doc_id: Uuid,
+    ) -> Result<Vec<ApplicableShareDto>, ServiceError>;
+
+    async fn list_active(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ActiveShareItemDto>, ServiceError>;
+
+    async fn validate_token(&self, token: &str) -> Result<Option<ShareDocumentDto>, ServiceError>;
+
+    async fn resolve_share_context(
+        &self,
+        token: &str,
+    ) -> Result<Option<share::ResolvedShare>, ServiceError>;
+
+    async fn browse_share(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareBrowseResponseDto>, ServiceError>;
+
+    async fn share_document_meta(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareDocumentMeta>, ServiceError>;
+
+    async fn save_share_mount(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+        parent_folder_id: Option<Uuid>,
+    ) -> Result<ShareMountDto, ServiceError>;
+
+    async fn list_share_mounts(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ShareMountDto>, ServiceError>;
+
+    async fn delete_share_mount(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        mount_id: Uuid,
+    ) -> Result<bool, ServiceError>;
+
+    async fn materialize_folder_share(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+    ) -> Result<u64, ServiceError>;
+}
+
+#[async_trait]
+impl ShareServiceFacade for ShareService {
+    async fn create_share(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        document_id: Uuid,
+        permission: &str,
+        expires_at: Option<chrono::DateTime<chrono::Utc>>,
+    ) -> Result<CreatedShareDto, ServiceError> {
+        self.create_share(
+            workspace_id,
+            actor_id,
+            permissions,
+            document_id,
+            permission,
+            expires_at,
+        )
+        .await
+    }
+
+    async fn list_document_shares(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        document_id: Uuid,
+    ) -> Result<Vec<ShareItemDto>, ServiceError> {
+        self.list_document_shares(workspace_id, permissions, document_id)
+            .await
+    }
+
+    async fn delete_share(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+    ) -> Result<bool, ServiceError> {
+        self.delete_share(workspace_id, permissions, token).await
+    }
+
+    async fn list_applicable(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        doc_id: Uuid,
+    ) -> Result<Vec<ApplicableShareDto>, ServiceError> {
+        self.list_applicable(workspace_id, permissions, doc_id)
+            .await
+    }
+
+    async fn list_active(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ActiveShareItemDto>, ServiceError> {
+        self.list_active(workspace_id, permissions).await
+    }
+
+    async fn validate_token(&self, token: &str) -> Result<Option<ShareDocumentDto>, ServiceError> {
+        self.validate_token(token).await
+    }
+
+    async fn resolve_share_context(
+        &self,
+        token: &str,
+    ) -> Result<Option<share::ResolvedShare>, ServiceError> {
+        self.resolve_share_context(token).await
+    }
+
+    async fn browse_share(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareBrowseResponseDto>, ServiceError> {
+        self.browse_share(token).await
+    }
+
+    async fn share_document_meta(
+        &self,
+        token: &str,
+    ) -> Result<Option<ShareDocumentMeta>, ServiceError> {
+        self.share_document_meta(token).await
+    }
+
+    async fn save_share_mount(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+        parent_folder_id: Option<Uuid>,
+    ) -> Result<ShareMountDto, ServiceError> {
+        self.save_share_mount(workspace_id, actor_id, permissions, token, parent_folder_id)
+            .await
+    }
+
+    async fn list_share_mounts(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ShareMountDto>, ServiceError> {
+        self.list_share_mounts(workspace_id, permissions).await
+    }
+
+    async fn delete_share_mount(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        mount_id: Uuid,
+    ) -> Result<bool, ServiceError> {
+        self.delete_share_mount(workspace_id, permissions, mount_id)
+            .await
+    }
+
+    async fn materialize_folder_share(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+    ) -> Result<u64, ServiceError> {
+        self.materialize_folder_share(workspace_id, actor_id, permissions, token)
+            .await
+    }
+}
+
+impl ShareService {
+    pub fn new(repo: Arc<dyn SharesRepository>) -> Self {
+        Self { repo }
+    }
+}
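// Editorial sketch (not part of the patch): ShareServiceFacade lets callers
// depend on a trait object rather than the concrete ShareService. The AppState
// shape below is an assumption for illustration, not code from the repository.
struct AppStateDemo {
    shares: std::sync::Arc<dyn ShareServiceFacade>,
}

async fn delete_share_demo(
    state: &AppStateDemo,
    workspace_id: Uuid,
    permissions: &PermissionSet,
    token: &str,
) -> Result<bool, ServiceError> {
    // The facade method delegates straight to ShareService::delete_share.
    state.shares.delete_share(workspace_id, permissions, token).await
}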
diff --git a/api/crates/application/src/documents/services/sharing/mounts.rs b/api/crates/application/src/documents/services/sharing/mounts.rs
new file mode 100644
index 00000000..cbb66367
--- /dev/null
+++ b/api/crates/application/src/documents/services/sharing/mounts.rs
@@ -0,0 +1,106 @@
+use uuid::Uuid;
+
+use domain::access::permissions::PermissionSet;
+use domain::documents::share;
+use domain::documents::title::Title;
+
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::ShareMountDto;
+
+use super::ShareService;
+use super::guards::ensure_doc_view_permission;
+
+impl ShareService {
+    pub async fn save_share_mount(
+        &self,
+        workspace_id: Uuid,
+        actor_id: Uuid,
+        permissions: &PermissionSet,
+        token: &str,
+        parent_folder_id: Option<Uuid>,
+    ) -> Result<ShareMountDto, ServiceError> {
+        ensure_doc_view_permission(permissions)?;
+        let resolved = self
+            .repo
+            .resolve_share_by_token(token)
+            .await
+            .map_err(ServiceError::from)?
+            .ok_or(ServiceError::NotFound)?;
+        if share::is_expired(resolved.expires_at.as_ref(), chrono::Utc::now()) {
+            return Err(ServiceError::NotFound);
+        }
+        let target_title = self
+            .repo
+            .validate_share_token(token)
+            .await
+            .map_err(ServiceError::from)?
+            .map(|doc| doc.title)
+            .unwrap_or_else(|| Title::new("Shared document"));
+        let row = self
+            .repo
+            .create_share_mount(
+                workspace_id,
+                actor_id,
+                token,
+                resolved.shared_id,
+                resolved.shared_type,
+                target_title,
+                resolved.permission,
+                parent_folder_id,
+            )
+            .await
+            .map_err(|err| match err.to_string().as_str() {
+                "invalid_parent" => ServiceError::BadRequest("invalid_parent"),
+                _ => ServiceError::Unexpected(err.into()),
+            })?;
+        Ok(ShareMountDto {
+            id: row.id,
+            token: row.token,
+            target_document_id: row.target_document_id,
+            target_document_type: row.target_document_type.as_str().to_string(),
+            target_title: row.target_title.into_string(),
+            permission: row.permission.as_str().to_string(),
+            parent_folder_id: row.parent_folder_id,
+            created_at: row.created_at,
+        })
+    }
+
+    pub async fn list_share_mounts(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ShareMountDto>, ServiceError> {
+        ensure_doc_view_permission(permissions)?;
+        let rows = self
+            .repo
+            .list_share_mounts(workspace_id)
+            .await
+            .map_err(ServiceError::from)?;
+        Ok(rows
+            .into_iter()
+            .map(|row| ShareMountDto {
+                id: row.id,
+                token: row.token,
+                target_document_id: row.target_document_id,
+                target_document_type: row.target_document_type.as_str().to_string(),
+                target_title: row.target_title.into_string(),
+                permission: row.permission.as_str().to_string(),
+                parent_folder_id: row.parent_folder_id,
+                created_at: row.created_at,
+            })
+            .collect())
+    }
+
+    pub async fn delete_share_mount(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+        mount_id: Uuid,
+    ) -> Result<bool, ServiceError> {
+        ensure_doc_view_permission(permissions)?;
+        self.repo
+            .delete_share_mount(workspace_id, mount_id)
+            .await
+            .map_err(ServiceError::from)
+    }
+}
diff --git a/api/crates/application/src/documents/services/snapshot_dto.rs b/api/crates/application/src/documents/services/snapshot_dto.rs
new file mode 100644
index 00000000..555a8d53
--- /dev/null
+++ b/api/crates/application/src/documents/services/snapshot_dto.rs
@@ -0,0 +1,20 @@
+use crate::documents::dtos::{SnapshotDiffDto, SnapshotDiffSideDto, SnapshotSummaryDto};
+use crate::documents::use_cases::snapshot_diff::{SnapshotDiffResult, SnapshotDiffSide};
+
+pub(super) fn snapshot_diff_dto_from_result(result: SnapshotDiffResult) -> SnapshotDiffDto {
+    SnapshotDiffDto {
+        base: snapshot_diff_side_from_use_case(result.base),
+        target: snapshot_diff_side_from_use_case(result.target),
+        diff: result.diff,
+    }
+}
+
+fn snapshot_diff_side_from_use_case(side: SnapshotDiffSide) -> SnapshotDiffSideDto {
+    match side {
+        SnapshotDiffSide::Current { markdown } => SnapshotDiffSideDto::Current { markdown },
+        SnapshotDiffSide::Snapshot { record, markdown } => SnapshotDiffSideDto::Snapshot {
+            snapshot: SnapshotSummaryDto::from(record),
+            markdown,
+        },
+    }
+}
diff --git a/api/crates/application/src/documents/services/snapshots.rs b/api/crates/application/src/documents/services/snapshots.rs
new file mode 100644
index 00000000..72364519
--- /dev/null
+++ b/api/crates/application/src/documents/services/snapshots.rs
@@ -0,0 +1,141 @@
+use uuid::Uuid;
+
+use crate::core::services::access::{self, Actor};
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::{SnapshotDiffBaseMode, SnapshotDiffDto, SnapshotSummaryDto};
+use crate::documents::use_cases::list_snapshots::ListSnapshots;
+use crate::documents::use_cases::restore_snapshot::RestoreSnapshot;
+use crate::documents::use_cases::snapshot_diff::SnapshotDiff;
+use crate::documents::use_cases::snapshot_download::{DownloadSnapshot, SnapshotDownload};
+
+use super::DocumentService;
+use super::snapshot_dto::snapshot_diff_dto_from_result;
+
+impl DocumentService {
+    pub async fn list_snapshots(
+        &self,
+        actor: &Actor,
+        doc_id: Uuid,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<SnapshotSummaryDto>, ServiceError> {
+        access::require_view(
+            self.access_repo.as_ref(),
+            self.share_access.as_ref(),
+            actor,
+            doc_id,
+        )
+        .await
+        .map_err(|err| match err {
+            ServiceError::Forbidden => ServiceError::Unauthorized,
+            other => other,
+        })?;
+
+        let uc = ListSnapshots {
+            snapshots: self.snapshot_service.as_ref(),
+        };
+        let records = uc
+            .execute(doc_id, limit, offset)
+            .await
+            .map_err(ServiceError::from)?;
+        Ok(records.into_iter().map(SnapshotSummaryDto::from).collect())
+    }
+
+    pub async fn snapshot_diff(
+        &self,
+        actor: &Actor,
+        doc_id: Uuid,
+        snapshot_id: Uuid,
+        compare: Option<Uuid>,
+        base_mode: SnapshotDiffBaseMode,
+    ) -> Result<SnapshotDiffDto, ServiceError> {
+        access::require_view(
+            self.access_repo.as_ref(),
+            self.share_access.as_ref(),
+            actor,
+            doc_id,
+        )
+        .await
+        .map_err(|err| match err {
+            ServiceError::Forbidden => ServiceError::Unauthorized,
+            other => other,
+        })?;
+
+        let uc = SnapshotDiff {
+            snapshots: self.snapshot_service.as_ref(),
+            realtime: self.realtime.as_ref(),
+        };
+        let result = uc
+            .execute(doc_id, snapshot_id, compare, base_mode)
+            .await
+            .map_err(ServiceError::from)?
+            .ok_or(ServiceError::NotFound)?;
+
+        Ok(snapshot_diff_dto_from_result(result))
+    }
+
+    pub async fn restore_snapshot(
+        &self,
+        actor: &Actor,
+        doc_id: Uuid,
+        snapshot_id: Uuid,
+    ) -> Result<SnapshotSummaryDto, ServiceError> {
+        access::require_edit(
+            self.access_repo.as_ref(),
+            self.share_access.as_ref(),
+            actor,
+            doc_id,
+        )
+        .await
+        .map_err(|err| match err {
+            ServiceError::Forbidden => ServiceError::Unauthorized,
+            other => other,
+        })?;
+
+        let created_by = match actor {
+            Actor::User(uid) => Some(*uid),
+            _ => None,
+        };
+
+        let uc = RestoreSnapshot {
+            snapshots: self.snapshot_service.as_ref(),
+            realtime: self.realtime.as_ref(),
+        };
+        let record = uc
+            .execute(doc_id, snapshot_id, created_by)
+            .await
+            .map_err(ServiceError::from)?
+            .ok_or(ServiceError::NotFound)?;
+
+        Ok(SnapshotSummaryDto::from(record))
+    }
+
+    pub async fn download_snapshot(
+        &self,
+        actor: &Actor,
+        doc_id: Uuid,
+        snapshot_id: Uuid,
+    ) -> Result<SnapshotDownload, ServiceError> {
+        access::require_view(
+            self.access_repo.as_ref(),
+            self.share_access.as_ref(),
+            actor,
+            doc_id,
+        )
+        .await
+        .map_err(|err| match err {
+            ServiceError::Forbidden => ServiceError::Unauthorized,
+            other => other,
+        })?;
+
+        let uc = DownloadSnapshot {
+            files: self.files_repo.as_ref(),
+            storage: self.storage.as_ref(),
+            snapshots: self.snapshot_service.as_ref(),
+        };
+        uc.execute(doc_id, snapshot_id)
+            .await
+            .map_err(ServiceError::from)?
+            .ok_or(ServiceError::NotFound)
+    }
+}
diff --git a/api/crates/application/src/documents/services/tagging/mod.rs b/api/crates/application/src/documents/services/tagging/mod.rs
new file mode 100644
index 00000000..9ac28a88
--- /dev/null
+++ b/api/crates/application/src/documents/services/tagging/mod.rs
@@ -0,0 +1,52 @@
+use std::sync::Arc;
+
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::TagItemDto;
+use crate::documents::ports::tagging::tag_repository::TagRepository;
+use crate::documents::use_cases::tagging::list_tags::ListTags;
+use async_trait::async_trait;
+
+pub struct TagService {
+    repo: Arc<dyn TagRepository>,
+}
+
+#[async_trait]
+pub trait TagServiceFacade: Send + Sync {
+    async fn list(
+        &self,
+        workspace_id: Uuid,
+        filter: Option<String>,
+    ) -> Result<Vec<TagItemDto>, ServiceError>;
+}
+
+#[async_trait]
+impl TagServiceFacade for TagService {
+    async fn list(
+        &self,
+        workspace_id: Uuid,
+        filter: Option<String>,
+    ) -> Result<Vec<TagItemDto>, ServiceError> {
+        self.list(workspace_id, filter).await
+    }
+}
+
+impl TagService {
+    pub fn new(repo: Arc<dyn TagRepository>) -> Self {
+        Self { repo }
+    }
+
+    pub async fn list(
+        &self,
+        workspace_id: Uuid,
+        filter: Option<String>,
+    ) -> Result<Vec<TagItemDto>, ServiceError> {
+        let uc = ListTags {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id, filter)
+            .await
+            .map_err(ServiceError::from)
+    }
+}
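// Editorial sketch (not part of the patch): every entry point in snapshots.rs
// above remaps the access check's Forbidden to Unauthorized before proceeding.
// The helper below merely restates that repeated closure as a named function;
// it does not exist in the repository.
fn remap_forbidden(err: ServiceError) -> ServiceError {
    match err {
        // An actor without view/edit capability is treated as unauthenticated.
        ServiceError::Forbidden => ServiceError::Unauthorized,
        other => other,
    }
}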
diff --git a/api/crates/application/src/documents/services/util.rs b/api/crates/application/src/documents/services/util.rs
new file mode 100644
index 00000000..8bd6f27e
--- /dev/null
+++ b/api/crates/application/src/documents/services/util.rs
@@ -0,0 +1,41 @@
+use crate::core::services::errors::ServiceError;
+use crate::documents::dtos::DocumentListFilter;
+use crate::documents::ports::document_repository::DocumentListState;
+use crate::documents::ports::document_repository::DocumentRepositoryError;
+use domain::documents::hierarchy;
+use domain::documents::policy::DocumentPolicyError;
+
+pub(super) fn to_repo_state(filter: DocumentListFilter) -> DocumentListState {
+    match filter {
+        DocumentListFilter::Active => DocumentListState::Active,
+        DocumentListFilter::Archived => DocumentListState::Archived,
+        DocumentListFilter::All => DocumentListState::All,
+    }
+}
+
+pub(super) fn map_policy_error(err: DocumentPolicyError) -> ServiceError {
+    match err {
+        DocumentPolicyError::Forbidden => ServiceError::Forbidden,
+        DocumentPolicyError::Archived | DocumentPolicyError::NotArchived => ServiceError::Conflict,
+        DocumentPolicyError::FolderNotSupported => {
+            ServiceError::BadRequest("operation_not_supported_for_folder")
+        }
+    }
+}
+
+pub(super) fn map_parent_error(err: hierarchy::ParentValidationError) -> ServiceError {
+    match err {
+        hierarchy::ParentValidationError::NotFound => ServiceError::NotFound,
+        hierarchy::ParentValidationError::Archived => ServiceError::Conflict,
+    }
+}
+
+pub(super) fn map_tx_error(err: anyhow::Error) -> ServiceError {
+    match err.downcast::<ServiceError>() {
+        Ok(service_error) => service_error,
+        Err(err) => match err.downcast::<DocumentRepositoryError>() {
+            Ok(repo_err) => ServiceError::from(repo_err),
+            Err(err) => ServiceError::from(err),
+        },
+    }
+}
diff --git a/api/crates/application/src/documents/use_cases/create_document.rs b/api/crates/application/src/documents/use_cases/create_document.rs
new file mode 100644
index 00000000..0b3ccf54
--- /dev/null
+++ b/api/crates/application/src/documents/use_cases/create_document.rs
@@ -0,0 +1,129 @@
+use uuid::Uuid;
+
+use crate::documents::ports::document_repository::{
+    DocumentRepoResult, DocumentRepository, DocumentRepositoryError, DocumentRepositoryTx,
+};
+use domain::documents::doc_type::DocumentType;
+use domain::documents::document::Document as DomainDocument;
+use domain::documents::path as doc_path;
+use domain::documents::title::Title;
+
+const MAX_SLUG_ATTEMPTS: usize = 50;
+
+#[async_trait::async_trait]
+pub trait CreateDocumentRepository: Send {
+    #[allow(clippy::too_many_arguments)]
+    async fn create_for_user(
+        &mut self,
+        workspace_id: Uuid,
+        created_by: Uuid,
+        title: &Title,
+        parent_id: Option<Uuid>,
+        doc_type: DocumentType,
+        created_by_plugin: Option<&str>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<DomainDocument>;
+}
+
+#[async_trait::async_trait]
+impl<R: DocumentRepository + ?Sized> CreateDocumentRepository for &R {
+    async fn create_for_user(
+        &mut self,
+        workspace_id: Uuid,
+        created_by: Uuid,
+        title: &Title,
+        parent_id: Option<Uuid>,
+        doc_type: DocumentType,
+        created_by_plugin: Option<&str>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<DomainDocument> {
+        (*self)
+            .create_for_user(
+                workspace_id,
+                created_by,
+                title,
+                parent_id,
+                doc_type,
+                created_by_plugin,
+                slug,
+                desired_path,
+            )
+            .await
+    }
+}
+
+#[async_trait::async_trait]
+impl<'a> CreateDocumentRepository for dyn DocumentRepositoryTx + 'a {
+    async fn create_for_user(
+        &mut self,
+        workspace_id: Uuid,
+        created_by: Uuid,
+        title: &Title,
+        parent_id: Option<Uuid>,
+        doc_type: DocumentType,
+        created_by_plugin: Option<&str>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<DomainDocument> {
+        DocumentRepositoryTx::create_for_user(
+            self,
+            workspace_id,
+            created_by,
+            title,
+            parent_id,
+            doc_type,
+            created_by_plugin,
+            slug,
+            desired_path,
+        )
+        .await
+    }
+}
+
+pub struct CreateDocument<'a, R: CreateDocumentRepository + ?Sized> {
+    pub repo: &'a mut R,
+}
+
+impl<'a, R: CreateDocumentRepository + ?Sized> CreateDocument<'a, R> {
+    #[allow(clippy::too_many_arguments)]
+    pub async fn execute(
+        &mut self,
+        workspace_id: Uuid,
+        created_by: Uuid,
+        title: &Title,
+        parent_id: Option<Uuid>,
+        parent_desired_path: Option<&doc_path::DesiredPath>,
+        doc_type: DocumentType,
+        created_by_plugin: Option<&str>,
+    ) -> DocumentRepoResult<DomainDocument> {
+        let base_slug = doc_path::Slug::from_title(title.as_str());
+        for (slug, desired_path) in doc_path::desired_path_candidates(
+            &base_slug,
+            parent_desired_path,
+            doc_type,
+            MAX_SLUG_ATTEMPTS,
+        ) {
+            let result = self
+                .repo
+                .create_for_user(
+                    workspace_id,
+                    created_by,
+                    title,
+                    parent_id,
+                    doc_type,
+                    created_by_plugin,
+                    &slug,
+                    &desired_path,
+                )
+                .await;
+            match result {
+                Ok(doc) => return Ok(doc),
+                Err(DocumentRepositoryError::PathConflict) => continue,
+                Err(err) => return Err(err),
+            }
+        }
+        Err(DocumentRepositoryError::PathConflict)
+    }
+}
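// Editorial sketch (not part of the patch): CreateDocument::execute above keeps
// trying slug/path candidates until the insert stops hitting PathConflict. The
// helper below restates that control flow over a plain sequence of results; it
// is illustrative only and does not exist in the repository.
fn retry_semantics_demo<T>(
    attempts: Vec<Result<T, DocumentRepositoryError>>,
) -> Result<T, DocumentRepositoryError> {
    for attempt in attempts {
        match attempt {
            Ok(doc) => return Ok(doc),
            // A path conflict means "try the next candidate slug".
            Err(DocumentRepositoryError::PathConflict) => continue,
            // Any other repository error aborts immediately.
            Err(err) => return Err(err),
        }
    }
    // Candidates exhausted: report the conflict to the caller.
    Err(DocumentRepositoryError::PathConflict)
}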
diff --git a/api/crates/application/src/documents/use_cases/delete_document.rs b/api/crates/application/src/documents/use_cases/delete_document.rs
new file mode 100644
index 00000000..ee66b6e4
--- /dev/null
+++ b/api/crates/application/src/documents/use_cases/delete_document.rs
@@ -0,0 +1,24 @@
+use uuid::Uuid;
+
+use crate::documents::ports::document_repository::{DocumentRepoResult, DocumentRepositoryTx};
+use domain::documents::doc_type::DocumentType;
+
+pub struct DeleteDocument<'a, R>
+where
+    R: DocumentRepositoryTx + ?Sized,
+{
+    pub repo: &'a mut R,
+}
+
+impl<'a, R> DeleteDocument<'a, R>
+where
+    R: DocumentRepositoryTx + ?Sized,
+{
+    pub async fn execute(
+        &mut self,
+        id: Uuid,
+        workspace_id: Uuid,
+    ) -> DocumentRepoResult<Option<DocumentType>> {
+        self.repo.delete_owned(id, workspace_id).await
+    }
+}
diff --git a/api/src/application/use_cases/documents/download_document.rs b/api/crates/application/src/documents/use_cases/download_document.rs
similarity index 75%
rename from api/src/application/use_cases/documents/download_document.rs
rename to api/crates/application/src/documents/use_cases/download_document.rs
index 7e85d69f..3f8d75e7 100644
--- a/api/src/application/use_cases/documents/download_document.rs
+++ b/api/crates/application/src/documents/use_cases/download_document.rs
@@ -5,18 +5,19 @@ use std::path::{Component, Path};
 use chrono::Utc;
 use uuid::Uuid;
 
-use crate::application::access::{self, Actor, Capability};
-use crate::application::dto::document_export::{DocumentDownload, DocumentDownloadFormat};
-use crate::application::ports::access_repository::AccessRepository;
-use crate::application::ports::document_exporter::{
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::core::services::access::{self, Actor, Capability};
+use crate::documents::dtos::{DocumentDownload, DocumentDownloadFormat};
+use crate::documents::ports::access_repository::AccessRepository;
+use crate::documents::ports::document_exporter::{
     DocumentExportAssets, DocumentExportAttachment, DocumentExporter,
 };
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::share_access_port::ShareAccessPort;
-use crate::application::ports::storage_port::StorageResolverPort;
-use crate::application::services::realtime::snapshot::SnapshotService;
-use crate::domain::documents::document::Document as DomainDocument;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::files::files_repository::FilesRepository;
+use crate::documents::ports::sharing::share_access_port::ShareAccessPort;
+use crate::documents::services::realtime::snapshot::SnapshotService;
+use domain::documents::doc_type::DocumentType;
+use domain::documents::document::Document as DomainDocument;
 use thiserror::Error;
 use zip::write::FileOptions;
 use zip::{CompressionMethod, ZipWriter};
@@ -53,7 +54,7 @@ where
         doc_id: Uuid,
         format: DocumentDownloadFormat,
     ) -> anyhow::Result<Option<DocumentDownload>> {
-        let capability = access::resolve_document(self.access, self.shares, actor, doc_id).await;
+        let capability = access::resolve_document(self.access, self.shares, actor, doc_id).await?;
         if capability < Capability::View {
             return Ok(None);
         }
@@ -63,7 +64,7 @@ where
             None => return Ok(None),
         };
 
-        if document.doc_type == "folder" {
+        if document.doc_type() == DocumentType::Folder {
             return self.download_folder(actor, &document, format).await;
         }
 
@@ -80,17 +81,18 @@ where
         &self,
         document: &DomainDocument,
     ) -> anyhow::Result<Option<DocumentDownload>> {
-        if document.doc_type == "folder" {
+        if document.doc_type() == DocumentType::Folder {
             return Ok(None);
         }
-        let export = match self.snapshot.export_current_markdown(&document.id).await? {
+        let doc_id = document.id();
+        let export = match self.snapshot.export_current_markdown(&doc_id).await? {
             Some(export) => export,
             None => return Ok(None),
         };
-        let doc_dir = self.storage.build_doc_dir(document.id).await?;
-        let attachments = self.collect_attachments(document.id, &doc_dir).await?;
-        let safe_title = sanitize_filename(&document.title);
-        let display_title = document.title.trim();
+        let doc_dir = self.storage.build_doc_dir(doc_id).await?;
+        let attachments = self.collect_attachments(doc_id, &doc_dir).await?;
+        let safe_title = sanitize_filename(document.title().as_str());
+        let display_title = document.title().as_str().trim();
         let display_title = if display_title.is_empty() {
             None
         } else {
@@ -150,23 +152,28 @@ where
         }
 
         let mut nodes: HashMap<Uuid, DomainDocument> = HashMap::new();
-        nodes.insert(folder.id, folder.clone());
+        nodes.insert(folder.id(), folder.clone());
         let subtree = self
             .documents
-            .list_owned_subtree_documents(folder.workspace_id, folder.id)
+            .list_owned_subtree_documents(folder.workspace_id(), folder.id())
             .await?;
         for entry in subtree {
-            if entry.id == folder.id {
+            if entry.id == folder.id() {
                 continue;
             }
             if let Some(doc) = self.documents.get_by_id(entry.id).await? {
-                nodes.insert(doc.id, doc);
+                nodes.insert(doc.id(), doc);
             }
         }
-        let root_name = sanitize_filename(&folder.title);
+        let root_name = sanitize_filename(folder.title().as_str());
         let entries = self
-            .build_archive_entries(actor, &nodes, folder.id, Some(folder.desired_path.as_str()))
+            .build_archive_entries(
+                actor,
+                &nodes,
+                folder.id(),
+                Some(folder.desired_path().as_str()),
+            )
             .await?;
         let bytes = build_folder_archive(&root_name, &entries)?;
         Ok(Some(DocumentDownload {
@@ -193,29 +200,29 @@ where
             .await?;
         let mut nodes: HashMap<Uuid, DomainDocument> = HashMap::new();
         for doc in documents {
-            nodes.insert(doc.id, doc);
+            nodes.insert(doc.id(), doc);
         }
-        let root = DomainDocument {
-            id: workspace_id,
-            owner_id: workspace_id,
-            owner_user_id: None,
+        let root = DomainDocument::rehydrate(
             workspace_id,
-            title: workspace_name.to_string(),
-            parent_id: None,
-            doc_type: "folder".to_string(),
-            created_at: Utc::now(),
-            updated_at: Utc::now(),
-            created_by_plugin: None,
-            slug: sanitize_filename(workspace_name),
-            desired_path: String::new(),
-            path: None,
-            created_by: None,
-            archived_at: None,
-            archived_by: None,
-            archived_parent_id: None,
-        };
-        nodes.insert(root.id, root);
+            None,
+            workspace_id,
+            domain::documents::title::Title::new(workspace_name),
+            None,
+            DocumentType::Folder,
+            Utc::now(),
+            Utc::now(),
+            None,
+            domain::documents::path::Slug::new(sanitize_filename(workspace_name))
+                .unwrap_or_else(|_| domain::documents::path::Slug::from_title(workspace_name)),
+            domain::documents::path::DesiredPath::root(),
+            None,
+            None,
+            None,
+            None,
+            None,
+        );
+        nodes.insert(root.id(), root);
 
         let root_name = sanitize_filename(workspace_name);
         let entries = self
@@ -238,11 +245,11 @@ where
     ) -> anyhow::Result<Vec<ArchiveEntry>> {
         let mut entries: Vec<ArchiveEntry> = Vec::new();
         for doc in nodes.values() {
-            if doc.id == root_id || doc.doc_type == "folder" {
+            if doc.id() == root_id || doc.doc_type() == DocumentType::Folder {
                 continue;
            }
             let capability =
-                access::resolve_document(self.access, self.shares, actor, doc.id).await;
+                access::resolve_document(self.access, self.shares, actor, doc.id()).await?;
             if capability < Capability::View {
                 continue;
             }
@@ -277,20 +284,18 @@ fn sanitize_filename(name: &str) -> String {
 }
 
 fn resolve_relative_path(doc: &DomainDocument, base_prefix: Option<&str>) -> String {
-    let path = doc.desired_path.trim_start_matches('/');
-    if let Some(base) = base_prefix {
-        let base = base.trim_start_matches('/');
-        if !base.is_empty() {
-            if let Some(stripped) = path.strip_prefix(base) {
-                let trimmed = stripped.trim_start_matches('/');
-                if !trimmed.is_empty() {
-                    return trimmed.to_string();
-                }
-            }
-        }
-    }
+    let path = doc.desired_path().as_str().trim_start_matches('/');
+    if let Some(base) = base_prefix
+        .map(|b| b.trim_start_matches('/'))
+        .filter(|b| !b.is_empty())
+        && let Some(stripped) = path.strip_prefix(base)
+        && let trimmed = stripped.trim_start_matches('/')
+        && !trimmed.is_empty()
+    {
+        return trimmed.to_string();
+    }
     if path.is_empty() {
-        format!("{}.md", sanitize_filename(&doc.title))
+        format!("{}.md", sanitize_filename(doc.title().as_str()))
     } else {
         path.to_string()
     }
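// Editorial sketch (not part of the patch): the rewritten resolve_relative_path
// above strips a shared base prefix and falls back to the full path. This
// helper mirrors that logic on plain strings so the behavior can be seen in
// isolation; the paths are made-up examples.
fn relative_path_demo(path: &str, base: &str) -> String {
    let path = path.trim_start_matches('/');
    let base = base.trim_start_matches('/');
    if !base.is_empty()
        && let Some(stripped) = path.strip_prefix(base)
    {
        let trimmed = stripped.trim_start_matches('/');
        if !trimmed.is_empty() {
            // e.g. ("/team/specs/plan.md", "/team") -> "specs/plan.md"
            return trimmed.to_string();
        }
    }
    // No usable base prefix: keep the path as-is.
    path.to_string()
}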
diff --git a/api/src/application/use_cases/files/mod.rs b/api/crates/application/src/documents/use_cases/files/mod.rs
similarity index 100%
rename from api/src/application/use_cases/files/mod.rs
rename to api/crates/application/src/documents/use_cases/files/mod.rs
diff --git a/api/src/application/use_cases/files/upload_file.rs b/api/crates/application/src/documents/use_cases/files/upload_file.rs
similarity index 94%
rename from api/src/application/use_cases/files/upload_file.rs
rename to api/crates/application/src/documents/use_cases/files/upload_file.rs
index 501597d3..f589bcfc 100644
--- a/api/src/application/use_cases/files/upload_file.rs
+++ b/api/crates/application/src/documents/use_cases/files/upload_file.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;
 
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::storage_port::StorageResolverPort;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::documents::ports::files::files_repository::FilesRepository;
 
 pub struct UploadFile<'a, R, S>
 where
diff --git a/api/crates/application/src/documents/use_cases/get_backlinks.rs b/api/crates/application/src/documents/use_cases/get_backlinks.rs
new file mode 100644
index 00000000..a121b431
--- /dev/null
+++ b/api/crates/application/src/documents/use_cases/get_backlinks.rs
@@ -0,0 +1,21 @@
+use uuid::Uuid;
+
+use crate::documents::ports::linkgraph_repository::LinkGraphRepository;
+use domain::documents::document::BacklinkInfo;
+
+pub struct GetBacklinks<'a, R: LinkGraphRepository + ?Sized> {
+    pub repo: &'a R,
+}
+
+impl<'a, R: LinkGraphRepository + ?Sized> GetBacklinks<'a, R> {
+    pub async fn execute(
+        &self,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+    ) -> anyhow::Result<Vec<BacklinkInfo>> {
+        self.repo
+            .backlinks_for(workspace_id, doc_id)
+            .await
+            .map_err(Into::into)
+    }
+}
diff --git a/api/src/application/use_cases/documents/get_document.rs b/api/crates/application/src/documents/use_cases/get_document.rs
similarity index 62%
rename from api/src/application/use_cases/documents/get_document.rs
rename to api/crates/application/src/documents/use_cases/get_document.rs
index 1ed52a02..65f5621f 100644
--- a/api/src/application/use_cases/documents/get_document.rs
+++ b/api/crates/application/src/documents/use_cases/get_document.rs
@@ -1,10 +1,10 @@
 use uuid::Uuid;
 
-use crate::application::access::{self, Actor, Capability};
-use crate::application::ports::access_repository::AccessRepository;
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::share_access_port::ShareAccessPort;
-use crate::domain::documents::document::Document as DomainDocument;
+use crate::core::services::access::{self, Actor, Capability};
+use crate::documents::ports::access_repository::AccessRepository;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::sharing::share_access_port::ShareAccessPort;
+use domain::documents::document::Document as DomainDocument;
 
 pub struct GetDocument<'a, R, S, A>
 where
@@ -25,10 +25,10 @@ where
{
     pub async fn execute(&self, actor: &Actor, id: Uuid) -> anyhow::Result<Option<DomainDocument>> {
         // Enforce view permission using existing access policy
-        let cap = access::resolve_document(self.access, self.shares, actor, id).await;
+        let cap = access::resolve_document(self.access, self.shares, actor, id).await?;
         if cap < Capability::View {
             return Ok(None);
         }
-        self.repo.get_by_id(id).await
+        self.repo.get_by_id(id).await.map_err(Into::into)
     }
 }
diff --git a/api/crates/application/src/documents/use_cases/get_outgoing_links.rs b/api/crates/application/src/documents/use_cases/get_outgoing_links.rs
new file mode 100644
index 00000000..539a6ed3
--- /dev/null
+++ b/api/crates/application/src/documents/use_cases/get_outgoing_links.rs
@@ -0,0 +1,21 @@
+use uuid::Uuid;
+
+use crate::documents::ports::linkgraph_repository::LinkGraphRepository;
+use domain::documents::document::OutgoingLink;
+
+pub struct GetOutgoingLinks<'a, R: LinkGraphRepository + ?Sized> {
+    pub repo: &'a R,
+}
+
+impl<'a, R: LinkGraphRepository + ?Sized> GetOutgoingLinks<'a, R> {
+    pub async fn execute(
+        &self,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+    ) -> anyhow::Result<Vec<OutgoingLink>> {
+        self.repo
+            .outgoing_links_for(workspace_id, doc_id)
+            .await
+            .map_err(Into::into)
+    }
+}
diff --git a/api/src/application/use_cases/documents/list_documents.rs b/api/crates/application/src/documents/use_cases/list_documents.rs
similarity index 65%
rename from api/src/application/use_cases/documents/list_documents.rs
rename to api/crates/application/src/documents/use_cases/list_documents.rs
index 02abf4a6..3230c902 100644
--- a/api/src/application/use_cases/documents/list_documents.rs
+++ b/api/crates/application/src/documents/use_cases/list_documents.rs
@@ -1,7 +1,9 @@
 use uuid::Uuid;
 
-use crate::application::ports::document_repository::{DocumentListState, DocumentRepository};
-use crate::domain::documents::document::Document as DomainDocument;
+use crate::documents::ports::document_repository::{
+    DocumentListState, DocumentRepoResult, DocumentRepository,
+};
+use domain::documents::document::Document as DomainDocument;
 
 pub struct ListDocuments<'a, R: DocumentRepository + ?Sized> {
     pub repo: &'a R,
@@ -14,7 +16,7 @@ impl<'a, R: DocumentRepository + ?Sized> ListDocuments<'a, R> {
         query: Option<String>,
         tag: Option<String>,
         state: DocumentListState,
-    ) -> anyhow::Result<Vec<DomainDocument>> {
+    ) -> DocumentRepoResult<Vec<DomainDocument>> {
         self.repo
             .list_for_user(workspace_id, query, tag, state)
             .await
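// Editorial sketch (not part of the patch): get_document.rs above relies on
// Capability being an ordered type, so "cap < Capability::View" rejects
// anything below view access. A hypothetical restatement of that predicate:
fn can_view_demo(cap: Capability) -> bool {
    cap >= Capability::View
}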
diff --git a/api/src/application/use_cases/documents/list_snapshots.rs b/api/crates/application/src/documents/use_cases/list_snapshots.rs
similarity index 70%
rename from api/src/application/use_cases/documents/list_snapshots.rs
rename to api/crates/application/src/documents/use_cases/list_snapshots.rs
index 93917559..ed159c30 100644
--- a/api/src/application/use_cases/documents/list_snapshots.rs
+++ b/api/crates/application/src/documents/use_cases/list_snapshots.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;
 
-use crate::application::ports::document_snapshot_archive_repository::SnapshotArchiveRecord;
-use crate::application::services::realtime::snapshot::SnapshotService;
+use crate::documents::ports::document_snapshot_archive_repository::SnapshotArchiveRecord;
+use crate::documents::services::realtime::snapshot::SnapshotService;
 
 pub struct ListSnapshots<'a> {
     pub snapshots: &'a SnapshotService,
diff --git a/api/src/application/use_cases/documents/mod.rs b/api/crates/application/src/documents/use_cases/mod.rs
similarity index 82%
rename from api/src/application/use_cases/documents/mod.rs
rename to api/crates/application/src/documents/use_cases/mod.rs
index fd6a7c4f..ee4dcb18 100644
--- a/api/src/application/use_cases/documents/mod.rs
+++ b/api/crates/application/src/documents/use_cases/mod.rs
@@ -1,15 +1,17 @@
-pub mod archive_document;
 pub mod create_document;
 pub mod delete_document;
 pub mod download_document;
+pub mod files;
 pub mod get_backlinks;
 pub mod get_document;
 pub mod get_outgoing_links;
 pub mod list_documents;
 pub mod list_snapshots;
+pub mod publishing;
 pub mod restore_snapshot;
 pub mod search_documents;
+pub mod sharing;
 pub mod snapshot_diff;
 pub mod snapshot_download;
-pub mod unarchive_document;
+pub mod tagging;
 pub mod update_document;
diff --git a/api/src/application/use_cases/public/get_public.rs b/api/crates/application/src/documents/use_cases/publishing/get_public.rs
similarity index 74%
rename from api/src/application/use_cases/public/get_public.rs
rename to api/crates/application/src/documents/use_cases/publishing/get_public.rs
index 844d89c4..2f1c1ee8 100644
--- a/api/src/application/use_cases/public/get_public.rs
+++ b/api/crates/application/src/documents/use_cases/publishing/get_public.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;
 
-use crate::application::ports::public_repository::PublicRepository;
-use crate::domain::documents::document::Document;
+use crate::documents::ports::publishing::public_repository::PublicRepository;
+use domain::documents::document::Document;
 
 pub struct GetPublicByWorkspaceAndId<'a, R: PublicRepository + ?Sized> {
     pub repo: &'a R,
@@ -16,5 +16,6 @@ impl<'a, R: PublicRepository + ?Sized> GetPublicByWorkspaceAndId<'a, R> {
         self.repo
             .get_public_meta_by_workspace_and_id(workspace_slug, doc_id)
             .await
+            .map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/public/get_status.rs b/api/crates/application/src/documents/use_cases/publishing/get_status.rs
similarity index 56%
rename from api/src/application/use_cases/public/get_status.rs
rename to api/crates/application/src/documents/use_cases/publishing/get_status.rs
index 5efedd1b..55c9bcb7 100644
--- a/api/src/application/use_cases/public/get_status.rs
+++ b/api/crates/application/src/documents/use_cases/publishing/get_status.rs
@@ -1,6 +1,6 @@
 use uuid::Uuid;
 
-use crate::application::ports::public_repository::PublicRepository;
+use crate::documents::ports::publishing::public_repository::PublicRepository;
 
 #[derive(Debug, Clone)]
 pub struct PublishStatusDto {
     pub slug: String,
@@ -17,11 +17,12 @@ impl<'a, R: PublicRepository + ?Sized> GetPublishStatus<'a, R> {
         workspace_id: Uuid,
         doc_id: Uuid,
     ) -> anyhow::Result<Option<PublishStatusDto>> {
-        if let Some((slug, workspace_slug)) =
-            self.repo.get_publish_status(workspace_id, doc_id).await?
-        {
-            let public_url = format!("/w/{}/{}", workspace_slug, doc_id);
-            Ok(Some(PublishStatusDto { slug, public_url }))
+        if let Some(status) = self.repo.get_publish_status(workspace_id, doc_id).await? {
+            let public_url = format!("/w/{}/{}", status.workspace_slug, doc_id);
+            Ok(Some(PublishStatusDto {
+                slug: status.slug,
+                public_url,
+            }))
         } else {
             Ok(None)
         }
diff --git a/api/src/application/use_cases/public/list_workspace.rs b/api/crates/application/src/documents/use_cases/publishing/list_workspace.rs
similarity index 55%
rename from api/src/application/use_cases/public/list_workspace.rs
rename to api/crates/application/src/documents/use_cases/publishing/list_workspace.rs
index b81b1ba6..eac40dee 100644
--- a/api/src/application/use_cases/public/list_workspace.rs
+++ b/api/crates/application/src/documents/use_cases/publishing/list_workspace.rs
@@ -1,5 +1,5 @@
-use crate::application::dto::public::PublicDocumentSummaryDto;
-use crate::application::ports::public_repository::PublicRepository;
+use crate::documents::dtos::PublicDocumentSummaryDto;
+use crate::documents::ports::publishing::public_repository::PublicRepository;
 
 pub struct ListWorkspacePublic<'a, R: PublicRepository + ?Sized> {
     pub repo: &'a R,
@@ -16,14 +16,12 @@ impl<'a, R: PublicRepository + ?Sized> ListWorkspacePublic<'a, R> {
             .await?;
         Ok(rows
             .into_iter()
-            .map(
-                |(id, title, updated_at, published_at)| PublicDocumentSummaryDto {
-                    id,
-                    title,
-                    updated_at,
-                    published_at,
-                },
-            )
+            .map(|row| PublicDocumentSummaryDto {
+                id: row.id,
+                title: row.title,
+                updated_at: row.updated_at,
+                published_at: row.published_at,
+            })
             .collect())
     }
 }
diff --git a/api/src/application/use_cases/public/mod.rs b/api/crates/application/src/documents/use_cases/publishing/mod.rs
similarity index 100%
rename from api/src/application/use_cases/public/mod.rs
rename to api/crates/application/src/documents/use_cases/publishing/mod.rs
diff --git a/api/src/application/use_cases/public/publish.rs b/api/crates/application/src/documents/use_cases/publishing/publish.rs
similarity index 85%
rename from api/src/application/use_cases/public/publish.rs
rename to api/crates/application/src/documents/use_cases/publishing/publish.rs
index 88a7aa4b..9cfc3d93 100644
--- a/api/src/application/use_cases/public/publish.rs
+++ b/api/crates/application/src/documents/use_cases/publishing/publish.rs
@@ -1,6 +1,6 @@
 use uuid::Uuid;
 
-use crate::application::ports::public_repository::PublicRepository;
+use crate::documents::ports::publishing::public_repository::PublicRepository;
 
 #[derive(Debug, Clone)]
 pub struct PublishResponseDto {
     pub slug: String,
@@ -29,7 +29,7 @@ impl<'a, R: PublicRepository + ?Sized> PublishDocument<'a, R> {
         workspace_id: Uuid,
         doc_id: Uuid,
     ) -> anyhow::Result<Option<PublishResponseDto>> {
-        let (title, workspace_slug) = match self
+        let ws = match self
             .repo
             .ensure_workspace_title_and_slug(doc_id, workspace_id)
             .await?
@@ -37,7 +37,7 @@ impl<'a, R: PublicRepository + ?Sized> PublishDocument<'a, R> {
             Some(v) => v,
             None => return Ok(None),
         };
-        let mut base_slug = sanitize_title_local(&title);
+        let mut base_slug = sanitize_title_local(&ws.title);
         if base_slug.is_empty() {
             base_slug = doc_id.to_string();
         }
@@ -48,7 +48,7 @@ impl<'a, R: PublicRepository + ?Sized> PublishDocument<'a, R> {
             i += 1;
         }
         self.repo.upsert_public_document(doc_id, &slug).await?;
-        let public_url = format!("/w/{}/{}", workspace_slug, doc_id);
+        let public_url = format!("/w/{}/{}", ws.workspace_slug, doc_id);
         Ok(Some(PublishResponseDto { slug, public_url }))
     }
 }
diff --git a/api/src/application/use_cases/public/unpublish.rs b/api/crates/application/src/documents/use_cases/publishing/unpublish.rs
similarity index 69%
rename from api/src/application/use_cases/public/unpublish.rs
rename to api/crates/application/src/documents/use_cases/publishing/unpublish.rs
index 9e286cdb..6185eed2 100644
--- a/api/src/application/use_cases/public/unpublish.rs
+++ b/api/crates/application/src/documents/use_cases/publishing/unpublish.rs
@@ -1,6 +1,6 @@
 use uuid::Uuid;
 
-use crate::application::ports::public_repository::PublicRepository;
+use crate::documents::ports::publishing::public_repository::PublicRepository;
 
 pub struct UnpublishDocument<'a, R: PublicRepository + ?Sized> {
     pub repo: &'a R,
@@ -15,6 +15,9 @@ impl<'a, R: PublicRepository + ?Sized> UnpublishDocument<'a, R> {
         {
             return Ok(false);
         }
-        self.repo.delete_public_document(doc_id).await
+        self.repo
+            .delete_public_document(doc_id)
+            .await
+            .map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/documents/restore_snapshot.rs b/api/crates/application/src/documents/use_cases/restore_snapshot.rs
similarity index 91%
rename from api/src/application/use_cases/documents/restore_snapshot.rs
rename to api/crates/application/src/documents/use_cases/restore_snapshot.rs
index 991c23b3..233005c7 100644
--- a/api/src/application/use_cases/documents/restore_snapshot.rs
+++ b/api/crates/application/src/documents/use_cases/restore_snapshot.rs
@@ -1,9 +1,9 @@
 use chrono::Utc;
 use uuid::Uuid;
 
-use crate::application::ports::document_snapshot_archive_repository::SnapshotArchiveRecord;
-use crate::application::ports::realtime_port::RealtimeEngine;
-use crate::application::services::realtime::snapshot::{
+use crate::documents::ports::document_snapshot_archive_repository::SnapshotArchiveRecord;
+use crate::documents::ports::realtime::realtime_port::RealtimeEngine;
+use crate::documents::services::realtime::snapshot::{
     SnapshotArchiveKind, SnapshotArchiveOptions, SnapshotPersistOptions, SnapshotService,
     encode_doc_snapshot,
 };
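// Editorial sketch (not part of the patch): PublishDocument above derives a
// base slug from the workspace title and falls back to the document id when
// the sanitized title is empty. The loop between the hunks is elided, but from
// the visible "i += 1" it is assumed to append a numeric suffix until the slug
// is unique; this helper only restates that assumed candidate shape.
fn next_slug_candidate(base_slug: &str, i: u32) -> String {
    // e.g. "release-notes" -> "release-notes-1", "release-notes-2", ...
    format!("{}-{}", base_slug, i)
}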
diff --git a/api/src/application/use_cases/documents/search_documents.rs b/api/crates/application/src/documents/use_cases/search_documents.rs
similarity index 66%
rename from api/src/application/use_cases/documents/search_documents.rs
rename to api/crates/application/src/documents/use_cases/search_documents.rs
index de062881..2b2858c4 100644
--- a/api/src/application/use_cases/documents/search_documents.rs
+++ b/api/crates/application/src/documents/use_cases/search_documents.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;
 
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::domain::documents::document::SearchHit;
+use crate::documents::ports::document_repository::{DocumentRepoResult, DocumentRepository};
+use domain::documents::document::SearchHit;
 
 pub struct SearchDocuments<'a, R: DocumentRepository + ?Sized> {
     pub repo: &'a R,
@@ -13,7 +13,7 @@ impl<'a, R: DocumentRepository + ?Sized> SearchDocuments<'a, R> {
         workspace_id: Uuid,
         q: Option<String>,
         limit: i64,
-    ) -> anyhow::Result<Vec<SearchHit>> {
+    ) -> DocumentRepoResult<Vec<SearchHit>> {
         self.repo.search_for_user(workspace_id, q, limit).await
     }
 }
diff --git a/api/crates/application/src/documents/use_cases/sharing/browse_share.rs b/api/crates/application/src/documents/use_cases/sharing/browse_share.rs
new file mode 100644
index 00000000..1dbe993a
--- /dev/null
+++ b/api/crates/application/src/documents/use_cases/sharing/browse_share.rs
@@ -0,0 +1,71 @@
+use crate::documents::dtos::{ShareBrowseResponseDto, ShareBrowseTreeItemDto};
+use crate::documents::ports::sharing::shares_repository::SharesRepository;
+use domain::documents::doc_type::DocumentType;
+use domain::documents::share;
+
+pub struct BrowseShare<'a, R: SharesRepository + ?Sized> {
+    pub repo: &'a R,
+}
+
+impl<'a, R: SharesRepository + ?Sized> BrowseShare<'a, R> {
+    pub async fn execute(&self, token: &str) -> anyhow::Result<Option<ShareBrowseResponseDto>> {
+        let ctx = match self.repo.resolve_share_by_token(token).await? {
+            Some(ctx) => ctx,
+            None => return Ok(None),
+        };
+        if share::is_expired(ctx.expires_at.as_ref(), chrono::Utc::now()) {
+            return Ok(None);
+        }
+        // If token targets a document (not folder), return single node
+        if !ctx.shared_type.is_folder() {
+            let mut tree = Vec::new();
+            let doc_rows = self.repo.list_subtree_nodes(ctx.shared_id).await?;
+            if let Some(node) = doc_rows.into_iter().find(|n| n.id == ctx.shared_id) {
+                tree.push(ShareBrowseTreeItemDto {
+                    id: node.id,
+                    title: node.title.into_string(),
+                    parent_id: None,
+                    r#type: node.document_type.as_str().to_string(),
+                    created_at: node.created_at,
+                    updated_at: node.updated_at,
+                });
+            } else {
+                let fallback_title = self
+                    .repo
+                    .validate_share_token(token)
+                    .await?
+                    .map(|doc| doc.title.into_string())
+                    .unwrap_or_default();
+                tree.push(ShareBrowseTreeItemDto {
+                    id: ctx.shared_id,
+                    title: fallback_title,
+                    parent_id: None,
+                    r#type: ctx.shared_type.as_str().to_string(),
+                    created_at: chrono::Utc::now(),
+                    updated_at: chrono::Utc::now(),
+                });
+            }
+            return Ok(Some(ShareBrowseResponseDto { tree }));
+        }
+        // Folder: list subtree and filter to materialized shares under this folder share
+        let rows = self.repo.list_subtree_nodes(ctx.shared_id).await?;
+        let allowed = self.repo.list_materialized_children(ctx.share_id).await?;
+        let tree: Vec<ShareBrowseTreeItemDto> = rows
+            .into_iter()
+            .filter_map(|node| {
+                if node.document_type == DocumentType::Document && !allowed.contains(&node.id) {
+                    return None;
+                }
+                Some(ShareBrowseTreeItemDto {
+                    id: node.id,
+                    title: node.title.into_string(),
+                    parent_id: node.parent_id,
+                    r#type: node.document_type.as_str().to_string(),
+                    created_at: node.created_at,
+                    updated_at: node.updated_at,
+                })
+            })
+            .collect();
+        Ok(Some(ShareBrowseResponseDto { tree }))
+    }
+}
struct CreateShare<'a, R: SharesRepository + ?Sized> { pub repo: &'a R, @@ -9,7 +11,7 @@ pub struct CreateShare<'a, R: SharesRepository + ?Sized> { pub struct CreateShareResult { pub token: String, pub document_id: Uuid, - pub document_type: String, + pub document_type: DocumentType, } impl<'a, R: SharesRepository + ?Sized> CreateShare<'a, R> { @@ -18,17 +20,17 @@ impl<'a, R: SharesRepository + ?Sized> CreateShare<'a, R> { workspace_id: Uuid, actor_id: Uuid, document_id: Uuid, - permission: &str, + permission: SharePermission, expires_at: Option>, ) -> anyhow::Result { - let (token, _share_id, dtype) = self + let created = self .repo .create_share(workspace_id, actor_id, document_id, permission, expires_at) .await?; Ok(CreateShareResult { - token, + token: created.token, document_id, - document_type: dtype, + document_type: created.document_type, }) } } diff --git a/api/src/application/use_cases/shares/delete_share.rs b/api/crates/application/src/documents/use_cases/sharing/delete_share.rs similarity index 56% rename from api/src/application/use_cases/shares/delete_share.rs rename to api/crates/application/src/documents/use_cases/sharing/delete_share.rs index a2560081..6b061fd7 100644 --- a/api/src/application/use_cases/shares/delete_share.rs +++ b/api/crates/application/src/documents/use_cases/sharing/delete_share.rs @@ -1,4 +1,4 @@ -use crate::application::ports::shares_repository::SharesRepository; +use crate::documents::ports::sharing::shares_repository::SharesRepository; pub struct DeleteShare<'a, R: SharesRepository + ?Sized> { pub repo: &'a R, @@ -6,6 +6,9 @@ pub struct DeleteShare<'a, R: SharesRepository + ?Sized> { impl<'a, R: SharesRepository + ?Sized> DeleteShare<'a, R> { pub async fn execute(&self, workspace_id: uuid::Uuid, token: &str) -> anyhow::Result { - self.repo.delete_share(workspace_id, token).await + self.repo + .delete_share(workspace_id, token) + .await + .map_err(Into::into) } } diff --git a/api/src/application/use_cases/shares/list_active.rs b/api/crates/application/src/documents/use_cases/sharing/list_active.rs similarity index 70% rename from api/src/application/use_cases/shares/list_active.rs rename to api/crates/application/src/documents/use_cases/sharing/list_active.rs index 35ffb033..a70990d3 100644 --- a/api/src/application/use_cases/shares/list_active.rs +++ b/api/crates/application/src/documents/use_cases/sharing/list_active.rs @@ -1,7 +1,7 @@ use uuid::Uuid; -use crate::application::dto::shares::ActiveShareItemDto; -use crate::application::ports::shares_repository::SharesRepository; +use crate::documents::dtos::ActiveShareItemDto; +use crate::documents::ports::sharing::shares_repository::SharesRepository; pub struct ListActiveShares<'a, R: SharesRepository + ?Sized> { pub repo: &'a R, @@ -15,12 +15,12 @@ impl<'a, R: SharesRepository + ?Sized> ListActiveShares<'a, R> { items.push(ActiveShareItemDto { id: r.id, token: r.token, - permission: r.permission, + permission: r.permission.as_str().to_string(), expires_at: r.expires_at, created_at: r.created_at, document_id: r.document_id, - document_title: r.document_title, - document_type: r.document_type, + document_title: r.document_title.into_string(), + document_type: r.document_type.as_str().to_string(), parent_share_id: r.parent_share_id, }); } diff --git a/api/src/application/use_cases/shares/list_applicable.rs b/api/crates/application/src/documents/use_cases/sharing/list_applicable.rs similarity index 55% rename from api/src/application/use_cases/shares/list_applicable.rs rename to 
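
// The sharing hunks replace stringly-typed permissions and document
// types with domain enums, converting back to strings only at the DTO
// edge via `as_str()`. A sketch of the pattern; the variant names are
// assumptions, since the diff only shows the type names:
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SharePermission {
    View,
    Edit,
}

impl SharePermission {
    pub fn as_str(self) -> &'static str {
        match self {
            SharePermission::View => "view",
            SharePermission::Edit => "edit",
        }
    }
}

// DTO edge, as in `ListActiveShares` above:
fn to_dto_field(p: SharePermission) -> String {
    p.as_str().to_string()
}
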
api/crates/application/src/documents/use_cases/sharing/list_applicable.rs index b806eefb..99d10909 100644 --- a/api/src/application/use_cases/shares/list_applicable.rs +++ b/api/crates/application/src/documents/use_cases/sharing/list_applicable.rs @@ -1,7 +1,9 @@ use uuid::Uuid; -use crate::application::dto::shares::ApplicableShareDto; -use crate::application::ports::shares_repository::SharesRepository; +use crate::documents::dtos::ApplicableShareDto; +use crate::documents::ports::sharing::shares_repository::SharesRepository; +use domain::documents::doc_type::DOC_TYPE_DOCUMENT; +use domain::documents::share; pub struct ListApplicableShares<'a, R: SharesRepository + ?Sized> { pub repo: &'a R, @@ -18,16 +20,14 @@ impl<'a, R: SharesRepository + ?Sized> ListApplicableShares<'a, R> { .list_applicable_shares_for_doc(workspace_id, doc_id) .await?; let mut out = Vec::new(); - for (token, permission, expires_at) in rows.into_iter() { - if let Some(exp) = expires_at { - if exp < chrono::Utc::now() { - continue; - } + for row in rows.into_iter() { + if share::is_expired(row.expires_at.as_ref(), chrono::Utc::now()) { + continue; } out.push(ApplicableShareDto { - token, - permission, - scope: "document".into(), + token: row.token, + permission: row.permission.as_str().to_string(), + scope: DOC_TYPE_DOCUMENT.into(), excluded: false, }); } diff --git a/api/src/application/use_cases/shares/list_document_shares.rs b/api/crates/application/src/documents/use_cases/sharing/list_document_shares.rs similarity index 75% rename from api/src/application/use_cases/shares/list_document_shares.rs rename to api/crates/application/src/documents/use_cases/sharing/list_document_shares.rs index 7b338bf8..c78c3ac6 100644 --- a/api/src/application/use_cases/shares/list_document_shares.rs +++ b/api/crates/application/src/documents/use_cases/sharing/list_document_shares.rs @@ -1,7 +1,7 @@ use uuid::Uuid; -use crate::application::dto::shares::ShareItemDto; -use crate::application::ports::shares_repository::SharesRepository; +use crate::documents::dtos::ShareItemDto; +use crate::documents::ports::sharing::shares_repository::SharesRepository; pub struct ListDocumentShares<'a, R: SharesRepository + ?Sized> { pub repo: &'a R, @@ -22,10 +22,10 @@ impl<'a, R: SharesRepository + ?Sized> ListDocumentShares<'a, R> { .map(|r| ShareItemDto { id: r.id, token: r.token.clone(), - permission: r.permission, + permission: r.permission.as_str().to_string(), expires_at: r.expires_at, document_id: r.document_id, - document_type: r.document_type, + document_type: r.document_type.as_str().to_string(), parent_share_id: r.parent_share_id, }) .collect()) diff --git a/api/src/application/use_cases/shares/mod.rs b/api/crates/application/src/documents/use_cases/sharing/mod.rs similarity index 100% rename from api/src/application/use_cases/shares/mod.rs rename to api/crates/application/src/documents/use_cases/sharing/mod.rs diff --git a/api/crates/application/src/documents/use_cases/sharing/validate_share.rs b/api/crates/application/src/documents/use_cases/sharing/validate_share.rs new file mode 100644 index 00000000..a17900cd --- /dev/null +++ b/api/crates/application/src/documents/use_cases/sharing/validate_share.rs @@ -0,0 +1,25 @@ +use crate::documents::dtos::ShareDocumentDto; +use crate::documents::ports::sharing::shares_repository::SharesRepository; +use domain::documents::share; + +pub struct ValidateShare<'a, R: SharesRepository + ?Sized> { + pub repo: &'a R, +} + +impl<'a, R: SharesRepository + ?Sized> ValidateShare<'a, R> { + pub async fn 
execute(&self, token: &str) -> anyhow::Result> { + if let Some(doc) = self.repo.validate_share_token(token).await? { + if share::is_expired(doc.expires_at.as_ref(), chrono::Utc::now()) { + return Ok(None); + } + Ok(Some(ShareDocumentDto { + id: doc.document_id, + title: doc.title.into_string(), + permission: doc.permission.as_str().to_string(), + content: None, + })) + } else { + Ok(None) + } + } +} diff --git a/api/src/application/use_cases/documents/snapshot_diff.rs b/api/crates/application/src/documents/use_cases/snapshot_diff.rs similarity index 91% rename from api/src/application/use_cases/documents/snapshot_diff.rs rename to api/crates/application/src/documents/use_cases/snapshot_diff.rs index fe4a66a7..93ab9d7b 100644 --- a/api/src/application/use_cases/documents/snapshot_diff.rs +++ b/api/crates/application/src/documents/use_cases/snapshot_diff.rs @@ -1,11 +1,11 @@ use uuid::Uuid; -use crate::application::dto::diff::TextDiffResult; -use crate::application::dto::documents::SnapshotDiffBaseMode; -use crate::application::ports::document_snapshot_archive_repository::SnapshotArchiveRecord; -use crate::application::ports::realtime_port::RealtimeEngine; -use crate::application::services::diff::text_diff::compute_text_diff; -use crate::application::services::realtime::snapshot::SnapshotService; +use crate::core::dtos::TextDiffResult; +use crate::core::services::diff::text_diff::compute_text_diff; +use crate::documents::dtos::SnapshotDiffBaseMode; +use crate::documents::ports::document_snapshot_archive_repository::SnapshotArchiveRecord; +use crate::documents::ports::realtime::realtime_port::RealtimeEngine; +use crate::documents::services::realtime::snapshot::SnapshotService; pub enum SnapshotDiffSide { Current { diff --git a/api/src/application/use_cases/documents/snapshot_download.rs b/api/crates/application/src/documents/use_cases/snapshot_download.rs similarity index 93% rename from api/src/application/use_cases/documents/snapshot_download.rs rename to api/crates/application/src/documents/use_cases/snapshot_download.rs index bda6c0f6..fa59de66 100644 --- a/api/src/application/use_cases/documents/snapshot_download.rs +++ b/api/crates/application/src/documents/use_cases/snapshot_download.rs @@ -5,10 +5,10 @@ use anyhow::anyhow; use async_trait::async_trait; use uuid::Uuid; -use crate::application::ports::document_snapshot_archive_repository::SnapshotArchiveRecord; -use crate::application::ports::files_repository::FilesRepository; -use crate::application::ports::storage_port::StorageResolverPort; -use crate::application::services::realtime::snapshot::SnapshotService; +use crate::core::ports::storage::storage_port::StorageResolverPort; +use crate::documents::ports::document_snapshot_archive_repository::SnapshotArchiveRecord; +use crate::documents::ports::files::files_repository::FilesRepository; +use crate::documents::services::realtime::snapshot::SnapshotService; pub struct SnapshotDownload { pub filename: String, diff --git a/api/src/application/use_cases/tags/list_tags.rs b/api/crates/application/src/documents/use_cases/tagging/list_tags.rs similarity index 65% rename from api/src/application/use_cases/tags/list_tags.rs rename to api/crates/application/src/documents/use_cases/tagging/list_tags.rs index 27e2d3cb..7a5df793 100644 --- a/api/src/application/use_cases/tags/list_tags.rs +++ b/api/crates/application/src/documents/use_cases/tagging/list_tags.rs @@ -1,7 +1,7 @@ use uuid::Uuid; -use crate::application::dto::tags::TagItemDto; -use 
crate::application::ports::tag_repository::TagRepository; +use crate::documents::dtos::TagItemDto; +use crate::documents::ports::tagging::tag_repository::TagRepository; pub struct ListTags<'a, R: TagRepository + ?Sized> { pub repo: &'a R, @@ -16,7 +16,10 @@ impl<'a, R: TagRepository + ?Sized> ListTags<'a, R> { let rows = self.repo.list_tags(owner_id, filter).await?; Ok(rows .into_iter() - .map(|(name, count)| TagItemDto { name, count }) + .map(|row| TagItemDto { + name: row.name, + count: row.count, + }) .collect()) } } diff --git a/api/src/application/use_cases/tags/mod.rs b/api/crates/application/src/documents/use_cases/tagging/mod.rs similarity index 100% rename from api/src/application/use_cases/tags/mod.rs rename to api/crates/application/src/documents/use_cases/tagging/mod.rs diff --git a/api/crates/application/src/documents/use_cases/update_document.rs b/api/crates/application/src/documents/use_cases/update_document.rs new file mode 100644 index 00000000..399f76e6 --- /dev/null +++ b/api/crates/application/src/documents/use_cases/update_document.rs @@ -0,0 +1,73 @@ +use uuid::Uuid; + +use crate::documents::ports::document_repository::{ + DocumentRepoResult, DocumentRepositoryError, DocumentRepositoryTx, +}; +use domain::documents::doc_type::DocumentType; +use domain::documents::document::Document as DomainDocument; +use domain::documents::path as doc_path; +use domain::documents::title::Title; + +const MAX_SLUG_ATTEMPTS: usize = 50; + +pub struct UpdateDocument<'a, R> +where + R: DocumentRepositoryTx + ?Sized, +{ + pub repo: &'a mut R, +} + +impl<'a, R> UpdateDocument<'a, R> +where + R: DocumentRepositoryTx + ?Sized, +{ + // parent_id: None => not provided; Some(None) => set null; Some(Some(uuid)) => set value + #[allow(clippy::too_many_arguments)] + pub async fn execute( + &mut self, + id: Uuid, + workspace_id: Uuid, + current_title: &Title, + current_slug: &doc_path::Slug, + current_desired_path: &doc_path::DesiredPath, + doc_type: DocumentType, + title: Option<&Title>, + parent_id: Option>, + parent_desired_path: Option<&doc_path::DesiredPath>, + ) -> DocumentRepoResult> { + let next_title = title.unwrap_or(current_title); + let base_slug = if title.is_some() { + doc_path::Slug::from_title(next_title.as_str()) + } else { + current_slug.clone() + }; + let current_parent_path = doc_path::parent_desired_path(current_desired_path); + let parent_path = match parent_id { + Some(Some(_)) => parent_desired_path, + Some(None) => None, + None => current_parent_path.as_ref(), + }; + + for (slug, desired_path) in + doc_path::desired_path_candidates(&base_slug, parent_path, doc_type, MAX_SLUG_ATTEMPTS) + { + let result = self + .repo + .update_title_and_parent_for_user( + id, + workspace_id, + next_title, + parent_id, + &slug, + &desired_path, + ) + .await; + match result { + Ok(doc) => return Ok(doc), + Err(DocumentRepositoryError::PathConflict) => continue, + Err(err) => return Err(err), + } + } + Err(DocumentRepositoryError::PathConflict) + } +} diff --git a/api/src/application/dto/git.rs b/api/crates/application/src/git/dtos/git.rs similarity index 98% rename from api/src/application/dto/git.rs rename to api/crates/application/src/git/dtos/git.rs index 76bea535..3f8fc489 100644 --- a/api/src/application/dto/git.rs +++ b/api/crates/application/src/git/dtos/git.rs @@ -139,7 +139,7 @@ pub struct GitPullResultDto { pub struct GitPullSessionDto { pub id: uuid::Uuid, pub workspace_id: uuid::Uuid, - pub status: String, + pub status: domain::git::pull_session::GitPullSessionStatus, pub conflicts: 
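
// The new `UpdateDocument` use case above retries optimistically: it
// walks up to MAX_SLUG_ATTEMPTS slug/desired-path candidates, retrying
// only on `PathConflict` and surfacing any other repository error
// immediately. A sketch of that loop; `try_update` stands in for
// `update_title_and_parent_for_user`:
#[derive(Debug)]
enum RepoError {
    PathConflict,
    Other(String),
}

fn update_with_candidates(
    candidates: impl IntoIterator<Item = String>,
    mut try_update: impl FnMut(&str) -> Result<(), RepoError>,
) -> Result<String, RepoError> {
    for slug in candidates {
        match try_update(&slug) {
            Ok(()) => return Ok(slug),
            Err(RepoError::PathConflict) => continue, // slug taken: next candidate
            Err(err) => return Err(err),
        }
    }
    // Mirrors the diff: exhausting the candidates reports a conflict.
    Err(RepoError::PathConflict)
}
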
Vec, pub resolutions: Vec, pub message: Option, diff --git a/api/crates/application/src/git/dtos/mod.rs b/api/crates/application/src/git/dtos/mod.rs new file mode 100644 index 00000000..5ae97ace --- /dev/null +++ b/api/crates/application/src/git/dtos/mod.rs @@ -0,0 +1,3 @@ +mod git; + +pub use git::*; diff --git a/api/crates/application/src/git/mod.rs b/api/crates/application/src/git/mod.rs new file mode 100644 index 00000000..2e8e16cf --- /dev/null +++ b/api/crates/application/src/git/mod.rs @@ -0,0 +1,4 @@ +pub mod dtos; +pub mod ports; +pub mod services; +pub mod use_cases; diff --git a/api/crates/application/src/git/ports/git_pull_session_repository.rs b/api/crates/application/src/git/ports/git_pull_session_repository.rs new file mode 100644 index 00000000..7fe142cf --- /dev/null +++ b/api/crates/application/src/git/ports/git_pull_session_repository.rs @@ -0,0 +1,11 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; +use crate::git::dtos::GitPullSessionDto; + +#[async_trait] +pub trait GitPullSessionRepository: Send + Sync { + async fn upsert(&self, session: GitPullSessionDto) -> PortResult<()>; + async fn get(&self, workspace_id: Uuid, id: Uuid) -> PortResult>; +} diff --git a/api/src/application/ports/git_rebuild_job_queue.rs b/api/crates/application/src/git/ports/git_rebuild_job_queue.rs similarity index 60% rename from api/src/application/ports/git_rebuild_job_queue.rs rename to api/crates/application/src/git/ports/git_rebuild_job_queue.rs index afc55cac..c56d422a 100644 --- a/api/src/application/ports/git_rebuild_job_queue.rs +++ b/api/crates/application/src/git/ports/git_rebuild_job_queue.rs @@ -1,6 +1,8 @@ use async_trait::async_trait; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + #[derive(Debug, Clone)] pub struct GitRebuildJob { pub id: i64, @@ -17,8 +19,8 @@ pub trait GitRebuildJobQueue: Send + Sync { workspace_id: Uuid, actor_id: Option, permission_snapshot: &[String], - ) -> anyhow::Result<()>; - async fn fetch_next(&self, lock_timeout_secs: i64) -> anyhow::Result>; - async fn complete(&self, job_id: i64) -> anyhow::Result<()>; - async fn fail(&self, job_id: i64, error: &str) -> anyhow::Result<()>; + ) -> PortResult<()>; + async fn fetch_next(&self, lock_timeout_secs: i64) -> PortResult>; + async fn complete(&self, job_id: i64) -> PortResult<()>; + async fn fail(&self, job_id: i64, error: &str) -> PortResult<()>; } diff --git a/api/crates/application/src/git/ports/git_repository.rs b/api/crates/application/src/git/ports/git_repository.rs new file mode 100644 index 00000000..19190d73 --- /dev/null +++ b/api/crates/application/src/git/ports/git_repository.rs @@ -0,0 +1,67 @@ +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use domain::git::auth::GitAuthType; +use domain::git::sync_log::{GitSyncOperation, GitSyncStatus}; +use serde_json::Value; +use uuid::Uuid; + +use crate::core::ports::errors::PortResult; + +#[derive(Debug, Clone)] +pub struct GitConfigRecord { + pub id: Uuid, + pub repository_url: String, + pub branch_name: String, + pub auth_type: GitAuthType, + pub auto_sync: bool, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +#[derive(Debug, Clone)] +pub struct GitLastSyncLog { + pub created_at: Option>, + pub status: Option, + pub message: Option, + pub commit_hash: Option, +} + +#[derive(Debug, Clone)] +pub struct UserGitCfg { + pub repository_url: String, + pub branch_name: String, + pub auth_type: Option, + pub auth_data: Option, + pub auto_sync: bool, +} + +#[async_trait] 
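
// The ports in these hunks switch from `anyhow::Result` to a shared
// `PortResult` alias, and the use cases bridge back into anyhow with
// `.map_err(Into::into)`. The exact shape of the error type in
// `core::ports::errors` is not shown in this diff, so the struct below
// is an assumption illustrating the convention:
use std::fmt;

#[derive(Debug)]
pub struct PortError(pub String);

impl fmt::Display for PortError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "port error: {}", self.0)
    }
}

impl std::error::Error for PortError {}

pub type PortResult<T> = Result<T, PortError>;

// Use-case side: widen the port error into anyhow, as the hunks do.
pub fn bridge(result: PortResult<u64>) -> anyhow::Result<u64> {
    result.map_err(Into::into)
}
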
+pub trait GitRepository: Send + Sync { + async fn get_config(&self, workspace_id: Uuid) -> PortResult>; + async fn upsert_config( + &self, + workspace_id: Uuid, + repository_url: &str, + branch_name: Option<&str>, + auth_type: GitAuthType, + auth_data: &Value, + auto_sync: Option, + ) -> PortResult; + async fn delete_config(&self, workspace_id: Uuid) -> PortResult; + async fn load_user_git_cfg(&self, workspace_id: Uuid) -> PortResult>; + async fn get_last_sync_log(&self, workspace_id: Uuid) -> PortResult>; + async fn log_sync_operation( + &self, + workspace_id: Uuid, + operation: GitSyncOperation, + status: GitSyncStatus, + message: Option<&str>, + commit_hash: Option<&str>, + ) -> PortResult<()>; + + async fn delete_sync_logs(&self, workspace_id: Uuid) -> PortResult<()>; + + async fn delete_repository_state(&self, workspace_id: Uuid) -> PortResult<()>; + + async fn list_auto_sync_workspaces(&self) -> PortResult>; +} diff --git a/api/src/application/ports/git_storage.rs b/api/crates/application/src/git/ports/git_storage.rs similarity index 52% rename from api/src/application/ports/git_storage.rs rename to api/crates/application/src/git/ports/git_storage.rs index 1f9e57a3..13fe2324 100644 --- a/api/src/application/ports/git_storage.rs +++ b/api/crates/application/src/git/ports/git_storage.rs @@ -5,6 +5,8 @@ use async_trait::async_trait; use futures_core::Stream; use uuid::Uuid; +use crate::core::ports::errors::PortResult; + pub type CommitId = Vec; #[derive(Debug, Clone)] @@ -31,60 +33,49 @@ pub struct BlobKey { pub path: String, } -pub type PackStream = Pin> + Send>>; +pub type PackStream = Pin> + Send>>; #[async_trait] pub trait GitStorage: Send + Sync { - async fn latest_commit(&self, user_id: Uuid) -> anyhow::Result>; - async fn store_pack(&self, user_id: Uuid, pack: &[u8], meta: &CommitMeta) - -> anyhow::Result<()>; - async fn load_pack_chain( - &self, - user_id: Uuid, - until: Option<&[u8]>, - ) -> anyhow::Result; - async fn put_blob(&self, key: &BlobKey, data: &[u8]) -> anyhow::Result<()>; - async fn fetch_blob(&self, key: &BlobKey) -> anyhow::Result>; - async fn commit_meta( - &self, - user_id: Uuid, - commit_id: &[u8], - ) -> anyhow::Result>; - async fn restore_commit_meta(&self, user_id: Uuid, meta: &CommitMeta) -> anyhow::Result<()>; + async fn latest_commit(&self, user_id: Uuid) -> PortResult>; + async fn store_pack(&self, user_id: Uuid, pack: &[u8], meta: &CommitMeta) -> PortResult<()>; + async fn load_pack_chain(&self, user_id: Uuid, until: Option<&[u8]>) -> PortResult; + async fn put_blob(&self, key: &BlobKey, data: &[u8]) -> PortResult<()>; + async fn fetch_blob(&self, key: &BlobKey) -> PortResult>; + async fn commit_meta(&self, user_id: Uuid, commit_id: &[u8]) -> PortResult>; + async fn restore_commit_meta(&self, user_id: Uuid, meta: &CommitMeta) -> PortResult<()>; async fn fetch_pack_for_commit( &self, user_id: Uuid, commit_id: &[u8], - ) -> anyhow::Result>>; - async fn delete_blob(&self, key: &BlobKey) -> anyhow::Result<()>; - async fn delete_pack(&self, user_id: Uuid, commit_id: &[u8]) -> anyhow::Result<()>; - async fn set_latest_commit( - &self, - user_id: Uuid, - meta: Option<&CommitMeta>, - ) -> anyhow::Result<()>; - async fn delete_all(&self, user_id: Uuid) -> anyhow::Result<()>; + ) -> PortResult>>; + async fn delete_blob(&self, key: &BlobKey) -> PortResult<()>; + async fn delete_pack(&self, user_id: Uuid, commit_id: &[u8]) -> PortResult<()>; + async fn set_latest_commit(&self, user_id: Uuid, meta: Option<&CommitMeta>) -> PortResult<()>; + async fn 
delete_all(&self, user_id: Uuid) -> PortResult<()>; } pub fn encode_commit_id(bytes: &[u8]) -> String { bytes.iter().map(|b| format!("{:02x}", b)).collect() } -pub fn decode_commit_id(hex: &str) -> anyhow::Result { - if hex.len() % 2 != 0 { - anyhow::bail!("invalid commit id length"); +pub fn decode_commit_id(hex: &str) -> PortResult { + if !hex.len().is_multiple_of(2) { + return Err(anyhow::anyhow!("invalid commit id length").into()); } let mut out = Vec::with_capacity(hex.len() / 2); let chars: Vec = hex.chars().collect(); for chunk in chars.chunks(2) { - let hi = chunk - .get(0) + let [hi, lo] = chunk else { + return Err(anyhow::anyhow!("invalid commit id").into()); + }; + let hi = hi + .to_digit(16) .ok_or_else(|| anyhow::anyhow!("invalid commit id"))?; - let lo = chunk - .get(1) + let lo = lo + .to_digit(16) .ok_or_else(|| anyhow::anyhow!("invalid commit id"))?; - let byte = u8::from_str_radix(&format!("{}{}", hi, lo), 16)?; - out.push(byte); + out.push(((hi << 4) | lo) as u8); } Ok(out) } diff --git a/api/crates/application/src/git/ports/git_workspace.rs b/api/crates/application/src/git/ports/git_workspace.rs new file mode 100644 index 00000000..95b292b0 --- /dev/null +++ b/api/crates/application/src/git/ports/git_workspace.rs @@ -0,0 +1,59 @@ +use async_trait::async_trait; +use uuid::Uuid; + +use crate::core::dtos::TextDiffResult; +use crate::core::ports::errors::PortResult; +use crate::git::dtos::{ + GitChangeItem, GitCommitInfo, GitImportOutcome, GitPullRequestDto, GitPullResultDto, + GitRemoteCheckDto, GitSyncOutcome, GitSyncRequestDto, GitWorkspaceStatus, +}; +use crate::git::ports::git_repository::UserGitCfg; + +#[async_trait] +pub trait GitWorkspacePort: Send + Sync { + async fn ensure_repository(&self, workspace_id: Uuid, default_branch: &str) -> PortResult<()>; + async fn remove_repository(&self, workspace_id: Uuid) -> PortResult<()>; + async fn status(&self, workspace_id: Uuid) -> PortResult; + async fn list_changes(&self, workspace_id: Uuid) -> PortResult>; + async fn working_diff(&self, workspace_id: Uuid) -> PortResult>; + async fn commit_diff( + &self, + workspace_id: Uuid, + from: &str, + to: &str, + ) -> PortResult>; + async fn history(&self, workspace_id: Uuid) -> PortResult>; + async fn sync( + &self, + workspace_id: Uuid, + req: &GitSyncRequestDto, + cfg: Option<&UserGitCfg>, + ) -> PortResult; + async fn import_repository( + &self, + workspace_id: Uuid, + actor_id: Uuid, + cfg: &UserGitCfg, + ) -> PortResult; + async fn pull( + &self, + workspace_id: Uuid, + actor_id: Uuid, + req: &GitPullRequestDto, + cfg: &UserGitCfg, + ) -> PortResult; + async fn head_commit(&self, workspace_id: Uuid) -> PortResult>>; + async fn remote_head( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> PortResult>>; + async fn has_pending_changes(&self, workspace_id: Uuid) -> PortResult; + async fn drift_since_commit(&self, workspace_id: Uuid, base_commit: &[u8]) -> PortResult; + + async fn check_remote( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> PortResult; +} diff --git a/api/crates/application/src/git/ports/gitignore_port.rs b/api/crates/application/src/git/ports/gitignore_port.rs new file mode 100644 index 00000000..efe70c79 --- /dev/null +++ b/api/crates/application/src/git/ports/gitignore_port.rs @@ -0,0 +1,10 @@ +use async_trait::async_trait; + +use crate::core::ports::errors::PortResult; + +#[async_trait] +pub trait GitignorePort: Send + Sync { + async fn ensure_gitignore(&self, dir: &str) -> PortResult; + async fn upsert_gitignore_patterns(&self, dir: &str, 
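
// A round-trip sketch of the commit-id hex codec rewritten above in
// `git_storage.rs`, with the same slice-pattern and nibble math; error
// handling is simplified to `Option` here, whereas the real code
// returns `PortResult` with anyhow-backed errors:
fn encode_commit_id(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

fn decode_commit_id(hex: &str) -> Option<Vec<u8>> {
    if hex.len() % 2 != 0 {
        return None; // odd-length input cannot be valid
    }
    let chars: Vec<char> = hex.chars().collect();
    let mut out = Vec::with_capacity(hex.len() / 2);
    for chunk in chars.chunks(2) {
        let [hi, lo] = chunk else { return None };
        let (hi, lo) = (hi.to_digit(16)?, lo.to_digit(16)?);
        out.push(((hi << 4) | lo) as u8);
    }
    Some(out)
}

fn main() {
    let id = vec![0xab, 0x01, 0xff];
    assert_eq!(decode_commit_id(&encode_commit_id(&id)), Some(id));
    assert_eq!(decode_commit_id("abc"), None); // odd length rejected
}
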
patterns: &[String]) -> PortResult; + async fn read_gitignore_patterns(&self, dir: &str) -> PortResult>; +} diff --git a/api/crates/application/src/git/ports/mod.rs b/api/crates/application/src/git/ports/mod.rs new file mode 100644 index 00000000..ab43457c --- /dev/null +++ b/api/crates/application/src/git/ports/mod.rs @@ -0,0 +1,6 @@ +pub mod git_pull_session_repository; +pub mod git_rebuild_job_queue; +pub mod git_repository; +pub mod git_storage; +pub mod git_workspace; +pub mod gitignore_port; diff --git a/api/src/application/services/git.rs b/api/crates/application/src/git/services/mod.rs similarity index 66% rename from api/src/application/services/git.rs rename to api/crates/application/src/git/services/mod.rs index 22e523ad..248dbba0 100644 --- a/api/src/application/services/git.rs +++ b/api/crates/application/src/git/services/mod.rs @@ -2,38 +2,44 @@ use std::sync::Arc; use uuid::Uuid; -use crate::application::dto::diff::TextDiffResult; -use crate::application::dto::git::{ - GitChangeItem, GitCommitInfo, GitConfigDto, GitPullConflictItemDto, GitPullRequestDto, - GitPullResolutionDto, GitPullResultDto, GitPullSessionDto, GitRemoteCheckDto, GitStatusDto, - GitSyncRequestDto, GitSyncResponseDto, GitignoreUpdateDto, UpsertGitConfigInput, +use crate::core::dtos::TextDiffResult; +use crate::core::ports::storage::storage_port::StorageResolverPort; +use crate::core::services::errors::ServiceError; +use crate::documents::ports::document_repository::DocumentRepository; +use crate::documents::ports::files::files_repository::FilesRepository; +use crate::git::dtos::{ + GitChangeItem, GitCommitInfo, GitConfigDto, GitImportOutcome, GitPullConflictItemDto, + GitPullRequestDto, GitPullResolutionDto, GitPullResultDto, GitPullSessionDto, + GitRemoteCheckDto, GitStatusDto, GitSyncRequestDto, GitSyncResponseDto, GitignoreUpdateDto, + UpsertGitConfigInput, }; -use crate::application::ports::document_repository::DocumentRepository; -use crate::application::ports::files_repository::FilesRepository; -use crate::application::ports::git_pull_session_repository::GitPullSessionRepository; -use crate::application::ports::git_repository::GitRepository; -use crate::application::ports::git_workspace::GitWorkspacePort; -use crate::application::ports::gitignore_port::GitignorePort; -use crate::application::ports::storage_port::StorageResolverPort; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::git::delete_config::DeleteGitConfig; -use crate::application::use_cases::git::get_changes::GetChanges; -use crate::application::use_cases::git::get_commit_diff::GetCommitDiff; -use crate::application::use_cases::git::get_config::GetGitConfig; -use crate::application::use_cases::git::get_history::GetHistory; -use crate::application::use_cases::git::get_status::GetGitStatus; -use crate::application::use_cases::git::get_working_diff::GetWorkingDiff; -use crate::application::use_cases::git::gitignore_patterns::{ +use crate::git::ports::git_pull_session_repository::GitPullSessionRepository; +use crate::git::ports::git_repository::GitRepository; +use crate::git::ports::git_workspace::GitWorkspacePort; +use crate::git::ports::gitignore_port::GitignorePort; +use crate::git::use_cases::delete_config::DeleteGitConfig; +use crate::git::use_cases::get_changes::GetChanges; +use crate::git::use_cases::get_commit_diff::GetCommitDiff; +use crate::git::use_cases::get_config::GetGitConfig; +use crate::git::use_cases::get_history::GetHistory; +use crate::git::use_cases::get_status::GetGitStatus; 
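
// The rewritten imports in this hunk imply the application crate's new
// internal layout. A compilable sketch of that tree (empty inline
// modules, inferred from the `use` paths in this diff, so treat it as
// an approximation rather than the actual lib.rs):
pub mod core {
    pub mod dtos {}
    pub mod ports {}
    pub mod services {}
}
pub mod documents {
    pub mod dtos {}
    pub mod ports {}
    pub mod services {}
    pub mod use_cases {}
}
pub mod git {
    pub mod dtos {}
    pub mod ports {}
    pub mod services {}
    pub mod use_cases {}
}
pub mod workspaces {
    pub mod services {}
}
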
+use crate::git::use_cases::get_working_diff::GetWorkingDiff; +use crate::git::use_cases::gitignore_patterns::{ AddGitignorePatterns, CheckPathIgnored, GetGitignorePatterns, }; -use crate::application::use_cases::git::ignore_document::IgnoreDocument; -use crate::application::use_cases::git::ignore_folder::IgnoreFolder; -use crate::application::use_cases::git::init_repo::{DeinitRepo, InitRepo}; -use crate::application::use_cases::git::pull::PullRepository; -use crate::application::use_cases::git::sync_now::SyncNow; -use crate::application::use_cases::git::upsert_config::UpsertGitConfig; +use crate::git::use_cases::ignore_document::IgnoreDocument; +use crate::git::use_cases::ignore_folder::IgnoreFolder; +use crate::git::use_cases::init_repo::{DeinitRepo, InitRepo}; +use crate::git::use_cases::pull::PullRepository; +use crate::git::use_cases::sync_now::SyncNow; +use crate::git::use_cases::upsert_config::UpsertGitConfig; +use async_trait::async_trait; +use domain::git::pull_session::GitPullSessionStatus; use tracing::warn; +pub mod rebuild; +pub mod rebuild_scheduler; + pub struct GitService { repo: Arc, storage: Arc, @@ -49,6 +55,261 @@ pub struct FinalizePullSessionResult { pub git_status: Option, } +#[async_trait] +pub trait GitServiceFacade: Send + Sync { + async fn get_config(&self, workspace_id: Uuid) -> Result, ServiceError>; + async fn check_remote( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError>; + async fn upsert_config( + &self, + workspace_id: Uuid, + input: &UpsertGitConfigInput, + ) -> Result; + async fn delete_config(&self, workspace_id: Uuid) -> Result<(), ServiceError>; + async fn get_status(&self, workspace_id: Uuid) -> Result; + async fn sync_now( + &self, + workspace_id: Uuid, + payload: GitSyncRequestDto, + ) -> Result; + async fn init_repository(&self, workspace_id: Uuid) -> Result<(), ServiceError>; + async fn deinit_repository(&self, workspace_id: Uuid) -> Result<(), ServiceError>; + async fn get_changes(&self, workspace_id: Uuid) -> Result, ServiceError>; + async fn get_history(&self, workspace_id: Uuid) -> Result, ServiceError>; + async fn get_working_diff( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError>; + async fn get_commit_diff( + &self, + workspace_id: Uuid, + from: &str, + to: &str, + ) -> Result, ServiceError>; + async fn import_repository( + &self, + workspace_id: Uuid, + actor_id: Uuid, + input: &UpsertGitConfigInput, + ) -> Result; + async fn ignore_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + ) -> Result; + async fn ignore_folder( + &self, + workspace_id: Uuid, + folder_id: Uuid, + ) -> Result; + async fn add_gitignore_patterns( + &self, + workspace_id: Uuid, + patterns: Vec, + ) -> Result; + async fn get_gitignore_patterns(&self, workspace_id: Uuid) + -> Result, ServiceError>; + async fn check_path_ignored( + &self, + workspace_id: Uuid, + path: &str, + ) -> Result; + async fn pull_repository( + &self, + workspace_id: Uuid, + actor_id: Uuid, + req: GitPullRequestDto, + ) -> Result; + async fn start_pull_session_flow( + &self, + workspace_id: Uuid, + actor_id: Uuid, + ) -> Result; + async fn load_pull_session_with_stale_check( + &self, + workspace_id: Uuid, + session_id: Uuid, + ) -> Result, ServiceError>; + async fn resolve_pull_session_flow( + &self, + workspace_id: Uuid, + actor_id: Uuid, + session_id: Uuid, + resolutions: Vec, + ) -> Result; + async fn finalize_pull_session_flow( + &self, + workspace_id: Uuid, + session_id: Uuid, + ) -> Result; +} + +#[async_trait] +impl GitServiceFacade for GitService { + async fn 
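
// The `GitServiceFacade` trait introduced in this hunk follows a common
// delegation pattern: an #[async_trait] trait whose impl on the
// concrete service calls the same-named inherent methods (inherent
// methods win method lookup, so the impl does not recurse). A minimal
// sketch with illustrative names, not the real API surface:
use async_trait::async_trait;
use std::sync::Arc;

struct Service;

impl Service {
    async fn status(&self) -> String {
        "clean".to_string()
    }
}

#[async_trait]
trait ServiceFacade: Send + Sync {
    async fn status(&self) -> String;
}

#[async_trait]
impl ServiceFacade for Service {
    async fn status(&self) -> String {
        // Resolves to the inherent `Service::status`, not this method.
        self.status().await
    }
}

#[tokio::main]
async fn main() {
    // Handlers can now depend on `Arc<dyn ServiceFacade>` instead of
    // the concrete service, which is what the facade buys the routes.
    let svc: Arc<dyn ServiceFacade> = Arc::new(Service);
    assert_eq!(svc.status().await, "clean");
}
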
get_config(&self, workspace_id: Uuid) -> Result, ServiceError> { + self.get_config(workspace_id).await + } + + async fn check_remote( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.check_remote(workspace_id).await + } + + async fn upsert_config( + &self, + workspace_id: Uuid, + input: &UpsertGitConfigInput, + ) -> Result { + self.upsert_config(workspace_id, input).await + } + + async fn delete_config(&self, workspace_id: Uuid) -> Result<(), ServiceError> { + self.delete_config(workspace_id).await + } + + async fn get_status(&self, workspace_id: Uuid) -> Result { + self.get_status(workspace_id).await + } + + async fn sync_now( + &self, + workspace_id: Uuid, + payload: GitSyncRequestDto, + ) -> Result { + self.sync_now(workspace_id, payload).await + } + + async fn init_repository(&self, workspace_id: Uuid) -> Result<(), ServiceError> { + self.init_repository(workspace_id).await + } + + async fn deinit_repository(&self, workspace_id: Uuid) -> Result<(), ServiceError> { + self.deinit_repository(workspace_id).await + } + + async fn get_changes(&self, workspace_id: Uuid) -> Result, ServiceError> { + self.get_changes(workspace_id).await + } + + async fn get_history(&self, workspace_id: Uuid) -> Result, ServiceError> { + self.get_history(workspace_id).await + } + + async fn get_working_diff( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.get_working_diff(workspace_id).await + } + + async fn get_commit_diff( + &self, + workspace_id: Uuid, + from: &str, + to: &str, + ) -> Result, ServiceError> { + self.get_commit_diff(workspace_id, from, to).await + } + + async fn import_repository( + &self, + workspace_id: Uuid, + actor_id: Uuid, + input: &UpsertGitConfigInput, + ) -> Result { + self.import_repository(workspace_id, actor_id, input).await + } + + async fn ignore_document( + &self, + workspace_id: Uuid, + doc_id: Uuid, + ) -> Result { + self.ignore_document(workspace_id, doc_id).await + } + + async fn ignore_folder( + &self, + workspace_id: Uuid, + folder_id: Uuid, + ) -> Result { + self.ignore_folder(workspace_id, folder_id).await + } + + async fn add_gitignore_patterns( + &self, + workspace_id: Uuid, + patterns: Vec, + ) -> Result { + self.add_gitignore_patterns(workspace_id, patterns).await + } + + async fn get_gitignore_patterns( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.get_gitignore_patterns(workspace_id).await + } + + async fn check_path_ignored( + &self, + workspace_id: Uuid, + path: &str, + ) -> Result { + self.check_path_ignored(workspace_id, path).await + } + + async fn pull_repository( + &self, + workspace_id: Uuid, + actor_id: Uuid, + req: GitPullRequestDto, + ) -> Result { + self.pull_repository(workspace_id, actor_id, req).await + } + + async fn start_pull_session_flow( + &self, + workspace_id: Uuid, + actor_id: Uuid, + ) -> Result { + self.start_pull_session_flow(workspace_id, actor_id).await + } + + async fn load_pull_session_with_stale_check( + &self, + workspace_id: Uuid, + session_id: Uuid, + ) -> Result, ServiceError> { + self.load_pull_session_with_stale_check(workspace_id, session_id) + .await + } + + async fn resolve_pull_session_flow( + &self, + workspace_id: Uuid, + actor_id: Uuid, + session_id: Uuid, + resolutions: Vec, + ) -> Result { + self.resolve_pull_session_flow(workspace_id, actor_id, session_id, resolutions) + .await + } + + async fn finalize_pull_session_flow( + &self, + workspace_id: Uuid, + session_id: Uuid, + ) -> Result { + self.finalize_pull_session_flow(workspace_id, session_id) + .await + 
} +} + impl GitService { #[allow(clippy::too_many_arguments)] pub fn new( @@ -250,7 +511,7 @@ impl GitService { workspace_id: Uuid, actor_id: Uuid, input: &UpsertGitConfigInput, - ) -> Result { + ) -> Result { // Save configuration first let _ = self.upsert_config(workspace_id, input).await?; let cfg = self @@ -382,9 +643,9 @@ impl GitService { ServiceError::BadRequest("workspace_has_pending_changes") } else if msg.contains("not initialized") { ServiceError::BadRequest("repository_not_initialized") - } else if msg.contains("remote not configured") { - ServiceError::BadRequest("remote_not_configured") - } else if msg.contains("git_not_configured") { + } else if msg.contains("remote not configured") + || msg.contains("git_not_configured") + { ServiceError::BadRequest("remote_not_configured") } else if msg.contains("custom_text content required") { ServiceError::BadRequest("resolution_content_required") @@ -425,11 +686,11 @@ impl GitService { dto.base_commit = Some(head); } let status = if !dto.success && conflicts.is_empty() { - "error".to_string() + GitPullSessionStatus::Error } else if conflicts.is_empty() { - "merged".to_string() + GitPullSessionStatus::Merged } else { - "pending".to_string() + GitPullSessionStatus::Pending }; let session = GitPullSessionDto { id: session_id, @@ -458,7 +719,7 @@ impl GitService { .ok_or(ServiceError::NotFound)?; if self.pull_session_is_stale(workspace_id, &existing).await? { let mut stale = existing.clone(); - stale.status = "stale".to_string(); + stale.status = GitPullSessionStatus::Stale; stale.message = Some("Pull session is stale".to_string()); let _ = self.save_pull_session(stale.clone()).await; return Ok(stale); @@ -475,19 +736,19 @@ impl GitService { .await?; let conflicts = dto.conflicts.clone().unwrap_or_default(); let status = if !dto.success && conflicts.is_empty() { - "error".to_string() + GitPullSessionStatus::Error } else if conflicts.is_empty() { - "merged".to_string() + GitPullSessionStatus::Merged } else { - "resolving".to_string() + GitPullSessionStatus::Resolving }; // When the pull completed (no conflicts), record the latest head as the session base so // subsequent finalize calls don't treat the session as stale. let mut base_commit = dto.base_commit.clone(); - if conflicts.is_empty() { - if let Some(head) = self.workspace.head_commit(workspace_id).await? { - base_commit = Some(head); - } + if conflicts.is_empty() + && let Some(head) = self.workspace.head_commit(workspace_id).await? + { + base_commit = Some(head); } let session = GitPullSessionDto { id: session_id, @@ -512,14 +773,14 @@ impl GitService { .load_pull_session(workspace_id, session_id) .await? .ok_or(ServiceError::NotFound)?; - if existing.status == "merged" { + if existing.status == GitPullSessionStatus::Merged { let git_status = self.get_status(workspace_id).await?; return Ok(FinalizePullSessionResult { session: existing, git_status: Some(git_status), }); } - if existing.status == "stale" { + if existing.status == GitPullSessionStatus::Stale { let mut stale = existing.clone(); if stale.message.is_none() { stale.message = Some("Pull session is stale".to_string()); @@ -530,19 +791,17 @@ impl GitService { git_status: None, }); } - if existing.status == "error" { + if existing.status == GitPullSessionStatus::Error { return Ok(FinalizePullSessionResult { session: existing, git_status: None, }); } - if matches!(existing.status.as_str(), "pending" | "resolving") - && self - .pull_session_is_stale(workspace_id, &existing) - .await? 
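
// The hunks above and below replace the string statuses ("pending",
// "resolving", "merged", "stale", "error") with a domain enum. A sketch
// of `GitPullSessionStatus`; the five variants and `is_in_progress`
// follow their usage in this diff, though the real domain type likely
// carries more (serde derives, `as_str`, and so on):
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GitPullSessionStatus {
    Pending,
    Resolving,
    Merged,
    Stale,
    Error,
}

impl GitPullSessionStatus {
    pub fn is_in_progress(self) -> bool {
        // Replaces matches!(status.as_str(), "pending" | "resolving").
        matches!(self, Self::Pending | Self::Resolving)
    }
}
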
+ if existing.status.is_in_progress() + && self.pull_session_is_stale(workspace_id, &existing).await? { let mut stale = existing.clone(); - stale.status = "stale".to_string(); + stale.status = GitPullSessionStatus::Stale; if stale.message.is_none() { stale.message = Some("Pull session is stale".to_string()); } @@ -562,7 +821,7 @@ impl GitService { let merged = GitPullSessionDto { id: session_id, workspace_id, - status: "merged".to_string(), + status: GitPullSessionStatus::Merged, conflicts: Vec::new(), resolutions: existing.resolutions.clone(), message: Some("merge completed".to_string()), @@ -585,12 +844,10 @@ impl GitService { Some(s) => s, None => return Ok(None), }; - if matches!(session.status.as_str(), "pending" | "resolving") - && self - .pull_session_is_stale(workspace_id, &session) - .await? + if session.status.is_in_progress() + && self.pull_session_is_stale(workspace_id, &session).await? { - session.status = "stale".to_string(); + session.status = GitPullSessionStatus::Stale; session.message = Some("Pull session is stale".to_string()); let _ = self.save_pull_session(session.clone()).await; } @@ -625,13 +882,13 @@ impl GitService { let mut matched = None; for doc in docs.iter() { let mut paths: Vec = Vec::new(); - if let Some(p) = doc.path.as_ref() { + if let Some(p) = doc.path() { let norm = normalize(p); if !norm.is_empty() { paths.push(norm); } } - let desired = normalize(&doc.desired_path); + let desired = normalize(doc.desired_path().as_str()); if !desired.is_empty() { paths.push(desired); } @@ -641,16 +898,16 @@ impl GitService { || candidate.ends_with(&format!("/{p}")) || p.ends_with(&candidate) }) { - matched = Some(doc.id); + matched = Some(doc.id()); break; } } conflict.document_id = matched; - if let Some(doc_id) = matched { - if let Some(doc) = docs.iter().find(|d| d.id == doc_id) { - conflict.path = doc.desired_path.clone(); - } + if let Some(doc_id) = matched + && let Some(doc) = docs.iter().find(|d| d.id() == doc_id) + { + conflict.path = doc.desired_path().as_str().to_string(); } out.push(conflict); } diff --git a/api/src/application/services/git_rebuild.rs b/api/crates/application/src/git/services/rebuild.rs similarity index 76% rename from api/src/application/services/git_rebuild.rs rename to api/crates/application/src/git/services/rebuild.rs index 6d4993c7..4b5d8147 100644 --- a/api/src/application/services/git_rebuild.rs +++ b/api/crates/application/src/git/services/rebuild.rs @@ -5,15 +5,18 @@ use tracing::{error, info, warn}; #[cfg(test)] use uuid::Uuid; -use crate::application::dto::git::GitSyncRequestDto; -use crate::application::ports::git_rebuild_job_queue::{GitRebuildJob, GitRebuildJobQueue}; -use crate::application::ports::git_repository::GitRepository; -use crate::application::ports::git_workspace::GitWorkspacePort; -use crate::application::services::metrics::MetricsRegistry; -use crate::application::services::workspaces::WorkspacePermissionResolver; -use crate::application::services::workspaces::permission_snapshot::permission_set_from_snapshot; -use crate::application::use_cases::git::helpers::needs_force_retry; -use crate::domain::workspaces::permissions::{PERM_GIT_SYNC, PermissionSet}; +use crate::core::services::metrics::MetricsRegistry; +use crate::core::services::worker::WorkerTick; +use crate::git::dtos::GitSyncRequestDto; +use crate::git::ports::git_rebuild_job_queue::{GitRebuildJob, GitRebuildJobQueue}; +use crate::git::ports::git_repository::GitRepository; +use crate::git::ports::git_workspace::GitWorkspacePort; +use 
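
// These hunks also adopt `let` chains (`if cond && let Some(x) = expr`),
// available on the 2024-edition toolchain this workspace targets, to
// flatten the previously nested `if`/`if let` blocks. A minimal sketch
// of the shape, with illustrative names:
fn base_commit(conflicts: &[String], head: Option<Vec<u8>>) -> Option<Vec<u8>> {
    // Before: `if conflicts.is_empty() { if let Some(h) = head { ... } }`.
    if conflicts.is_empty()
        && let Some(h) = head
    {
        return Some(h);
    }
    None
}
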
crate::git::use_cases::helpers::needs_force_retry; +use crate::workspaces::services::WorkspacePermissionResolver; +use crate::workspaces::services::permission_snapshot::permission_set_from_snapshot; +use domain::access::permissions::{PERM_GIT_SYNC, PermissionSet}; +use domain::git::policy; +use domain::git::sync_log::{GitSyncOperation, GitSyncStatus}; pub struct GitRebuildService { jobs: Arc, @@ -61,26 +64,22 @@ impl GitRebuildService { self } - pub async fn run(self: Arc) { - loop { - match self.jobs.fetch_next(self.lock_timeout_secs).await { - Ok(Some(job)) => { - if let Err(err) = self.process_job(&job).await { - error!(error = ?err, job_id = job.id, "git_rebuild_job_failed"); - } - } - Ok(None) => tokio::time::sleep(self.idle_backoff).await, - Err(err) => { - error!(error = ?err, "git_rebuild_fetch_failed"); - tokio::time::sleep(self.idle_backoff).await; + pub async fn tick(&self) -> anyhow::Result { + match self.jobs.fetch_next(self.lock_timeout_secs).await { + Ok(Some(job)) => { + if let Err(err) = self.process_job(&job).await { + error!(error = ?err, job_id = job.id, "git_rebuild_job_failed"); } + Ok(WorkerTick::Processed) } + Ok(None) => Ok(WorkerTick::Idle), + Err(err) => Err(err.into()), } } async fn process_job(&self, job: &GitRebuildJob) -> anyhow::Result<()> { let permissions = self.permissions_for_job(job).await; - if !permissions.allows(PERM_GIT_SYNC) { + if policy::ensure_git_sync_allowed(&permissions).is_err() { warn!( workspace_id = %job.workspace_id, "git_rebuild_missing_permission" @@ -123,10 +122,10 @@ impl GitRebuildService { .await { Ok(outcome) => outcome, - Err(err) => return self.on_job_error(job, err).await, + Err(err) => return self.on_job_error(job, err.into()).await, } } else { - return self.on_job_error(job, err).await; + return self.on_job_error(job, err.into()).await; } } }; @@ -142,8 +141,8 @@ impl GitRebuildService { .git_repo .log_sync_operation( job.workspace_id, - "commit", - "success", + GitSyncOperation::Commit, + GitSyncStatus::Success, Some(&outcome.message), outcome.commit_hash.as_deref(), ) @@ -171,7 +170,13 @@ impl GitRebuildService { ); if let Err(log_err) = self .git_repo - .log_sync_operation(job.workspace_id, "commit", "error", Some(&msg), None) + .log_sync_operation( + job.workspace_id, + GitSyncOperation::Commit, + GitSyncStatus::Error, + Some(&msg), + None, + ) .await { warn!( @@ -242,7 +247,8 @@ mod tests { use std::collections::VecDeque; use std::sync::Mutex; - use crate::application::services::errors::ServiceError; + use crate::core::ports::errors::PortResult; + use crate::core::services::errors::ServiceError; struct RecordingWorkspace { outcomes: Mutex>, @@ -272,19 +278,19 @@ mod tests { &self, _workspace_id: Uuid, _default_branch: &str, - ) -> anyhow::Result<()> { + ) -> PortResult<()> { unimplemented!() } - async fn remove_repository(&self, _workspace_id: Uuid) -> anyhow::Result<()> { + async fn remove_repository(&self, _workspace_id: Uuid) -> PortResult<()> { unimplemented!() } async fn status( &self, _workspace_id: Uuid, - ) -> anyhow::Result { - Ok(crate::application::dto::git::GitWorkspaceStatus { + ) -> PortResult { + Ok(crate::git::dtos::GitWorkspaceStatus { repository_initialized: true, current_branch: Some("main".into()), uncommitted_changes: 0, @@ -295,14 +301,14 @@ mod tests { async fn list_changes( &self, _workspace_id: Uuid, - ) -> anyhow::Result> { + ) -> PortResult> { unimplemented!() } async fn working_diff( &self, _workspace_id: Uuid, - ) -> anyhow::Result> { + ) -> PortResult> { unimplemented!() } @@ -311,14 +317,14 
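
// The `run` loop deleted above becomes a `tick()` returning `WorkerTick`,
// so a generic runner can own the loop and the idle backoff. The variant
// names come from the diff; the runner itself is an assumption about how
// the bootstrap crate drives these services:
use std::time::Duration;

#[derive(Debug, PartialEq, Eq)]
pub enum WorkerTick {
    Processed,
    Idle,
}

pub async fn run_worker<F, Fut>(mut tick: F, idle_backoff: Duration)
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<WorkerTick>>,
{
    loop {
        match tick().await {
            Ok(WorkerTick::Processed) => {} // more work may be queued; poll again
            Ok(WorkerTick::Idle) => tokio::time::sleep(idle_backoff).await,
            Err(err) => {
                tracing::error!(error = ?err, "worker tick failed");
                tokio::time::sleep(idle_backoff).await;
            }
        }
    }
}
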
@@ mod tests { _workspace_id: Uuid, _from: &str, _to: &str, - ) -> anyhow::Result> { + ) -> PortResult> { unimplemented!() } async fn history( &self, _workspace_id: Uuid, - ) -> anyhow::Result> { + ) -> PortResult> { unimplemented!() } @@ -326,13 +332,13 @@ mod tests { &self, _workspace_id: Uuid, req: &GitSyncRequestDto, - _cfg: Option<&crate::application::ports::git_repository::UserGitCfg>, - ) -> anyhow::Result { + _cfg: Option<&crate::git::ports::git_repository::UserGitCfg>, + ) -> PortResult { self.outcomes.lock().unwrap().push(req.clone()); if let Some(err) = self.failures.lock().unwrap().pop_front() { - Err(err) + Err(err.into()) } else { - Ok(crate::application::dto::git::GitSyncOutcome { + Ok(crate::git::dtos::GitSyncOutcome { files_changed: 1, commit_hash: Some("abc123".into()), pushed: false, @@ -345,9 +351,9 @@ mod tests { &self, _workspace_id: Uuid, _actor_id: Uuid, - _cfg: &crate::application::ports::git_repository::UserGitCfg, - ) -> anyhow::Result { - Ok(crate::application::dto::git::GitImportOutcome { + _cfg: &crate::git::ports::git_repository::UserGitCfg, + ) -> PortResult { + Ok(crate::git::dtos::GitImportOutcome { files_changed: 0, commit_hash: None, docs_created: 0, @@ -360,10 +366,10 @@ mod tests { &self, _workspace_id: Uuid, _actor_id: Uuid, - _req: &crate::application::dto::git::GitPullRequestDto, - _cfg: &crate::application::ports::git_repository::UserGitCfg, - ) -> anyhow::Result { - Ok(crate::application::dto::git::GitPullResultDto { + _req: &crate::git::dtos::GitPullRequestDto, + _cfg: &crate::git::ports::git_repository::UserGitCfg, + ) -> PortResult { + Ok(crate::git::dtos::GitPullResultDto { success: true, message: "ok".to_string(), files_changed: 0, @@ -377,28 +383,28 @@ mod tests { async fn check_remote( &self, _workspace_id: Uuid, - _cfg: &crate::application::ports::git_repository::UserGitCfg, - ) -> anyhow::Result { - Ok(crate::application::dto::git::GitRemoteCheckDto { + _cfg: &crate::git::ports::git_repository::UserGitCfg, + ) -> PortResult { + Ok(crate::git::dtos::GitRemoteCheckDto { ok: true, message: "ok".into(), reason: None, }) } - async fn head_commit(&self, _workspace_id: Uuid) -> anyhow::Result>> { + async fn head_commit(&self, _workspace_id: Uuid) -> PortResult>> { Ok(None) } async fn remote_head( &self, _workspace_id: Uuid, - _cfg: &crate::application::ports::git_repository::UserGitCfg, - ) -> anyhow::Result>> { + _cfg: &crate::git::ports::git_repository::UserGitCfg, + ) -> PortResult>> { Ok(None) } - async fn has_pending_changes(&self, _workspace_id: Uuid) -> anyhow::Result { + async fn has_pending_changes(&self, _workspace_id: Uuid) -> PortResult { Ok(false) } @@ -406,7 +412,7 @@ mod tests { &self, _workspace_id: Uuid, _base_commit: &[u8], - ) -> anyhow::Result { + ) -> PortResult { Ok(false) } } @@ -432,23 +438,20 @@ mod tests { _workspace_id: Uuid, _actor_id: Option, _permission_snapshot: &[String], - ) -> anyhow::Result<()> { + ) -> PortResult<()> { Ok(()) } - async fn fetch_next( - &self, - _lock_timeout_secs: i64, - ) -> anyhow::Result> { + async fn fetch_next(&self, _lock_timeout_secs: i64) -> PortResult> { Ok(None) } - async fn complete(&self, job_id: i64) -> anyhow::Result<()> { + async fn complete(&self, job_id: i64) -> PortResult<()> { self.complete.lock().unwrap().push(job_id); Ok(()) } - async fn fail(&self, job_id: i64, _error: &str) -> anyhow::Result<()> { + async fn fail(&self, job_id: i64, _error: &str) -> PortResult<()> { self.failed.lock().unwrap().push(job_id); Ok(()) } @@ -471,17 +474,7 @@ mod tests { async fn get_config( 
&self, _user_id: Uuid, - ) -> anyhow::Result< - Option<( - Uuid, - String, - String, - String, - bool, - chrono::DateTime, - chrono::DateTime, - )>, - > { + ) -> PortResult> { unimplemented!() } @@ -490,67 +483,52 @@ mod tests { _user_id: Uuid, _repository_url: &str, _branch_name: Option<&str>, - _auth_type: &str, + _auth_type: domain::git::auth::GitAuthType, _auth_data: &serde_json::Value, _auto_sync: Option, - ) -> anyhow::Result<( - Uuid, - String, - String, - String, - bool, - chrono::DateTime, - chrono::DateTime, - )> { + ) -> PortResult { unimplemented!() } - async fn delete_config(&self, _user_id: Uuid) -> anyhow::Result { + async fn delete_config(&self, _user_id: Uuid) -> PortResult { unimplemented!() } async fn load_user_git_cfg( &self, _user_id: Uuid, - ) -> anyhow::Result> { + ) -> PortResult> { Ok(None) } async fn get_last_sync_log( &self, _user_id: Uuid, - ) -> anyhow::Result< - Option<( - Option>, - Option, - Option, - Option, - )>, - > { + ) -> PortResult> { Ok(None) } async fn log_sync_operation( &self, _workspace_id: Uuid, - _operation: &str, - status: &str, + _operation: domain::git::sync_log::GitSyncOperation, + status: domain::git::sync_log::GitSyncStatus, _message: Option<&str>, _commit_hash: Option<&str>, - ) -> anyhow::Result<()> { - *self.last_status.lock().unwrap() = Some(status.to_string()); + ) -> PortResult<()> { + *self.last_status.lock().unwrap() = Some(status.as_str().to_string()); Ok(()) } - async fn delete_sync_logs(&self, _workspace_id: Uuid) -> anyhow::Result<()> { + async fn delete_sync_logs(&self, _workspace_id: Uuid) -> PortResult<()> { Ok(()) } - async fn delete_repository_state(&self, _workspace_id: Uuid) -> anyhow::Result<()> { + async fn delete_repository_state(&self, _workspace_id: Uuid) -> PortResult<()> { Ok(()) } - async fn list_auto_sync_workspaces(&self) -> anyhow::Result> { + async fn list_auto_sync_workspaces(&self) -> PortResult> { Ok(Vec::new()) } } diff --git a/api/src/application/services/git_rebuild_scheduler.rs b/api/crates/application/src/git/services/rebuild_scheduler.rs similarity index 58% rename from api/src/application/services/git_rebuild_scheduler.rs rename to api/crates/application/src/git/services/rebuild_scheduler.rs index 0999c8c4..aa4a2819 100644 --- a/api/src/application/services/git_rebuild_scheduler.rs +++ b/api/crates/application/src/git/services/rebuild_scheduler.rs @@ -1,13 +1,12 @@ use std::sync::Arc; -use std::time::Duration; use tracing::{debug, error, info}; use uuid::Uuid; -use crate::application::ports::git_rebuild_job_queue::GitRebuildJobQueue; -use crate::application::ports::git_repository::GitRepository; -use crate::application::ports::git_workspace::GitWorkspacePort; -use crate::domain::workspaces::permissions::{ +use crate::git::ports::git_rebuild_job_queue::GitRebuildJobQueue; +use crate::git::ports::git_repository::GitRepository; +use crate::git::ports::git_workspace::GitWorkspacePort; +use domain::access::permissions::{ PERM_GIT_CONFIGURE, PERM_GIT_INIT, PERM_GIT_SYNC, PermissionSet, }; @@ -17,7 +16,6 @@ pub struct GitRebuildScheduler { jobs: Arc, git_repo: Arc, workspace: Arc, - interval: Duration, } impl GitRebuildScheduler { @@ -25,33 +23,28 @@ impl GitRebuildScheduler { jobs: Arc, git_repo: Arc, workspace: Arc, - interval: Duration, ) -> Self { Self { jobs, git_repo, workspace, - interval, } } - pub async fn run(self) { - loop { - match self.git_repo.list_auto_sync_workspaces().await { - Ok(ids) => { - for workspace_id in ids { - if let Err(err) = self.enqueue_job_if_ready(workspace_id).await { - 
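
// The rebuild tests above use recording mocks: plain structs whose
// `Mutex<Vec<_>>` fields capture which jobs were completed or failed so
// assertions can inspect them afterwards. A stripped-down sketch:
use std::sync::Mutex;

#[derive(Default)]
struct RecordingQueue {
    completed: Mutex<Vec<i64>>,
    failed: Mutex<Vec<i64>>,
}

impl RecordingQueue {
    fn complete(&self, job_id: i64) {
        self.completed.lock().unwrap().push(job_id);
    }
    fn fail(&self, job_id: i64) {
        self.failed.lock().unwrap().push(job_id);
    }
}

fn main() {
    let q = RecordingQueue::default();
    q.complete(7);
    q.fail(9);
    assert_eq!(*q.completed.lock().unwrap(), vec![7]);
    assert_eq!(*q.failed.lock().unwrap(), vec![9]);
}
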
error!( - error = ?err, - workspace_id = %workspace_id, - "git_rebuild_enqueue_failed" - ); - } + pub async fn tick(&self) { + match self.git_repo.list_auto_sync_workspaces().await { + Ok(ids) => { + for workspace_id in ids { + if let Err(err) = self.enqueue_job_if_ready(workspace_id).await { + error!( + error = ?err, + workspace_id = %workspace_id, + "git_rebuild_enqueue_failed" + ); } } - Err(err) => error!(error = ?err, "git_rebuild_scheduler_workspace_list_failed"), } - tokio::time::sleep(self.interval).await; + Err(err) => error!(error = ?err, "git_rebuild_scheduler_workspace_list_failed"), } } diff --git a/api/src/application/use_cases/git/delete_config.rs b/api/crates/application/src/git/use_cases/delete_config.rs similarity index 60% rename from api/src/application/use_cases/git/delete_config.rs rename to api/crates/application/src/git/use_cases/delete_config.rs index 523ca35b..abe62532 100644 --- a/api/src/application/use_cases/git/delete_config.rs +++ b/api/crates/application/src/git/use_cases/delete_config.rs @@ -1,4 +1,4 @@ -use crate::application::ports::git_repository::GitRepository; +use crate::git::ports::git_repository::GitRepository; use uuid::Uuid; pub struct DeleteGitConfig<'a, R: GitRepository + ?Sized> { @@ -7,6 +7,9 @@ pub struct DeleteGitConfig<'a, R: GitRepository + ?Sized> { impl<'a, R: GitRepository + ?Sized> DeleteGitConfig<'a, R> { pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result { - self.repo.delete_config(workspace_id).await + self.repo + .delete_config(workspace_id) + .await + .map_err(Into::into) } } diff --git a/api/src/application/use_cases/git/get_changes.rs b/api/crates/application/src/git/use_cases/get_changes.rs similarity index 55% rename from api/src/application/use_cases/git/get_changes.rs rename to api/crates/application/src/git/use_cases/get_changes.rs index 701caea6..a0ac67e2 100644 --- a/api/src/application/use_cases/git/get_changes.rs +++ b/api/crates/application/src/git/use_cases/get_changes.rs @@ -1,5 +1,5 @@ -use crate::application::dto::git::GitChangeItem; -use crate::application::ports::git_workspace::GitWorkspacePort; +use crate::git::dtos::GitChangeItem; +use crate::git::ports::git_workspace::GitWorkspacePort; use uuid::Uuid; pub struct GetChanges<'a, W: GitWorkspacePort + ?Sized> { @@ -8,6 +8,9 @@ pub struct GetChanges<'a, W: GitWorkspacePort + ?Sized> { impl<'a, W: GitWorkspacePort + ?Sized> GetChanges<'a, W> { pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result> { - self.workspace.list_changes(workspace_id).await + self.workspace + .list_changes(workspace_id) + .await + .map_err(Into::into) } } diff --git a/api/src/application/use_cases/git/get_commit_diff.rs b/api/crates/application/src/git/use_cases/get_commit_diff.rs similarity index 60% rename from api/src/application/use_cases/git/get_commit_diff.rs rename to api/crates/application/src/git/use_cases/get_commit_diff.rs index 256c4b0b..0803e04a 100644 --- a/api/src/application/use_cases/git/get_commit_diff.rs +++ b/api/crates/application/src/git/use_cases/get_commit_diff.rs @@ -1,5 +1,5 @@ -use crate::application::dto::diff::TextDiffResult; -use crate::application::ports::git_workspace::GitWorkspacePort; +use crate::core::dtos::TextDiffResult; +use crate::git::ports::git_workspace::GitWorkspacePort; use uuid::Uuid; pub struct GetCommitDiff<'a, W: GitWorkspacePort + ?Sized> { @@ -13,6 +13,9 @@ impl<'a, W: GitWorkspacePort + ?Sized> GetCommitDiff<'a, W> { from: String, to: String, ) -> anyhow::Result> { - self.workspace.commit_diff(workspace_id, &from, 
&to).await + self.workspace + .commit_diff(workspace_id, &from, &to) + .await + .map_err(Into::into) } } diff --git a/api/crates/application/src/git/use_cases/get_config.rs b/api/crates/application/src/git/use_cases/get_config.rs new file mode 100644 index 00000000..aa30dc26 --- /dev/null +++ b/api/crates/application/src/git/use_cases/get_config.rs @@ -0,0 +1,25 @@ +use crate::git::dtos::GitConfigDto; +use crate::git::ports::git_repository::GitRepository; +use uuid::Uuid; + +pub struct GetGitConfig<'a, R: GitRepository + ?Sized> { + pub repo: &'a R, +} + +impl<'a, R: GitRepository + ?Sized> GetGitConfig<'a, R> { + pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result> { + Ok(self + .repo + .get_config(workspace_id) + .await? + .map(|record| GitConfigDto { + id: record.id, + repository_url: record.repository_url, + branch_name: record.branch_name, + auth_type: record.auth_type.as_str().to_string(), + auto_sync: record.auto_sync, + created_at: record.created_at, + updated_at: record.updated_at, + })) + } +} diff --git a/api/src/application/use_cases/git/get_history.rs b/api/crates/application/src/git/use_cases/get_history.rs similarity index 56% rename from api/src/application/use_cases/git/get_history.rs rename to api/crates/application/src/git/use_cases/get_history.rs index 4011b8a6..7c5c11ee 100644 --- a/api/src/application/use_cases/git/get_history.rs +++ b/api/crates/application/src/git/use_cases/get_history.rs @@ -1,5 +1,5 @@ -use crate::application::dto::git::GitCommitInfo; -use crate::application::ports::git_workspace::GitWorkspacePort; +use crate::git::dtos::GitCommitInfo; +use crate::git::ports::git_workspace::GitWorkspacePort; use uuid::Uuid; pub struct GetHistory<'a, W: GitWorkspacePort + ?Sized> { @@ -8,6 +8,9 @@ pub struct GetHistory<'a, W: GitWorkspacePort + ?Sized> { impl<'a, W: GitWorkspacePort + ?Sized> GetHistory<'a, W> { pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result> { - self.workspace.history(workspace_id).await + self.workspace + .history(workspace_id) + .await + .map_err(Into::into) } } diff --git a/api/src/application/use_cases/git/get_status.rs b/api/crates/application/src/git/use_cases/get_status.rs similarity index 61% rename from api/src/application/use_cases/git/get_status.rs rename to api/crates/application/src/git/use_cases/get_status.rs index 4ca02b67..03f295bb 100644 --- a/api/src/application/use_cases/git/get_status.rs +++ b/api/crates/application/src/git/use_cases/get_status.rs @@ -1,6 +1,6 @@ -use crate::application::dto::git::{GitStatusDto, GitWorkspaceStatus}; -use crate::application::ports::git_repository::GitRepository; -use crate::application::ports::git_workspace::GitWorkspacePort; +use crate::git::dtos::{GitStatusDto, GitWorkspaceStatus}; +use crate::git::ports::git_repository::GitRepository; +use crate::git::ports::git_workspace::GitWorkspacePort; use uuid::Uuid; pub struct GetGitStatus<'a, R, W> @@ -19,12 +19,9 @@ where { pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result { let cfg_row = self.repo.get_config(workspace_id).await?; - let (repository_url, auto_sync) = - if let Some((_id, url, _branch, _auth_type, auto_sync, _c, _u)) = cfg_row { - (url, auto_sync) - } else { - (String::new(), false) - }; + let (repository_url, auto_sync) = cfg_row + .map(|cfg| (cfg.repository_url, cfg.auto_sync)) + .unwrap_or((String::new(), false)); let GitWorkspaceStatus { repository_initialized, @@ -33,11 +30,16 @@ where untracked_files, } = self.workspace.status(workspace_id).await?; - let (last_sync, 
diff --git a/api/src/application/use_cases/git/get_status.rs b/api/crates/application/src/git/use_cases/get_status.rs
similarity index 61%
rename from api/src/application/use_cases/git/get_status.rs
rename to api/crates/application/src/git/use_cases/get_status.rs
index 4ca02b67..03f295bb 100644
--- a/api/src/application/use_cases/git/get_status.rs
+++ b/api/crates/application/src/git/use_cases/get_status.rs
@@ -1,6 +1,6 @@
-use crate::application::dto::git::{GitStatusDto, GitWorkspaceStatus};
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
+use crate::git::dtos::{GitStatusDto, GitWorkspaceStatus};
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
 use uuid::Uuid;
 
 pub struct GetGitStatus<'a, R, W>
@@ -19,12 +19,9 @@ where
 {
     pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result<GitStatusDto> {
         let cfg_row = self.repo.get_config(workspace_id).await?;
-        let (repository_url, auto_sync) =
-            if let Some((_id, url, _branch, _auth_type, auto_sync, _c, _u)) = cfg_row {
-                (url, auto_sync)
-            } else {
-                (String::new(), false)
-            };
+        let (repository_url, auto_sync) = cfg_row
+            .map(|cfg| (cfg.repository_url, cfg.auto_sync))
+            .unwrap_or((String::new(), false));
 
         let GitWorkspaceStatus {
             repository_initialized,
@@ -33,11 +30,16 @@ where
             untracked_files,
         } = self.workspace.status(workspace_id).await?;
 
-        let (last_sync, last_sync_status, last_sync_message, last_sync_commit_hash) = self
-            .repo
-            .get_last_sync_log(workspace_id)
-            .await?
-            .unwrap_or((None, None, None, None));
+        let last = self.repo.get_last_sync_log(workspace_id).await?;
+        let (last_sync, last_sync_status, last_sync_message, last_sync_commit_hash) = match last {
+            Some(log) => (
+                log.created_at,
+                log.status.map(|s| s.as_str().to_string()),
+                log.message,
+                log.commit_hash,
+            ),
+            None => (None, None, None, None),
+        };
 
         Ok(GitStatusDto {
             repository_initialized,
diff --git a/api/src/application/use_cases/git/get_working_diff.rs b/api/crates/application/src/git/use_cases/get_working_diff.rs
similarity index 56%
rename from api/src/application/use_cases/git/get_working_diff.rs
rename to api/crates/application/src/git/use_cases/get_working_diff.rs
index f78d7eff..200a0e37 100644
--- a/api/src/application/use_cases/git/get_working_diff.rs
+++ b/api/crates/application/src/git/use_cases/get_working_diff.rs
@@ -1,5 +1,5 @@
-use crate::application::dto::diff::TextDiffResult;
-use crate::application::ports::git_workspace::GitWorkspacePort;
+use crate::core::dtos::TextDiffResult;
+use crate::git::ports::git_workspace::GitWorkspacePort;
 use uuid::Uuid;
 
 pub struct GetWorkingDiff<'a, W: GitWorkspacePort + ?Sized> {
@@ -8,6 +8,9 @@ impl<'a, W: GitWorkspacePort + ?Sized> GetWorkingDiff<'a, W> {
     pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result<Vec<TextDiffResult>> {
-        self.workspace.working_diff(workspace_id).await
+        self.workspace
+            .working_diff(workspace_id)
+            .await
+            .map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/git/gitignore_patterns.rs b/api/crates/application/src/git/use_cases/gitignore_patterns.rs
similarity index 92%
rename from api/src/application/use_cases/git/gitignore_patterns.rs
rename to api/crates/application/src/git/use_cases/gitignore_patterns.rs
index f1792fd8..213c6490 100644
--- a/api/src/application/use_cases/git/gitignore_patterns.rs
+++ b/api/crates/application/src/git/use_cases/gitignore_patterns.rs
@@ -1,6 +1,6 @@
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::gitignore_port::GitignorePort;
-use crate::application::ports::storage_port::StorageResolverPort;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::git::ports::gitignore_port::GitignorePort;
 
 pub struct GetGitignorePatterns<'a, G, S>
 where
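The get_status.rs hunk above swaps positional-tuple destructuring for a typed record plus Option combinators. Reduced to its shape (field names from the diff; the full record type lives in the application crate and is an assumption here):

    struct GitConfigRecord {
        repository_url: String,
        auto_sync: bool,
    }

    // Defaults applied when no git config row exists, exactly as in the hunk.
    fn url_and_auto_sync(cfg_row: Option<GitConfigRecord>) -> (String, bool) {
        cfg_row
            .map(|cfg| (cfg.repository_url, cfg.auto_sync))
            .unwrap_or((String::new(), false))
    }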
diff --git a/api/src/application/use_cases/git/helpers.rs b/api/crates/application/src/git/use_cases/helpers.rs
similarity index 90%
rename from api/src/application/use_cases/git/helpers.rs
rename to api/crates/application/src/git/use_cases/helpers.rs
index 0b4f7495..f8e01993 100644
--- a/api/src/application/use_cases/git/helpers.rs
+++ b/api/crates/application/src/git/use_cases/helpers.rs
@@ -1,7 +1,8 @@
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::storage_port::StorageResolverPort;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::files::files_repository::FilesRepository;
 use anyhow::Error;
+use domain::documents::doc_type::DocumentType;
 use uuid::Uuid;
 
 fn strip_user_prefix(owner_id: Uuid, rel_from_uploads: &str) -> String {
@@ -36,7 +37,7 @@ pub async fn compute_doc_patterns_with<
     let dtype = meta.doc_type;
 
     // Folder: ignore the entire directory under the repo root
-    if dtype == "folder" {
+    if dtype == DocumentType::Folder {
         let dir_full = storage.build_doc_dir(node_id).await?; // .../uploads//
         let rel_from_uploads = storage.relative_from_uploads(&dir_full);
         let repo_rel = strip_user_prefix(owner_id, &rel_from_uploads);
diff --git a/api/src/application/use_cases/git/ignore_document.rs b/api/crates/application/src/git/use_cases/ignore_document.rs
similarity index 76%
rename from api/src/application/use_cases/git/ignore_document.rs
rename to api/crates/application/src/git/use_cases/ignore_document.rs
index 86bd1e66..b006f085 100644
--- a/api/src/application/use_cases/git/ignore_document.rs
+++ b/api/crates/application/src/git/use_cases/ignore_document.rs
@@ -1,11 +1,11 @@
 use uuid::Uuid;
 
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::gitignore_port::GitignorePort;
-use crate::application::ports::storage_port::StorageResolverPort;
-use crate::application::use_cases::git::helpers::compute_doc_patterns_with;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::files::files_repository::FilesRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::git::ports::gitignore_port::GitignorePort;
+use crate::git::use_cases::helpers::compute_doc_patterns_with;
 
 pub struct IgnoreDocument<'a, G, S, F, D, W>
 where
diff --git a/api/src/application/use_cases/git/ignore_folder.rs b/api/crates/application/src/git/use_cases/ignore_folder.rs
similarity index 76%
rename from api/src/application/use_cases/git/ignore_folder.rs
rename to api/crates/application/src/git/use_cases/ignore_folder.rs
index eab3252d..40151927 100644
--- a/api/src/application/use_cases/git/ignore_folder.rs
+++ b/api/crates/application/src/git/use_cases/ignore_folder.rs
@@ -1,11 +1,11 @@
 use uuid::Uuid;
 
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::gitignore_port::GitignorePort;
-use crate::application::ports::storage_port::StorageResolverPort;
-use crate::application::use_cases::git::helpers::compute_doc_patterns_with;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::files::files_repository::FilesRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::git::ports::gitignore_port::GitignorePort;
+use crate::git::use_cases::helpers::compute_doc_patterns_with;
 
 pub struct IgnoreFolder<'a, G, S, F, D, W>
 where
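helpers.rs now compares against domain::documents::doc_type::DocumentType instead of the old stringly-typed "folder" check. The enum's real definition is in the domain crate and is not shown in this diff; a plausible minimal shape that satisfies these call sites:

    // Assumed shape; the actual enum may carry more variants and attributes.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum DocumentType {
        Document,
        Folder,
    }

    fn is_folder(doc_type: DocumentType) -> bool {
        doc_type == DocumentType::Folder
    }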
diff --git a/api/src/application/use_cases/git/init_repo.rs b/api/crates/application/src/git/use_cases/init_repo.rs
similarity index 65%
rename from api/src/application/use_cases/git/init_repo.rs
rename to api/crates/application/src/git/use_cases/init_repo.rs
index 0fcaab63..991a11ac 100644
--- a/api/src/application/use_cases/git/init_repo.rs
+++ b/api/crates/application/src/git/use_cases/init_repo.rs
@@ -1,7 +1,7 @@
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::gitignore_port::GitignorePort;
-use crate::application::ports::storage_port::StorageResolverPort;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::git::ports::gitignore_port::GitignorePort;
 use uuid::Uuid;
 
 pub struct InitRepo<'a, R, G, S, W>
@@ -25,11 +25,12 @@ where
     W: GitWorkspacePort + ?Sized,
 {
     pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result<()> {
-        let default_branch = if let Some(row) = self.repo.get_config(workspace_id).await? {
-            row.2
-        } else {
-            "main".to_string()
-        };
+        let default_branch = self
+            .repo
+            .get_config(workspace_id)
+            .await?
+            .map(|row| row.branch_name)
+            .unwrap_or_else(|| "main".to_string());
 
         self.workspace
             .ensure_repository(workspace_id, &default_branch)
@@ -47,6 +48,9 @@ pub struct DeinitRepo<'a, W: GitWorkspacePort + ?Sized> {
 impl<'a, W: GitWorkspacePort + ?Sized> DeinitRepo<'a, W> {
     pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result<()> {
-        self.workspace.remove_repository(workspace_id).await
+        self.workspace
+            .remove_repository(workspace_id)
+            .await
+            .map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/git/mod.rs b/api/crates/application/src/git/use_cases/mod.rs
similarity index 100%
rename from api/src/application/use_cases/git/mod.rs
rename to api/crates/application/src/git/use_cases/mod.rs
diff --git a/api/src/application/use_cases/git/pull.rs b/api/crates/application/src/git/use_cases/pull.rs
similarity index 77%
rename from api/src/application/use_cases/git/pull.rs
rename to api/crates/application/src/git/use_cases/pull.rs
index f1d8d801..8756e07f 100644
--- a/api/src/application/use_cases/git/pull.rs
+++ b/api/crates/application/src/git/use_cases/pull.rs
@@ -1,9 +1,9 @@
 use anyhow::anyhow;
 use uuid::Uuid;
 
-use crate::application::dto::git::{GitPullRequestDto, GitPullResultDto};
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
+use crate::git::dtos::{GitPullRequestDto, GitPullResultDto};
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
 
 pub struct PullRepository<'a, R, W>
 where
@@ -30,5 +30,6 @@ where
         self.workspace
             .pull(workspace_id, actor_id, &req, &cfg)
             .await
+            .map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/git/sync_now.rs b/api/crates/application/src/git/use_cases/sync_now.rs
similarity index 51%
rename from api/src/application/use_cases/git/sync_now.rs
rename to api/crates/application/src/git/use_cases/sync_now.rs
index c10d8097..0abe2c67 100644
--- a/api/src/application/use_cases/git/sync_now.rs
+++ b/api/crates/application/src/git/use_cases/sync_now.rs
@@ -1,8 +1,9 @@
 use uuid::Uuid;
 
-use crate::application::dto::git::{GitSyncRequestDto, GitSyncResponseDto};
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
+use crate::git::dtos::{GitSyncRequestDto, GitSyncResponseDto};
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use domain::git::sync_log::{GitSyncOperation, GitSyncStatus};
 
 pub struct SyncNow<'a, R, W>
 where
@@ -30,37 +31,37 @@ where
             .sync(workspace_id, &attempt_req, cfg.as_ref())
             .await?;
 
-        if let Some(cfg) = cfg.as_ref() {
-            if !cfg.repository_url.is_empty() {
-                if attempt_req.skip_push.unwrap_or(false) {
-                    let _ = self
-                        .repo
-                        .log_sync_operation(
-                            workspace_id,
-                            "commit",
-                            "success",
-                            Some(&outcome.message),
-                            outcome.commit_hash.as_deref(),
-                        )
-                        .await;
+        if let Some(cfg) = cfg.as_ref()
+            && !cfg.repository_url.is_empty()
+        {
+            if attempt_req.skip_push.unwrap_or(false) {
+                let _ = self
+                    .repo
+                    .log_sync_operation(
+                        workspace_id,
+                        GitSyncOperation::Commit,
+                        GitSyncStatus::Success,
+                        Some(&outcome.message),
+                        outcome.commit_hash.as_deref(),
+                    )
+                    .await;
+            } else {
+                // Treat "nothing to commit" as success even if no push occurred.
+                let status = if outcome.files_changed == 0 || outcome.pushed {
+                    GitSyncStatus::Success
                 } else {
-                    // Treat "nothing to commit" as success even if no push occurred.
-                    let status = if outcome.files_changed == 0 || outcome.pushed {
-                        "success"
-                    } else {
-                        "error"
-                    };
-                    let _ = self
-                        .repo
-                        .log_sync_operation(
-                            workspace_id,
-                            "push",
-                            status,
-                            Some(&outcome.message),
-                            outcome.commit_hash.as_deref(),
-                        )
-                        .await;
-                }
+                    GitSyncStatus::Error
                 };
+                let _ = self
+                    .repo
+                    .log_sync_operation(
+                        workspace_id,
+                        GitSyncOperation::Push,
+                        status,
+                        Some(&outcome.message),
+                        outcome.commit_hash.as_deref(),
+                    )
+                    .await;
             }
         }
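sync_now.rs collapses the nested `if let` plus inner `if` into a let-chain, which is stable in the 2024 edition this workspace targets. The control-flow change, reduced to a skeleton:

    // Let-chain form (edition 2024), equivalent to the nested ifs it replaces.
    fn has_remote(cfg: Option<&str>) -> bool {
        if let Some(url) = cfg
            && !url.is_empty()
        {
            return true;
        }
        false
    }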
diff --git a/api/src/application/use_cases/git/upsert_config.rs b/api/crates/application/src/git/use_cases/upsert_config.rs
similarity index 55%
rename from api/src/application/use_cases/git/upsert_config.rs
rename to api/crates/application/src/git/use_cases/upsert_config.rs
index af7bcdc5..e82d1cdb 100644
--- a/api/src/application/use_cases/git/upsert_config.rs
+++ b/api/crates/application/src/git/use_cases/upsert_config.rs
@@ -1,8 +1,9 @@
-use crate::application::dto::git::{GitConfigDto, UpsertGitConfigInput};
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::gitignore_port::GitignorePort;
-use crate::application::ports::storage_port::StorageResolverPort;
+use crate::core::ports::storage::storage_port::StorageResolverPort;
+use crate::git::dtos::{GitConfigDto, UpsertGitConfigInput};
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::git::ports::gitignore_port::GitignorePort;
+use domain::git::auth::GitAuthType;
 use uuid::Uuid;
 
 pub struct UpsertGitConfig<'a, R, G, S, W>
 where
@@ -30,36 +31,35 @@ where
         workspace_id: Uuid,
         req: &UpsertGitConfigInput,
     ) -> anyhow::Result<GitConfigDto> {
-        if req.auth_type != "token" && req.auth_type != "ssh" {
+        let auth_type =
+            GitAuthType::parse(&req.auth_type).ok_or_else(|| anyhow::anyhow!("bad_request"))?;
+        if !auth_type.validate_repository_url(&req.repository_url) {
             anyhow::bail!("bad_request");
         }
-        if req.auth_type == "token" && !req.repository_url.starts_with("https://") {
-            anyhow::bail!("bad_request");
-        }
-        let (id, repository_url, branch_name, auth_type, auto_sync, created_at, updated_at) = self
+        let record = self
             .repo
             .upsert_config(
                 workspace_id,
                 &req.repository_url,
                 req.branch_name.as_deref(),
-                &req.auth_type,
+                auth_type,
                 &req.auth_data,
                 req.auto_sync,
             )
             .await?;
 
         self.workspace
-            .ensure_repository(workspace_id, &branch_name)
+            .ensure_repository(workspace_id, &record.branch_name)
            .await?;
         let dir = self.storage.user_repo_dir(workspace_id);
         let _ = self.gitignore.ensure_gitignore(&dir).await?;
 
         Ok(GitConfigDto {
-            id,
-            repository_url,
-            branch_name,
-            auth_type,
-            auto_sync,
-            created_at,
-            updated_at,
+            id: record.id,
+            repository_url: record.repository_url,
+            branch_name: record.branch_name,
+            auth_type: record.auth_type.as_str().to_string(),
+            auto_sync: record.auto_sync,
+            created_at: record.created_at,
+            updated_at: record.updated_at,
         })
     }
 }
diff --git a/api/src/application/dto/api_tokens.rs b/api/crates/application/src/identity/dtos/api_tokens.rs
similarity index 92%
rename from api/src/application/dto/api_tokens.rs
rename to api/crates/application/src/identity/dtos/api_tokens.rs
index c3d20192..32f9c218 100644
--- a/api/src/application/dto/api_tokens.rs
+++ b/api/crates/application/src/identity/dtos/api_tokens.rs
@@ -1,7 +1,7 @@
 use chrono::{DateTime, Utc};
 use uuid::Uuid;
 
-use crate::application::ports::api_token_repository::ApiToken;
+use crate::identity::ports::api_token_repository::ApiToken;
 
 #[derive(Debug, Clone)]
 pub struct ApiTokenDto {
diff --git a/api/src/application/dto/auth.rs b/api/crates/application/src/identity/dtos/auth.rs
similarity index 100%
rename from api/src/application/dto/auth.rs
rename to api/crates/application/src/identity/dtos/auth.rs
diff --git a/api/crates/application/src/identity/dtos/mod.rs b/api/crates/application/src/identity/dtos/mod.rs
new file mode 100644
index 00000000..80430ce7
--- /dev/null
+++ b/api/crates/application/src/identity/dtos/mod.rs
@@ -0,0 +1,7 @@
+mod api_tokens;
+mod auth;
+mod user_shortcuts;
+
+pub use api_tokens::*;
+pub use auth::*;
+pub use user_shortcuts::*;
diff --git a/api/src/application/dto/user_shortcuts.rs b/api/crates/application/src/identity/dtos/user_shortcuts.rs
similarity index 85%
rename from api/src/application/dto/user_shortcuts.rs
rename to api/crates/application/src/identity/dtos/user_shortcuts.rs
index ba12f5a5..fad3c13c 100644
--- a/api/src/application/dto/user_shortcuts.rs
+++ b/api/crates/application/src/identity/dtos/user_shortcuts.rs
@@ -2,7 +2,7 @@ use chrono::{DateTime, Utc};
 use serde_json::Value;
 use uuid::Uuid;
 
-use crate::application::ports::user_shortcut_repository::UserShortcutProfile;
+use crate::identity::ports::user_shortcuts::user_shortcut_repository::UserShortcutProfile;
 
 #[derive(Debug, Clone)]
 pub struct UserShortcutProfileDto {
diff --git a/api/crates/application/src/identity/mod.rs b/api/crates/application/src/identity/mod.rs
new file mode 100644
index 00000000..2e8e16cf
--- /dev/null
+++ b/api/crates/application/src/identity/mod.rs
@@ -0,0 +1,4 @@
+pub mod dtos;
+pub mod ports;
+pub mod services;
+pub mod use_cases;
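upsert_config.rs above delegates auth-type parsing and URL validation to domain::git::auth::GitAuthType. Only the two method signatures are visible in the diff; a sketch consistent with them, where the ssh rule is an assumption (the removed inline code only enforced the https rule for token auth):

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum GitAuthType {
        Token,
        Ssh,
    }

    impl GitAuthType {
        pub fn parse(s: &str) -> Option<Self> {
            match s {
                "token" => Some(Self::Token),
                "ssh" => Some(Self::Ssh),
                _ => None,
            }
        }

        pub fn validate_repository_url(&self, url: &str) -> bool {
            match self {
                // Mirrors the removed inline rule: token auth needs HTTPS.
                Self::Token => url.starts_with("https://"),
                // Assumed; the real rule for ssh remotes is not in the diff.
                Self::Ssh => !url.is_empty(),
            }
        }
    }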
diff --git a/api/src/application/ports/api_token_repository.rs b/api/crates/application/src/identity/ports/api_token_repository.rs
similarity index 70%
rename from api/src/application/ports/api_token_repository.rs
rename to api/crates/application/src/identity/ports/api_token_repository.rs
index 1c35823d..174b967c 100644
--- a/api/src/application/ports/api_token_repository.rs
+++ b/api/crates/application/src/identity/ports/api_token_repository.rs
@@ -1,6 +1,8 @@
 use async_trait::async_trait;
 use uuid::Uuid;
 
+use crate::core::ports::errors::PortResult;
+
 #[derive(Debug, Clone)]
 pub struct ApiToken {
     pub id: Uuid,
@@ -28,13 +30,13 @@ pub trait ApiTokenRepository: Send + Sync {
         name: &str,
         token_hash: &str,
         token_digest: &str,
-    ) -> anyhow::Result<ApiToken>;
+    ) -> PortResult<ApiToken>;
 
-    async fn list_active(&self, workspace_id: Uuid) -> anyhow::Result<Vec<ApiToken>>;
+    async fn list_active(&self, workspace_id: Uuid) -> PortResult<Vec<ApiToken>>;
 
-    async fn revoke(&self, workspace_id: Uuid, token_id: Uuid) -> anyhow::Result<bool>;
+    async fn revoke(&self, workspace_id: Uuid, token_id: Uuid) -> PortResult<bool>;
 
-    async fn find_by_digest(&self, digest: &str) -> anyhow::Result<Option<ApiTokenSecret>>;
+    async fn find_by_digest(&self, digest: &str) -> PortResult<Option<ApiTokenSecret>>;
 
-    async fn touch_last_used(&self, token_id: Uuid) -> anyhow::Result<()>;
+    async fn touch_last_used(&self, token_id: Uuid) -> PortResult<()>;
 }
diff --git a/api/crates/application/src/identity/ports/jwt_codec.rs b/api/crates/application/src/identity/ports/jwt_codec.rs
new file mode 100644
index 00000000..e81a64b7
--- /dev/null
+++ b/api/crates/application/src/identity/ports/jwt_codec.rs
@@ -0,0 +1,24 @@
+use uuid::Uuid;
+
+#[derive(Debug, Clone)]
+pub struct JwtClaims {
+    pub sub: Uuid,
+    pub workspace_id: Option<Uuid>,
+    pub iat: usize,
+    pub exp: usize,
+    pub sid: Option<Uuid>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum JwtDecodeError {
+    Expired,
+    Invalid,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct JwtEncodeError;
+
+pub trait JwtCodec: Send + Sync {
+    fn decode(&self, token: &str) -> Result<JwtClaims, JwtDecodeError>;
+    fn encode(&self, claims: &JwtClaims) -> Result<String, JwtEncodeError>;
+}
diff --git a/api/crates/application/src/identity/ports/mod.rs b/api/crates/application/src/identity/ports/mod.rs
new file mode 100644
index 00000000..4f5afe38
--- /dev/null
+++ b/api/crates/application/src/identity/ports/mod.rs
@@ -0,0 +1,6 @@
+pub mod api_token_repository;
+pub mod jwt_codec;
+pub mod secret_hasher;
+pub mod user_repository;
+pub mod user_session_repository;
+pub mod user_shortcuts;
diff --git a/api/crates/application/src/identity/ports/secret_hasher.rs b/api/crates/application/src/identity/ports/secret_hasher.rs
new file mode 100644
index 00000000..17e7b493
--- /dev/null
+++ b/api/crates/application/src/identity/ports/secret_hasher.rs
@@ -0,0 +1,6 @@
+use crate::core::ports::errors::PortResult;
+
+pub trait SecretHasher: Send + Sync {
+    fn hash_secret(&self, secret: &str) -> PortResult<String>;
+    fn verify_secret(&self, secret: &str, secret_hash: &str) -> PortResult<bool>;
+}
diff --git a/api/src/application/ports/user_repository.rs b/api/crates/application/src/identity/ports/user_repository.rs
similarity index 61%
rename from api/src/application/ports/user_repository.rs
rename to api/crates/application/src/identity/ports/user_repository.rs
index 32f09747..43176004 100644
--- a/api/src/application/ports/user_repository.rs
+++ b/api/crates/application/src/identity/ports/user_repository.rs
@@ -1,6 +1,8 @@
 use async_trait::async_trait;
 use uuid::Uuid;
 
+use crate::core::ports::errors::PortResult;
+
 #[derive(Debug, Clone)]
 pub struct UserRow {
     pub id: Uuid,
@@ -18,20 +20,20 @@ pub trait UserRepository: Send + Sync {
         name: &str,
         password_hash: Option<&str>,
         default_workspace_id: Uuid,
-    ) -> anyhow::Result<UserRow>;
-    async fn find_by_email(&self, email: &str) -> anyhow::Result<Option<UserRow>>;
+    ) -> PortResult<UserRow>;
+    async fn find_by_email(&self, email: &str) -> PortResult<Option<UserRow>>;
     async fn find_by_external_identity(
         &self,
         provider: &str,
         subject: &str,
-    ) -> anyhow::Result<Option<UserRow>>;
-    async fn find_by_id(&self, id: Uuid) -> anyhow::Result<Option<UserRow>>;
+    ) -> PortResult<Option<UserRow>>;
+    async fn find_by_id(&self, id: Uuid) -> PortResult<Option<UserRow>>;
     async fn link_external_identity(
         &self,
         user_id: Uuid,
         provider: &str,
         subject: &str,
-    ) -> anyhow::Result<()>;
-    async fn delete_user(&self, id: Uuid) -> anyhow::Result<bool>;
-    async fn list_user_ids(&self) -> anyhow::Result<Vec<Uuid>>;
+    ) -> PortResult<()>;
+    async fn delete_user(&self, id: Uuid) -> PortResult<bool>;
+    async fn list_user_ids(&self) -> PortResult<Vec<Uuid>>;
 }
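secret_hasher.rs turns password hashing into a port. The Argon2 code removed from user_sessions.rs later in this diff suggests what the production implementation does; a sketch that reuses exactly that removed logic (argon2 0.5 / password-hash 0.5 and rand 0.8 from the old dependency list), returning anyhow::Result here to stay self-contained instead of PortResult:

    use argon2::{
        Argon2,
        password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    };
    use rand::rngs::OsRng;

    struct Argon2SecretHasher;

    impl Argon2SecretHasher {
        fn hash_secret(&self, secret: &str) -> anyhow::Result<String> {
            let salt = SaltString::generate(&mut OsRng);
            Ok(Argon2::default()
                .hash_password(secret.as_bytes(), &salt)
                .map_err(|e| anyhow::anyhow!(e.to_string()))?
                .to_string())
        }

        fn verify_secret(&self, secret: &str, hash: &str) -> anyhow::Result<bool> {
            let parsed = PasswordHash::new(hash).map_err(|e| anyhow::anyhow!(e.to_string()))?;
            Ok(Argon2::default()
                .verify_password(secret.as_bytes(), &parsed)
                .is_ok())
        }
    }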
diff --git a/api/src/application/ports/user_session_repository.rs b/api/crates/application/src/identity/ports/user_session_repository.rs
similarity index 62%
rename from api/src/application/ports/user_session_repository.rs
rename to api/crates/application/src/identity/ports/user_session_repository.rs
index 8b9517ae..7f71f875 100644
--- a/api/src/application/ports/user_session_repository.rs
+++ b/api/crates/application/src/identity/ports/user_session_repository.rs
@@ -2,6 +2,8 @@ use async_trait::async_trait;
 use chrono::{DateTime, Utc};
 use uuid::Uuid;
 
+use crate::core::ports::errors::PortResult;
+
 #[derive(Debug, Clone)]
 pub struct UserSessionRecord {
     pub id: Uuid,
@@ -25,6 +27,7 @@ pub struct UserSessionSecret {
 
 #[async_trait]
 pub trait UserSessionRepository: Send + Sync {
+    #[allow(clippy::too_many_arguments)]
     async fn create(
         &self,
         user_id: Uuid,
@@ -35,11 +38,11 @@ pub trait UserSessionRepository: Send + Sync {
         remember_me: bool,
         user_agent: Option<&str>,
         ip_address: Option<&str>,
-    ) -> anyhow::Result<UserSessionRecord>;
+    ) -> PortResult<UserSessionRecord>;
 
-    async fn find_by_digest(&self, token_digest: &str)
-        -> anyhow::Result<Option<UserSessionSecret>>;
+    async fn find_by_digest(&self, token_digest: &str) -> PortResult<Option<UserSessionSecret>>;
 
+    #[allow(clippy::too_many_arguments)]
     async fn update_token(
         &self,
         session_id: Uuid,
@@ -50,21 +53,21 @@ pub trait UserSessionRepository: Send + Sync {
         user_agent: Option<&str>,
         ip_address: Option<&str>,
         workspace_id: Option<Uuid>,
-    ) -> anyhow::Result<bool>;
+    ) -> PortResult<bool>;
 
-    async fn update_workspace(&self, session_id: Uuid, workspace_id: Uuid) -> anyhow::Result<bool>;
+    async fn update_workspace(&self, session_id: Uuid, workspace_id: Uuid) -> PortResult<bool>;
 
-    async fn touch(&self, session_id: Uuid) -> anyhow::Result<()>;
+    async fn touch(&self, session_id: Uuid) -> PortResult<()>;
 
-    async fn list_for_user(&self, user_id: Uuid) -> anyhow::Result<Vec<UserSessionRecord>>;
+    async fn list_for_user(&self, user_id: Uuid) -> PortResult<Vec<UserSessionRecord>>;
 
-    async fn find_by_id(&self, session_id: Uuid) -> anyhow::Result<Option<UserSessionRecord>>;
+    async fn find_by_id(&self, session_id: Uuid) -> PortResult<Option<UserSessionRecord>>;
 
-    async fn revoke(&self, session_id: Uuid) -> anyhow::Result<bool>;
+    async fn revoke(&self, session_id: Uuid) -> PortResult<bool>;
 
-    async fn revoke_by_digest(&self, token_digest: &str) -> anyhow::Result<bool>;
+    async fn revoke_by_digest(&self, token_digest: &str) -> PortResult<bool>;
 
-    async fn revoke_all_for_user(&self, user_id: Uuid) -> anyhow::Result<()>;
+    async fn revoke_all_for_user(&self, user_id: Uuid) -> PortResult<()>;
 
-    async fn delete_expired(&self, before: DateTime<Utc>, batch_size: i64) -> anyhow::Result<u64>;
+    async fn delete_expired(&self, before: DateTime<Utc>, batch_size: i64) -> PortResult<u64>;
 }
diff --git a/api/crates/application/src/identity/ports/user_shortcuts/mod.rs b/api/crates/application/src/identity/ports/user_shortcuts/mod.rs
new file mode 100644
index 00000000..df5c1c48
--- /dev/null
+++ b/api/crates/application/src/identity/ports/user_shortcuts/mod.rs
@@ -0,0 +1 @@
+pub mod user_shortcut_repository;
diff --git a/api/src/application/ports/user_shortcut_repository.rs b/api/crates/application/src/identity/ports/user_shortcuts/user_shortcut_repository.rs
similarity index 72%
rename from api/src/application/ports/user_shortcut_repository.rs
rename to api/crates/application/src/identity/ports/user_shortcuts/user_shortcut_repository.rs
index ca828b65..869af6d1 100644
--- a/api/src/application/ports/user_shortcut_repository.rs
+++ b/api/crates/application/src/identity/ports/user_shortcuts/user_shortcut_repository.rs
@@ -3,6 +3,8 @@ use chrono::{DateTime, Utc};
 use serde_json::Value;
 use uuid::Uuid;
 
+use crate::core::ports::errors::PortResult;
+
 #[derive(Debug, Clone)]
 pub struct UserShortcutProfile {
     pub user_id: Uuid,
@@ -13,12 +15,12 @@ pub struct UserShortcutProfile {
 
 #[async_trait]
 pub trait UserShortcutRepository: Send + Sync {
-    async fn get_by_user(&self, user_id: Uuid) -> anyhow::Result<Option<UserShortcutProfile>>;
+    async fn get_by_user(&self, user_id: Uuid) -> PortResult<Option<UserShortcutProfile>>;
 
     async fn upsert(
         &self,
         user_id: Uuid,
         bindings: Value,
         leader_key: Option<String>,
-    ) -> anyhow::Result<UserShortcutProfile>;
+    ) -> PortResult<UserShortcutProfile>;
 }
diff --git a/api/crates/application/src/identity/services/api_tokens/mod.rs b/api/crates/application/src/identity/services/api_tokens/mod.rs
new file mode 100644
index 00000000..e11eb40a
--- /dev/null
+++ b/api/crates/application/src/identity/services/api_tokens/mod.rs
@@ -0,0 +1,166 @@
+use std::sync::Arc;
+
+use rand::{Rng, distributions::Alphanumeric, rngs::OsRng};
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use crate::core::services::utils::hash::sha256_hex_str;
+use crate::identity::dtos::{ApiTokenDto, CreatedApiTokenDto};
+use crate::identity::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::use_cases::api_tokens::create_token::CreateApiToken;
+use crate::identity::use_cases::api_tokens::list_tokens::ListApiTokens;
+use crate::identity::use_cases::api_tokens::revoke_token::RevokeApiToken;
+use async_trait::async_trait;
+use domain::access::permissions::PermissionSet;
+use domain::identity::policy;
+
+pub struct ApiTokenService {
+    repo: Arc<dyn ApiTokenRepository>,
+    hasher: Arc<dyn SecretHasher>,
+}
+
+#[async_trait]
+pub trait ApiTokenServiceFacade: Send + Sync {
+    async fn list(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ApiTokenDto>, ServiceError>;
+    async fn create(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        name: Option<&str>,
+    ) -> Result<CreatedApiTokenDto, ServiceError>;
+    async fn revoke(
+        &self,
+        workspace_id: Uuid,
+        id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<bool, ServiceError>;
+}
+
+#[async_trait]
+impl ApiTokenServiceFacade for ApiTokenService {
+    async fn list(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ApiTokenDto>, ServiceError> {
+        self.list(workspace_id, permissions).await
+    }
+
+    async fn create(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        name: Option<&str>,
+    ) -> Result<CreatedApiTokenDto, ServiceError> {
+        self.create(workspace_id, user_id, permissions, name).await
+    }
+
+    async fn revoke(
+        &self,
+        workspace_id: Uuid,
+        id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<bool, ServiceError> {
+        self.revoke(workspace_id, id, permissions).await
+    }
+}
+
+impl ApiTokenService {
+    pub fn new(repo: Arc<dyn ApiTokenRepository>, hasher: Arc<dyn SecretHasher>) -> Self {
+        Self { repo, hasher }
+    }
+
+    pub async fn list(
+        &self,
+        workspace_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Vec<ApiTokenDto>, ServiceError> {
+        ensure_api_token_permission(workspace_id, permissions)?;
+        let uc = ListApiTokens {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id).await.map_err(ServiceError::from)
+    }
+
+    pub async fn create(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        name: Option<&str>,
+    ) -> Result<CreatedApiTokenDto, ServiceError> {
+        ensure_api_token_permission(workspace_id, permissions)?;
+        let uc = CreateApiToken {
+            repo: self.repo.as_ref(),
+            hasher: self.hasher.as_ref(),
+        };
+        uc.execute(workspace_id, user_id, name)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn revoke(
+        &self,
+        workspace_id: Uuid,
+        id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<bool, ServiceError> {
+        ensure_api_token_permission(workspace_id, permissions)?;
+        let uc = RevokeApiToken {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(workspace_id, id)
+            .await
+            .map_err(ServiceError::from)
+    }
+}
+
+fn ensure_api_token_permission(
+    _workspace_id: Uuid,
+    permissions: &PermissionSet,
+) -> Result<(), ServiceError> {
+    policy::ensure_api_token_manage_allowed(permissions).map_err(|_| ServiceError::Forbidden)
+}
+
+pub struct GeneratedApiToken {
+    pub plaintext: String,
+    pub token_hash: String,
+    pub token_digest: String,
+}
+
+pub fn generate_api_token(hasher: &dyn SecretHasher) -> anyhow::Result<GeneratedApiToken> {
+    let random: String = OsRng
+        .sample_iter(&Alphanumeric)
+        .take(48)
+        .map(char::from)
+        .collect();
+    let plaintext = format!("rmd_{random}");
+
+    let hash = hasher.hash_secret(&plaintext)?;
+    let digest = compute_digest(&plaintext);
+
+    Ok(GeneratedApiToken {
+        plaintext,
+        token_hash: hash,
+        token_digest: digest,
+    })
+}
+
+pub fn compute_digest(token: &str) -> String {
+    sha256_hex_str(token)
+}
+
+pub fn verify_token(
+    hasher: &dyn SecretHasher,
+    token: &str,
+    token_hash: &str,
+) -> anyhow::Result<bool> {
+    Ok(hasher.verify_secret(token, token_hash)?)
+}
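generate_api_token pairs two derivations of the same plaintext: a slow hash for verification and a deterministic SHA-256 hex digest so a token can be located via an indexed equality lookup (find_by_digest) before the expensive verify_token runs. A sketch of the digest helper presumably behind sha256_hex_str, using sha2 0.10 and hex from the dependency list:

    use sha2::{Digest, Sha256};

    fn sha256_hex_str(token: &str) -> String {
        let mut hasher = Sha256::new();
        hasher.update(token.as_bytes());
        hex::encode(hasher.finalize())
    }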
diff --git a/api/src/application/services/auth/account.rs b/api/crates/application/src/identity/services/auth/account.rs
similarity index 73%
rename from api/src/application/services/auth/account.rs
rename to api/crates/application/src/identity/services/auth/account.rs
index fa5496f0..86ffca93 100644
--- a/api/src/application/services/auth/account.rs
+++ b/api/crates/application/src/identity/services/auth/account.rs
@@ -2,26 +2,29 @@ use std::sync::Arc;
 
 use uuid::Uuid;
 
-use crate::application::dto::auth::UserDto;
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::plugin_asset_store::PluginAssetStore;
-use crate::application::ports::plugin_installation_repository::PluginInstallationRepository;
-use crate::application::ports::plugin_repository::PluginRepository;
-use crate::application::ports::storage_projection_queue::StorageProjectionQueue;
-use crate::application::ports::user_repository::UserRepository;
-use crate::application::services::auth::external::ExternalAuthIdentity;
-use crate::application::services::errors::ServiceError;
-use crate::application::services::workspaces::WorkspaceService;
-use crate::application::use_cases::auth::delete_account::DeleteAccount;
-use crate::application::use_cases::auth::login::{Login as LoginUc, LoginRequest};
-use crate::application::use_cases::auth::me::GetMe;
-use crate::application::use_cases::auth::register::{Register as RegisterUc, RegisterRequest};
+use crate::core::ports::storage::storage_projection_queue::StorageProjectionQueue;
+use crate::core::services::errors::ServiceError;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::files::files_repository::FilesRepository;
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::identity::dtos::UserDto;
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::ports::user_repository::UserRepository;
+use crate::identity::services::auth::external::ExternalAuthIdentity;
+use crate::identity::use_cases::auth::delete_account::DeleteAccount;
+use crate::identity::use_cases::auth::login::{Login as LoginUc, LoginRequest};
+use crate::identity::use_cases::auth::me::GetMe;
+use crate::identity::use_cases::auth::register::{Register as RegisterUc, RegisterRequest};
+use crate::plugins::ports::plugin_asset_store::PluginAssetStore;
+use crate::plugins::ports::plugin_installation_repository::PluginInstallationRepository;
+use crate::plugins::ports::plugin_repository::PluginRepository;
+use crate::workspaces::services::WorkspaceService;
+use async_trait::async_trait;
 
 pub struct AccountService {
     user_repo: Arc<dyn UserRepository>,
+    secret_hasher: Arc<dyn SecretHasher>,
     document_repo: Arc<dyn DocumentRepository>,
     files_repo: Arc<dyn FilesRepository>,
     plugin_installations: Arc<dyn PluginInstallationRepository>,
@@ -33,10 +36,59 @@ pub struct AccountService {
     workspace_service: Arc<WorkspaceService>,
 }
 
+#[async_trait]
+pub trait AccountServiceFacade: Send + Sync {
+    async fn register(
+        &self,
+        email: &str,
+        name: &str,
+        password: &str,
+    ) -> Result<UserDto, ServiceError>;
+    async fn login(&self, email: &str, password: &str) -> Result<Option<UserDto>, ServiceError>;
+    async fn get_me(&self, user_id: Uuid) -> Result<Option<UserDto>, ServiceError>;
+    async fn delete_account(&self, user_id: Uuid) -> Result<(), ServiceError>;
+    async fn sign_in_with_external(
+        &self,
+        identity: ExternalAuthIdentity,
+    ) -> Result<UserDto, ServiceError>;
+}
+
+#[async_trait]
+impl AccountServiceFacade for AccountService {
+    async fn register(
+        &self,
+        email: &str,
+        name: &str,
+        password: &str,
+    ) -> Result<UserDto, ServiceError> {
+        self.register(email, name, password).await
+    }
+
+    async fn login(&self, email: &str, password: &str) -> Result<Option<UserDto>, ServiceError> {
+        self.login(email, password).await
+    }
+
+    async fn get_me(&self, user_id: Uuid) -> Result<Option<UserDto>, ServiceError> {
+        self.get_me(user_id).await
+    }
+
+    async fn delete_account(&self, user_id: Uuid) -> Result<(), ServiceError> {
+        self.delete_account(user_id).await
+    }
+
+    async fn sign_in_with_external(
+        &self,
+        identity: ExternalAuthIdentity,
+    ) -> Result<UserDto, ServiceError> {
+        self.sign_in_with_external(identity).await
+    }
+}
+
 impl AccountService {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         user_repo: Arc<dyn UserRepository>,
+        secret_hasher: Arc<dyn SecretHasher>,
         document_repo: Arc<dyn DocumentRepository>,
         files_repo: Arc<dyn FilesRepository>,
         plugin_installations: Arc<dyn PluginInstallationRepository>,
@@ -49,6 +101,7 @@ impl AccountService {
     ) -> Self {
         Self {
             user_repo,
+            secret_hasher,
             document_repo,
             files_repo,
             plugin_installations,
@@ -74,6 +127,7 @@ impl AccountService {
             .await?;
         let uc = RegisterUc {
             repo: self.user_repo.as_ref(),
+            hasher: self.secret_hasher.as_ref(),
         };
         let register_request = RegisterRequest {
             id: user_id,
@@ -111,6 +165,7 @@ impl AccountService {
     ) -> Result<Option<UserDto>, ServiceError> {
         let uc = LoginUc {
             repo: self.user_repo.as_ref(),
+            hasher: self.secret_hasher.as_ref(),
         };
         uc.execute(&LoginRequest {
             email: email.to_string(),
@@ -165,9 +220,7 @@ impl AccountService {
         if !identity.email_verified {
             return Err(ServiceError::Unauthorized);
         }
-        self.handle_external_identity(identity)
-            .await
-            .map_err(ServiceError::from)
+        self.handle_external_identity(identity).await
     }
 
     async fn handle_external_identity(
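The AccountServiceFacade block above is the delegation idiom used for every service in this refactor: an #[async_trait] object-safe trait whose impl simply calls the concrete type's inherent methods (inherent methods win method resolution, so the calls do not recurse). A stripped-down version:

    use async_trait::async_trait;

    struct Service;

    impl Service {
        async fn ping(&self) -> u32 {
            1
        }
    }

    #[async_trait]
    trait ServiceFacade: Send + Sync {
        async fn ping(&self) -> u32;
    }

    #[async_trait]
    impl ServiceFacade for Service {
        async fn ping(&self) -> u32 {
            // Resolves to the inherent method above, not this trait method.
            self.ping().await
        }
    }

Handlers can then depend on Arc<dyn ServiceFacade> and tests can substitute fakes.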
diff --git a/api/crates/application/src/identity/services/auth/auth_service.rs b/api/crates/application/src/identity/services/auth/auth_service.rs
new file mode 100644
index 00000000..cd0205ed
--- /dev/null
+++ b/api/crates/application/src/identity/services/auth/auth_service.rs
@@ -0,0 +1,134 @@
+use std::sync::Arc;
+
+use chrono::Utc;
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use crate::identity::ports::jwt_codec::{JwtClaims, JwtCodec, JwtDecodeError};
+use crate::identity::services::auth::token_validation::TokenValidationService;
+use async_trait::async_trait;
+
+#[derive(Clone)]
+pub struct AuthService {
+    jwt: Arc<dyn JwtCodec>,
+    tokens: Arc<TokenValidationService>,
+    jwt_expires_secs: usize,
+}
+
+#[derive(Debug, Clone)]
+pub struct IssuedSession {
+    pub token: String,
+    pub expires_at: usize,
+}
+
+#[async_trait]
+pub trait AuthServiceFacade: Send + Sync {
+    async fn subject_from_token(&self, token: &str) -> Result<Option<String>, ServiceError>;
+    fn workspace_from_token_claim(&self, token: &str) -> Option<Uuid>;
+    fn session_id_from_token_claim(&self, token: &str) -> Option<Uuid>;
+    async fn workspace_from_token_async(&self, token: &str) -> Result<Option<Uuid>, ServiceError>;
+    fn session_ttl_secs(&self) -> usize;
+}
+
+#[async_trait]
+impl AuthServiceFacade for AuthService {
+    async fn subject_from_token(&self, token: &str) -> Result<Option<String>, ServiceError> {
+        self.subject_from_token(token).await
+    }
+
+    fn workspace_from_token_claim(&self, token: &str) -> Option<Uuid> {
+        self.workspace_from_token_claim(token)
+    }
+
+    fn session_id_from_token_claim(&self, token: &str) -> Option<Uuid> {
+        self.session_id_from_token_claim(token)
+    }
+
+    async fn workspace_from_token_async(&self, token: &str) -> Result<Option<Uuid>, ServiceError> {
+        self.workspace_from_token_async(token).await
+    }
+
+    fn session_ttl_secs(&self) -> usize {
+        self.session_ttl_secs()
+    }
+}
+
+impl AuthService {
+    pub fn new(
+        jwt: Arc<dyn JwtCodec>,
+        tokens: Arc<TokenValidationService>,
+        jwt_expires_secs: usize,
+    ) -> Self {
+        Self {
+            jwt,
+            tokens,
+            jwt_expires_secs,
+        }
+    }
+
+    pub async fn subject_from_token(&self, token: &str) -> Result<Option<String>, ServiceError> {
+        match self.jwt.decode(token) {
+            Ok(claims) => return Ok(Some(claims.sub.to_string())),
+            Err(JwtDecodeError::Expired) => return Err(ServiceError::TokenExpired),
+            Err(JwtDecodeError::Invalid) => {}
+        };
+
+        self.tokens
+            .validate(token)
+            .await
+            .map(|opt| opt.map(|subject| subject.owner_id.to_string()))
+    }
+
+    pub fn workspace_from_token_claim(&self, token: &str) -> Option<Uuid> {
+        self.jwt
+            .decode(token)
+            .ok()
+            .and_then(|claims| claims.workspace_id)
+    }
+
+    pub fn session_id_from_token_claim(&self, token: &str) -> Option<Uuid> {
+        self.jwt.decode(token).ok().and_then(|claims| claims.sid)
+    }
+
+    pub async fn workspace_from_token_async(
+        &self,
+        token: &str,
+    ) -> Result<Option<Uuid>, ServiceError> {
+        if let Some(id) = self.workspace_from_token_claim(token) {
+            return Ok(Some(id));
+        }
+        self.tokens
+            .validate(token)
+            .await
+            .map(|opt| opt.map(|subject| subject.workspace_id))
+    }
+
+    pub fn issue_session(
+        &self,
+        user_id: Uuid,
+        workspace_id: Uuid,
+        session_id: Option<Uuid>,
+    ) -> Result<IssuedSession, ServiceError> {
+        let now = Utc::now().timestamp() as usize;
+        let exp = now + self.jwt_expires_secs;
+        let claims = JwtClaims {
+            sub: user_id,
+            workspace_id: Some(workspace_id),
+            iat: now,
+            exp,
+            sid: session_id,
+        };
+        let token = self
+            .jwt
+            .encode(&claims)
+            .map_err(|_| ServiceError::Unexpected(anyhow::anyhow!("jwt_encode_failed")))?;
+        Ok(IssuedSession {
+            token,
+            expires_at: exp,
+        })
+    }
+
+    pub fn session_ttl_secs(&self) -> usize {
+        self.jwt_expires_secs
+    }
+}
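auth_service.rs depends only on the JwtCodec port defined earlier; the concrete codec presumably lives in the infrastructure crate next to the jsonwebtoken dependency. A sketch of such a codec against jsonwebtoken 9, with a simplified claims struct (the real wire format of sub/sid/workspace_id is not shown in this diff):

    use jsonwebtoken::{
        DecodingKey, EncodingKey, Header, Validation, decode, encode, errors::ErrorKind,
    };
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct WireClaims {
        sub: String,
        iat: usize,
        exp: usize,
    }

    enum DecodeError {
        Expired,
        Invalid,
    }

    struct HsJwtCodec {
        enc: EncodingKey,
        dec: DecodingKey,
    }

    impl HsJwtCodec {
        fn decode(&self, token: &str) -> Result<WireClaims, DecodeError> {
            // Validation::default() checks the exp claim, matching the
            // Expired-vs-Invalid split the port's error type encodes.
            decode::<WireClaims>(token, &self.dec, &Validation::default())
                .map(|data| data.claims)
                .map_err(|e| match e.kind() {
                    ErrorKind::ExpiredSignature => DecodeError::Expired,
                    _ => DecodeError::Invalid,
                })
        }

        fn encode(&self, claims: &WireClaims) -> Result<String, jsonwebtoken::errors::Error> {
            encode(&Header::default(), claims, &self.enc)
        }
    }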
diff --git a/api/src/application/services/auth/external.rs b/api/crates/application/src/identity/services/auth/external.rs
similarity index 82%
rename from api/src/application/services/auth/external.rs
rename to api/crates/application/src/identity/services/auth/external.rs
index 1c92cbc2..cd2701f5 100644
--- a/api/src/application/services/auth/external.rs
+++ b/api/crates/application/src/identity/services/auth/external.rs
@@ -3,7 +3,7 @@ use std::sync::Arc;
 
 use async_trait::async_trait;
 
-use crate::application::services::errors::ServiceError;
+use crate::core::services::errors::ServiceError;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub enum ExternalAuthProviderKind {
@@ -95,6 +95,12 @@ pub struct ExternalAuthRegistry {
     providers: HashMap<ExternalAuthProviderKind, Arc<dyn ExternalAuthProvider>>,
 }
 
+pub trait ExternalAuthRegistryFacade: Send + Sync {
+    fn get(&self, provider: ExternalAuthProviderKind) -> Option<Arc<dyn ExternalAuthProvider>>;
+    fn is_empty(&self) -> bool;
+    fn list_descriptors(&self) -> Vec;
+}
+
 impl ExternalAuthRegistry {
     pub fn new(providers: Vec<Arc<dyn ExternalAuthProvider>>) -> Self {
         let mut map = HashMap::new();
@@ -119,3 +125,17 @@ impl ExternalAuthRegistry {
             .collect()
     }
 }
+
+impl ExternalAuthRegistryFacade for ExternalAuthRegistry {
+    fn get(&self, provider: ExternalAuthProviderKind) -> Option<Arc<dyn ExternalAuthProvider>> {
+        self.get(provider)
+    }
+
+    fn is_empty(&self) -> bool {
+        self.is_empty()
+    }
+
+    fn list_descriptors(&self) -> Vec {
+        self.list_descriptors()
+    }
+}
diff --git a/api/src/application/services/auth/mod.rs b/api/crates/application/src/identity/services/auth/mod.rs
similarity index 79%
rename from api/src/application/services/auth/mod.rs
rename to api/crates/application/src/identity/services/auth/mod.rs
index 092aee0d..53adafe2 100644
--- a/api/src/application/services/auth/mod.rs
+++ b/api/crates/application/src/identity/services/auth/mod.rs
@@ -1,5 +1,5 @@
 pub mod account;
+pub mod auth_service;
 pub mod external;
-pub mod service;
 pub mod token_validation;
 pub mod user_sessions;
diff --git a/api/crates/application/src/identity/services/auth/token_validation.rs b/api/crates/application/src/identity/services/auth/token_validation.rs
new file mode 100644
index 00000000..c9581e2f
--- /dev/null
+++ b/api/crates/application/src/identity/services/auth/token_validation.rs
@@ -0,0 +1,46 @@
+use std::sync::Arc;
+
+use crate::core::services::errors::ServiceError;
+use crate::identity::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::services::api_tokens::{compute_digest, verify_token};
+use domain::identity::api_token::ApiTokenSubject;
+
+pub struct TokenValidationService {
+    repo: Arc<dyn ApiTokenRepository>,
+    hasher: Arc<dyn SecretHasher>,
+}
+
+impl TokenValidationService {
+    pub fn new(repo: Arc<dyn ApiTokenRepository>, hasher: Arc<dyn SecretHasher>) -> Self {
+        Self { repo, hasher }
+    }
+
+    pub async fn validate(&self, token: &str) -> Result<Option<ApiTokenSubject>, ServiceError> {
+        let digest = compute_digest(token);
+        let record = self
+            .repo
+            .find_by_digest(&digest)
+            .await
+            .map_err(ServiceError::from)?;
+        let Some(secret) = record else {
+            return Ok(None);
+        };
+        if secret.token.revoked_at.is_some() {
+            return Ok(None);
+        }
+        let ok = verify_token(self.hasher.as_ref(), token, &secret.token_hash)
+            .map_err(ServiceError::from)?;
+        if !ok {
+            return Ok(None);
+        }
+        self.repo
+            .touch_last_used(secret.token.id)
+            .await
+            .map_err(ServiceError::from)?;
+        Ok(Some(ApiTokenSubject {
+            owner_id: secret.token.owner_id,
+            workspace_id: secret.token.workspace_id,
+        }))
+    }
+}
diff --git a/api/src/application/services/auth/user_sessions.rs b/api/crates/application/src/identity/services/auth/user_sessions.rs
similarity index 74%
rename from api/src/application/services/auth/user_sessions.rs
rename to api/crates/application/src/identity/services/auth/user_sessions.rs
index a76858b6..f4e408ac 100644
--- a/api/src/application/services/auth/user_sessions.rs
+++ b/api/crates/application/src/identity/services/auth/user_sessions.rs
@@ -1,19 +1,15 @@
 use std::sync::Arc;
 
-use argon2::{
-    Argon2,
-    password_hash::{PasswordHasher, SaltString},
-};
 use chrono::{DateTime, Duration, Utc};
 use rand::{Rng, distributions::Alphanumeric, rngs::OsRng};
 use uuid::Uuid;
 
-use crate::application::ports::user_session_repository::{
-    UserSessionRecord, UserSessionRepository,
-};
-use crate::application::services::api_tokens::{compute_digest, verify_token};
-use crate::application::services::auth::service::{AuthService, IssuedSession};
-use crate::application::services::errors::ServiceError;
+use crate::core::services::errors::ServiceError;
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::ports::user_session_repository::{UserSessionRecord, UserSessionRepository};
+use crate::identity::services::api_tokens::{compute_digest, verify_token};
+use crate::identity::services::auth::auth_service::{AuthService, IssuedSession};
+use async_trait::async_trait;
 
 pub struct SessionMetadata<'a> {
     pub user_agent: Option<&'a str>,
@@ -30,20 +26,101 @@ pub struct IssuedSessionBundle {
 
 pub struct UserSessionService {
     repo: Arc<dyn UserSessionRepository>,
+    hasher: Arc<dyn SecretHasher>,
     auth: Arc<AuthService>,
     refresh_ttl_secs: i64,
     refresh_ttl_long_secs: i64,
 }
 
+#[async_trait]
+pub trait UserSessionServiceFacade: Send + Sync {
+    async fn issue_new_session(
+        &self,
+        user_id: Uuid,
+        workspace_id: Uuid,
+        remember_me: bool,
+        meta: SessionMetadata<'_>,
+    ) -> Result<IssuedSessionBundle, ServiceError>;
+
+    async fn refresh_session(
+        &self,
+        token: &str,
+        workspace_override: Option<Uuid>,
+        meta: SessionMetadata<'_>,
+    ) -> Result<IssuedSessionBundle, ServiceError>;
+
+    async fn revoke_by_token(&self, token: &str) -> Result<(), ServiceError>;
+    async fn revoke_session(&self, user_id: Uuid, session_id: Uuid) -> Result<bool, ServiceError>;
+    async fn revoke_all_for_user(&self, user_id: Uuid) -> Result<(), ServiceError>;
+    async fn ensure_session_active(&self, session_id: Uuid) -> Result<(), ServiceError>;
+    async fn list_for_user(&self, user_id: Uuid) -> Result<Vec<UserSessionRecord>, ServiceError>;
+    async fn find_session_by_token(
+        &self,
+        token: &str,
+    ) -> Result<Option<UserSessionRecord>, ServiceError>;
+}
+
+#[async_trait]
+impl UserSessionServiceFacade for UserSessionService {
+    async fn issue_new_session(
+        &self,
+        user_id: Uuid,
+        workspace_id: Uuid,
+        remember_me: bool,
+        meta: SessionMetadata<'_>,
+    ) -> Result<IssuedSessionBundle, ServiceError> {
+        self.issue_new_session(user_id, workspace_id, remember_me, meta)
+            .await
+    }
+
+    async fn refresh_session(
+        &self,
+        token: &str,
+        workspace_override: Option<Uuid>,
+        meta: SessionMetadata<'_>,
+    ) -> Result<IssuedSessionBundle, ServiceError> {
+        self.refresh_session(token, workspace_override, meta).await
+    }
+
+    async fn revoke_by_token(&self, token: &str) -> Result<(), ServiceError> {
+        self.revoke_by_token(token).await
+    }
+
+    async fn revoke_session(&self, user_id: Uuid, session_id: Uuid) -> Result<bool, ServiceError> {
+        self.revoke_session(user_id, session_id).await
+    }
+
+    async fn revoke_all_for_user(&self, user_id: Uuid) -> Result<(), ServiceError> {
+        self.revoke_all_for_user(user_id).await
+    }
+
+    async fn ensure_session_active(&self, session_id: Uuid) -> Result<(), ServiceError> {
+        self.ensure_session_active(session_id).await
+    }
+
+    async fn list_for_user(&self, user_id: Uuid) -> Result<Vec<UserSessionRecord>, ServiceError> {
+        self.list_for_user(user_id).await
+    }
+
+    async fn find_session_by_token(
+        &self,
+        token: &str,
+    ) -> Result<Option<UserSessionRecord>, ServiceError> {
+        self.find_session_by_token(token).await
+    }
+}
+
 impl UserSessionService {
     pub fn new(
         repo: Arc<dyn UserSessionRepository>,
+        hasher: Arc<dyn SecretHasher>,
         auth: Arc<AuthService>,
         refresh_ttl_secs: i64,
         refresh_ttl_long_secs: i64,
     ) -> Self {
         Self {
             repo,
+            hasher,
             auth,
             refresh_ttl_secs,
             refresh_ttl_long_secs,
@@ -59,7 +136,7 @@ impl UserSessionService {
         Duration::seconds(secs.max(60))
     }
 
-    fn sanitize_metadata<'a>(value: Option<&'a str>) -> Option<&'a str> {
+    fn sanitize_metadata(value: Option<&str>) -> Option<&str> {
         value.and_then(|raw| {
             let trimmed = raw.trim();
             if trimmed.is_empty() {
@@ -79,19 +156,14 @@ impl UserSessionService {
         })
     }
 
-    fn generate_refresh_token() -> anyhow::Result<(String, String, String)> {
+    fn generate_refresh_token(&self) -> anyhow::Result<(String, String, String)> {
         let random: String = OsRng
             .sample_iter(&Alphanumeric)
             .take(48)
             .map(char::from)
             .collect();
         let plaintext = format!("rmds_{random}");
-        let salt = SaltString::generate(&mut OsRng);
-        let argon = Argon2::default();
-        let hash = argon
-            .hash_password(plaintext.as_bytes(), &salt)
-            .map_err(|e| anyhow::anyhow!(e.to_string()))?
-            .to_string();
+        let hash = self.hasher.hash_secret(&plaintext)?;
         let digest = compute_digest(&plaintext);
         Ok((plaintext, hash, digest))
     }
@@ -112,8 +184,9 @@ impl UserSessionService {
     ) -> Result<IssuedSessionBundle, ServiceError> {
         let ttl = self.ttl_for(remember_me);
         let expires_at = Utc::now() + ttl;
-        let (refresh_token, token_hash, token_digest) =
-            Self::generate_refresh_token().map_err(ServiceError::Unexpected)?;
+        let (refresh_token, token_hash, token_digest) = self
+            .generate_refresh_token()
+            .map_err(ServiceError::Unexpected)?;
         let (user_agent, ip_address) = self.metadata(&meta);
         let record = self
             .repo
@@ -131,8 +204,7 @@ impl UserSessionService {
             .map_err(ServiceError::from)?;
         let access = self
             .auth
-            .issue_session(user_id, workspace_id, Some(record.id))
-            .map_err(ServiceError::from)?;
+            .issue_session(user_id, workspace_id, Some(record.id))?;
         Ok(IssuedSessionBundle {
             access,
             refresh_token,
@@ -162,7 +234,9 @@ impl UserSessionService {
             let _ = self.repo.revoke(secret.session.id).await;
             return Err(ServiceError::Unauthorized);
         }
-        if !verify_token(token, &secret.token_hash).map_err(ServiceError::from)? {
+        if !verify_token(self.hasher.as_ref(), token, &secret.token_hash)
+            .map_err(ServiceError::from)?
+        {
             let _ = self.repo.revoke(secret.session.id).await;
             return Err(ServiceError::Unauthorized);
         }
@@ -175,8 +249,9 @@ impl UserSessionService {
         let remember_me = session.remember_me;
         let ttl = self.ttl_for(remember_me);
         let expires_at = now + ttl;
-        let (refresh_token, token_hash, token_digest) =
-            Self::generate_refresh_token().map_err(ServiceError::Unexpected)?;
+        let (refresh_token, token_hash, token_digest) = self
+            .generate_refresh_token()
+            .map_err(ServiceError::Unexpected)?;
         let (user_agent, ip_address) = self.metadata(&meta);
 
         session.expires_at = expires_at;
@@ -202,10 +277,9 @@ impl UserSessionService {
             return Err(ServiceError::Unauthorized);
         }
 
-        let access = self
-            .auth
-            .issue_session(session.user_id, session.workspace_id, Some(session.id))
-            .map_err(ServiceError::from)?;
+        let access =
+            self.auth
+                .issue_session(session.user_id, session.workspace_id, Some(session.id))?;
 
         Ok(IssuedSessionBundle {
             access,
@@ -297,14 +371,16 @@ impl UserSessionService {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::application::ports::api_token_repository::{
+    use crate::core::ports::errors::PortResult;
+    use crate::identity::ports::api_token_repository::{
         ApiToken, ApiTokenRepository, ApiTokenSecret,
     };
-    use crate::application::ports::user_session_repository::{
+    use crate::identity::ports::jwt_codec::{JwtClaims, JwtCodec, JwtDecodeError, JwtEncodeError};
+    use crate::identity::ports::secret_hasher::SecretHasher;
+    use crate::identity::ports::user_session_repository::{
         UserSessionRepository, UserSessionSecret,
     };
-    use crate::application::services::auth::token_validation::TokenValidationService;
-    use anyhow::bail;
+    use crate::identity::services::auth::token_validation::TokenValidationService;
     use async_trait::async_trait;
     use std::collections::HashMap;
     use std::sync::Arc;
@@ -344,7 +420,7 @@ mod tests {
             remember_me: bool,
             user_agent: Option<&str>,
             ip_address: Option<&str>,
-        ) -> anyhow::Result<UserSessionRecord> {
+        ) -> PortResult<UserSessionRecord> {
             let mut sessions = self.sessions.lock().await;
             let mut digests = self.digests.lock().await;
             let id = Uuid::new_v4();
@@ -376,7 +452,7 @@ mod tests {
         async fn find_by_digest(
             &self,
             token_digest: &str,
-        ) -> anyhow::Result<Option<UserSessionSecret>> {
+        ) -> PortResult<Option<UserSessionSecret>> {
             let digests = self.digests.lock().await;
             let sessions = self.sessions.lock().await;
             Ok(digests
@@ -395,7 +471,7 @@ mod tests {
             user_agent: Option<&str>,
             ip_address: Option<&str>,
             workspace_id: Option<Uuid>,
-        ) -> anyhow::Result<bool> {
+        ) -> PortResult<bool> {
             let mut sessions = self.sessions.lock().await;
             let mut digests = self.digests.lock().await;
             let Some(entry) = sessions.get_mut(&session_id) else {
@@ -421,29 +497,25 @@ mod tests {
             Ok(true)
         }
 
-        async fn update_workspace(
-            &self,
-            session_id: Uuid,
-            workspace_id: Uuid,
-        ) -> anyhow::Result<bool> {
+        async fn update_workspace(&self, session_id: Uuid, workspace_id: Uuid) -> PortResult<bool> {
             let mut sessions = self.sessions.lock().await;
-            if let Some(entry) = sessions.get_mut(&session_id) {
-                if entry.record.revoked_at.is_none() {
-                    entry.record.workspace_id = workspace_id;
-                    return Ok(true);
-                }
+            if let Some(entry) = sessions.get_mut(&session_id)
+                && entry.record.revoked_at.is_none()
+            {
+                entry.record.workspace_id = workspace_id;
+                return Ok(true);
             }
             Ok(false)
         }
 
-        async fn touch(&self, session_id: Uuid) -> anyhow::Result<()> {
+        async fn touch(&self, session_id: Uuid) -> PortResult<()> {
             if let Some(entry) = self.sessions.lock().await.get_mut(&session_id) {
                 entry.record.last_seen_at = Utc::now();
             }
             Ok(())
         }
 
-        async fn list_for_user(&self, user_id: Uuid) -> anyhow::Result<Vec<UserSessionRecord>> {
+        async fn list_for_user(&self, user_id: Uuid) -> PortResult<Vec<UserSessionRecord>> {
             let sessions = self.sessions.lock().await;
             Ok(sessions
                 .values()
@@ -452,7 +524,7 @@ mod tests {
                 .collect())
         }
 
-        async fn find_by_id(&self, session_id: Uuid) -> anyhow::Result<Option<UserSessionRecord>> {
+        async fn find_by_id(&self, session_id: Uuid) -> PortResult<Option<UserSessionRecord>> {
             Ok(self
                 .sessions
                 .lock()
@@ -461,18 +533,18 @@ mod tests {
                 .map(|entry| entry.record.clone()))
         }
 
-        async fn revoke(&self, session_id: Uuid) -> anyhow::Result<bool> {
+        async fn revoke(&self, session_id: Uuid) -> PortResult<bool> {
             let mut sessions = self.sessions.lock().await;
-            if let Some(entry) = sessions.get_mut(&session_id) {
-                if entry.record.revoked_at.is_none() {
-                    entry.record.revoked_at = Some(Utc::now());
-                    return Ok(true);
-                }
+            if let Some(entry) = sessions.get_mut(&session_id)
+                && entry.record.revoked_at.is_none()
+            {
+                entry.record.revoked_at = Some(Utc::now());
+                return Ok(true);
             }
             Ok(false)
         }
 
-        async fn revoke_by_digest(&self, token_digest: &str) -> anyhow::Result<bool> {
+        async fn revoke_by_digest(&self, token_digest: &str) -> PortResult<bool> {
             let id = {
                 let digests = self.digests.lock().await;
                 digests.get(token_digest).cloned()
@@ -483,7 +555,7 @@ mod tests {
             Ok(false)
         }
 
-        async fn revoke_all_for_user(&self, user_id: Uuid) -> anyhow::Result<()> {
+        async fn revoke_all_for_user(&self, user_id: Uuid) -> PortResult<()> {
             let mut sessions = self.sessions.lock().await;
             for entry in sessions
                 .values_mut()
@@ -494,11 +566,7 @@ mod tests {
             Ok(())
         }
 
-        async fn delete_expired(
-            &self,
-            before: DateTime<Utc>,
-            batch_size: i64,
-        ) -> anyhow::Result<u64> {
+        async fn delete_expired(&self, before: DateTime<Utc>, batch_size: i64) -> PortResult<u64> {
             let mut sessions = self.sessions.lock().await;
             let mut digests = self.digests.lock().await;
             let mut removed = 0u64;
@@ -530,32 +598,63 @@ mod tests {
             _name: &str,
             _token_hash: &str,
             _token_digest: &str,
-        ) -> anyhow::Result<ApiToken> {
-            bail!("not implemented")
+        ) -> PortResult<ApiToken> {
+            Err(anyhow::anyhow!("not implemented").into())
         }
 
-        async fn list_active(&self, _workspace_id: Uuid) -> anyhow::Result<Vec<ApiToken>> {
-            bail!("not implemented")
+        async fn list_active(&self, _workspace_id: Uuid) -> PortResult<Vec<ApiToken>> {
+            Err(anyhow::anyhow!("not implemented").into())
implemented").into()) } - async fn revoke(&self, _workspace_id: Uuid, _token_id: Uuid) -> anyhow::Result { - bail!("not implemented") + async fn revoke(&self, _workspace_id: Uuid, _token_id: Uuid) -> PortResult { + Err(anyhow::anyhow!("not implemented").into()) } - async fn find_by_digest(&self, _digest: &str) -> anyhow::Result> { + async fn find_by_digest(&self, _digest: &str) -> PortResult> { Ok(None) } - async fn touch_last_used(&self, _token_id: Uuid) -> anyhow::Result<()> { + async fn touch_last_used(&self, _token_id: Uuid) -> PortResult<()> { Ok(()) } } + #[derive(Debug, Default)] + struct NoopSecretHasher; + + impl SecretHasher for NoopSecretHasher { + fn hash_secret(&self, secret: &str) -> PortResult { + Ok(format!("h:{secret}")) + } + + fn verify_secret(&self, secret: &str, secret_hash: &str) -> PortResult { + Ok(secret_hash == format!("h:{secret}")) + } + } + + #[derive(Debug)] + struct NoopJwtCodec; + + impl JwtCodec for NoopJwtCodec { + fn decode(&self, _token: &str) -> Result { + Err(JwtDecodeError::Invalid) + } + + fn encode(&self, _claims: &JwtClaims) -> Result { + Ok("jwt".to_string()) + } + } + fn build_service() -> UserSessionService { let repo = Arc::new(InMemorySessionRepo::default()); - let token_validation = Arc::new(TokenValidationService::new(Arc::new(NoopApiTokenRepo))); - let auth = Arc::new(AuthService::new("secret", token_validation, 60)); - UserSessionService::new(repo, auth, 120, 600) + let hasher: Arc = Arc::new(NoopSecretHasher); + let token_validation = Arc::new(TokenValidationService::new( + Arc::new(NoopApiTokenRepo), + hasher.clone(), + )); + let jwt: Arc = Arc::new(NoopJwtCodec); + let auth = Arc::new(AuthService::new(jwt, token_validation, 60)); + UserSessionService::new(repo, hasher, auth, 120, 600) } #[tokio::test] diff --git a/api/crates/application/src/identity/services/mod.rs b/api/crates/application/src/identity/services/mod.rs new file mode 100644 index 00000000..de652f14 --- /dev/null +++ b/api/crates/application/src/identity/services/mod.rs @@ -0,0 +1,3 @@ +pub mod api_tokens; +pub mod auth; +pub mod user_shortcuts; diff --git a/api/src/application/services/user_shortcuts.rs b/api/crates/application/src/identity/services/user_shortcuts/mod.rs similarity index 52% rename from api/src/application/services/user_shortcuts.rs rename to api/crates/application/src/identity/services/user_shortcuts/mod.rs index 687567fe..86dcc2a1 100644 --- a/api/src/application/services/user_shortcuts.rs +++ b/api/crates/application/src/identity/services/user_shortcuts/mod.rs @@ -3,20 +3,65 @@ use std::sync::Arc; use serde_json::Value; use uuid::Uuid; -use crate::application::dto::user_shortcuts::UserShortcutProfileDto; -use crate::application::ports::user_shortcut_repository::UserShortcutRepository; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::user_shortcuts::get_shortcuts::GetUserShortcuts; -use crate::application::use_cases::user_shortcuts::update_shortcuts::{ +use crate::core::services::errors::ServiceError; +use crate::identity::dtos::UserShortcutProfileDto; +use crate::identity::ports::user_shortcuts::user_shortcut_repository::UserShortcutRepository; +use crate::identity::use_cases::user_shortcuts::get_shortcuts::GetUserShortcuts; +use crate::identity::use_cases::user_shortcuts::update_shortcuts::{ UpdateUserShortcuts, UpdateUserShortcutsError, UpdateUserShortcutsPayload, }; -use crate::domain::workspaces::permissions::{PERM_SHORTCUT_UPDATE, PermissionSet}; +use async_trait::async_trait; +use 
diff --git a/api/src/application/services/user_shortcuts.rs b/api/crates/application/src/identity/services/user_shortcuts/mod.rs
similarity index 52%
rename from api/src/application/services/user_shortcuts.rs
rename to api/crates/application/src/identity/services/user_shortcuts/mod.rs
index 687567fe..86dcc2a1 100644
--- a/api/src/application/services/user_shortcuts.rs
+++ b/api/crates/application/src/identity/services/user_shortcuts/mod.rs
@@ -3,20 +3,65 @@ use std::sync::Arc;
 use serde_json::Value;
 use uuid::Uuid;
 
-use crate::application::dto::user_shortcuts::UserShortcutProfileDto;
-use crate::application::ports::user_shortcut_repository::UserShortcutRepository;
-use crate::application::services::errors::ServiceError;
-use crate::application::use_cases::user_shortcuts::get_shortcuts::GetUserShortcuts;
-use crate::application::use_cases::user_shortcuts::update_shortcuts::{
+use crate::core::services::errors::ServiceError;
+use crate::identity::dtos::UserShortcutProfileDto;
+use crate::identity::ports::user_shortcuts::user_shortcut_repository::UserShortcutRepository;
+use crate::identity::use_cases::user_shortcuts::get_shortcuts::GetUserShortcuts;
+use crate::identity::use_cases::user_shortcuts::update_shortcuts::{
     UpdateUserShortcuts, UpdateUserShortcutsError, UpdateUserShortcutsPayload,
 };
-use crate::domain::workspaces::permissions::{PERM_SHORTCUT_UPDATE, PermissionSet};
+use async_trait::async_trait;
+use domain::access::permissions::PermissionSet;
+use domain::identity::policy;
 
 pub struct UserShortcutService {
     repo: Arc<dyn UserShortcutRepository>,
     max_payload_bytes: usize,
 }
 
+#[async_trait]
+pub trait UserShortcutServiceFacade: Send + Sync {
+    async fn get_profile(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Option<UserShortcutProfileDto>, ServiceError>;
+
+    async fn update_profile(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        bindings: Value,
+        leader_key: Option<String>,
+    ) -> Result<UserShortcutProfileDto, ServiceError>;
+}
+
+#[async_trait]
+impl UserShortcutServiceFacade for UserShortcutService {
+    async fn get_profile(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+    ) -> Result<Option<UserShortcutProfileDto>, ServiceError> {
+        self.get_profile(workspace_id, user_id, permissions).await
+    }
+
+    async fn update_profile(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        bindings: Value,
+        leader_key: Option<String>,
+    ) -> Result<UserShortcutProfileDto, ServiceError> {
+        self.update_profile(workspace_id, user_id, permissions, bindings, leader_key)
+            .await
+    }
+}
+
 impl UserShortcutService {
     pub fn new(repo: Arc<dyn UserShortcutRepository>, max_payload_bytes: usize) -> Self {
         Self {
@@ -72,9 +117,5 @@ fn ensure_shortcut_permission(
     _workspace_id: Uuid,
     permissions: &PermissionSet,
 ) -> Result<(), ServiceError> {
-    if permissions.allows(PERM_SHORTCUT_UPDATE) {
-        Ok(())
-    } else {
-        Err(ServiceError::Forbidden)
-    }
+    policy::ensure_shortcut_update_allowed(permissions).map_err(|_| ServiceError::Forbidden)
 }
diff --git a/api/src/application/use_cases/api_tokens/create_token.rs b/api/crates/application/src/identity/use_cases/api_tokens/create_token.rs
similarity index 75%
rename from api/src/application/use_cases/api_tokens/create_token.rs
rename to api/crates/application/src/identity/use_cases/api_tokens/create_token.rs
index 7b6866fe..21eed848 100644
--- a/api/src/application/use_cases/api_tokens/create_token.rs
+++ b/api/crates/application/src/identity/use_cases/api_tokens/create_token.rs
@@ -1,11 +1,13 @@
 use uuid::Uuid;
 
-use crate::application::dto::api_tokens::{ApiTokenDto, CreatedApiTokenDto};
-use crate::application::ports::api_token_repository::ApiTokenRepository;
-use crate::application::services::api_tokens::generate_api_token;
+use crate::identity::dtos::{ApiTokenDto, CreatedApiTokenDto};
+use crate::identity::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::services::api_tokens::generate_api_token;
 
 pub struct CreateApiToken<'a, R: ApiTokenRepository + ?Sized> {
     pub repo: &'a R,
+    pub hasher: &'a dyn SecretHasher,
 }
 
 impl<'a, R> CreateApiToken<'a, R>
@@ -18,7 +20,7 @@ where
         owner_id: Uuid,
         name: Option<&str>,
     ) -> anyhow::Result<CreatedApiTokenDto> {
-        let material = generate_api_token()?;
+        let material = generate_api_token(self.hasher)?;
         let friendly_name = name
             .and_then(|n| {
                 let trimmed = n.trim();
diff --git a/api/src/application/use_cases/api_tokens/list_tokens.rs b/api/crates/application/src/identity/use_cases/api_tokens/list_tokens.rs
similarity index 76%
rename from api/src/application/use_cases/api_tokens/list_tokens.rs
rename to api/crates/application/src/identity/use_cases/api_tokens/list_tokens.rs
index 6bb2941c..b0f06a19 100644
--- a/api/src/application/use_cases/api_tokens/list_tokens.rs
+++ b/api/crates/application/src/identity/use_cases/api_tokens/list_tokens.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;
 
-use crate::application::dto::api_tokens::ApiTokenDto;
-use crate::application::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::dtos::ApiTokenDto;
+use crate::identity::ports::api_token_repository::ApiTokenRepository;
 
 pub struct ListApiTokens<'a, R: ApiTokenRepository + ?Sized> {
     pub repo: &'a R,
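ensure_shortcut_permission above (and ensure_api_token_permission earlier) now route through domain::identity::policy, keeping the allow/deny rule in the domain crate and only the ServiceError mapping in the application layer. The policy helpers themselves are not shown in the diff; their shape is presumably something like this (permission name and error type are illustrative only):

    pub struct PermissionSet(Vec<String>);

    impl PermissionSet {
        fn allows(&self, perm: &str) -> bool {
            self.0.iter().any(|p| p == perm)
        }
    }

    pub mod policy {
        pub struct PolicyViolation;

        pub fn ensure_shortcut_update_allowed(
            permissions: &super::PermissionSet,
        ) -> Result<(), PolicyViolation> {
            if permissions.allows("shortcut:update") {
                Ok(())
            } else {
                Err(PolicyViolation)
            }
        }
    }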
diff --git a/api/src/application/use_cases/api_tokens/create_token.rs b/api/crates/application/src/identity/use_cases/api_tokens/create_token.rs
similarity index 75%
rename from api/src/application/use_cases/api_tokens/create_token.rs
rename to api/crates/application/src/identity/use_cases/api_tokens/create_token.rs
index 7b6866fe..21eed848 100644
--- a/api/src/application/use_cases/api_tokens/create_token.rs
+++ b/api/crates/application/src/identity/use_cases/api_tokens/create_token.rs
@@ -1,11 +1,13 @@
 use uuid::Uuid;

-use crate::application::dto::api_tokens::{ApiTokenDto, CreatedApiTokenDto};
-use crate::application::ports::api_token_repository::ApiTokenRepository;
-use crate::application::services::api_tokens::generate_api_token;
+use crate::identity::dtos::{ApiTokenDto, CreatedApiTokenDto};
+use crate::identity::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::services::api_tokens::generate_api_token;

 pub struct CreateApiToken<'a, R: ApiTokenRepository + ?Sized> {
     pub repo: &'a R,
+    pub hasher: &'a dyn SecretHasher,
 }

 impl<'a, R> CreateApiToken<'a, R>
@@ -18,7 +20,7 @@ where
         owner_id: Uuid,
         name: Option<&str>,
     ) -> anyhow::Result<CreatedApiTokenDto> {
-        let material = generate_api_token()?;
+        let material = generate_api_token(self.hasher)?;
         let friendly_name = name
             .and_then(|n| {
                 let trimmed = n.trim();
diff --git a/api/src/application/use_cases/api_tokens/list_tokens.rs b/api/crates/application/src/identity/use_cases/api_tokens/list_tokens.rs
similarity index 76%
rename from api/src/application/use_cases/api_tokens/list_tokens.rs
rename to api/crates/application/src/identity/use_cases/api_tokens/list_tokens.rs
index 6bb2941c..b0f06a19 100644
--- a/api/src/application/use_cases/api_tokens/list_tokens.rs
+++ b/api/crates/application/src/identity/use_cases/api_tokens/list_tokens.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;

-use crate::application::dto::api_tokens::ApiTokenDto;
-use crate::application::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::dtos::ApiTokenDto;
+use crate::identity::ports::api_token_repository::ApiTokenRepository;

 pub struct ListApiTokens<'a, R: ApiTokenRepository + ?Sized> {
     pub repo: &'a R,
diff --git a/api/src/application/use_cases/api_tokens/mod.rs b/api/crates/application/src/identity/use_cases/api_tokens/mod.rs
similarity index 100%
rename from api/src/application/use_cases/api_tokens/mod.rs
rename to api/crates/application/src/identity/use_cases/api_tokens/mod.rs
diff --git a/api/src/application/use_cases/api_tokens/revoke_token.rs b/api/crates/application/src/identity/use_cases/api_tokens/revoke_token.rs
similarity index 60%
rename from api/src/application/use_cases/api_tokens/revoke_token.rs
rename to api/crates/application/src/identity/use_cases/api_tokens/revoke_token.rs
index 7e429a94..ef6e29c7 100644
--- a/api/src/application/use_cases/api_tokens/revoke_token.rs
+++ b/api/crates/application/src/identity/use_cases/api_tokens/revoke_token.rs
@@ -1,6 +1,6 @@
 use uuid::Uuid;

-use crate::application::ports::api_token_repository::ApiTokenRepository;
+use crate::identity::ports::api_token_repository::ApiTokenRepository;

 pub struct RevokeApiToken<'a, R: ApiTokenRepository + ?Sized> {
     pub repo: &'a R,
@@ -11,6 +11,9 @@ where
     R: ApiTokenRepository + ?Sized,
 {
     pub async fn execute(&self, workspace_id: Uuid, token_id: Uuid) -> anyhow::Result {
-        self.repo.revoke(workspace_id, token_id).await
+        self.repo
+            .revoke(workspace_id, token_id)
+            .await
+            .map_err(Into::into)
     }
 }
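// Editor's sketch (not part of the diff): `generate_api_token` now receives
// the `SecretHasher` instead of hashing inline, but its body is not shown
// anywhere in this diff. A plausible shape, with *assumed* struct and field
// names, is roughly:

use rand::RngCore;

pub struct ApiTokenMaterial {
    pub plaintext: String,   // handed to the caller once, never persisted
    pub secret_hash: String, // persisted; produced by the injected hasher
}

pub fn generate_api_token(hasher: &dyn SecretHasher) -> anyhow::Result<ApiTokenMaterial> {
    // Assumed scheme: 32 random bytes, hex-encoded, as the bearer secret.
    let mut bytes = [0u8; 32];
    rand::thread_rng().fill_bytes(&mut bytes);
    let plaintext = hex::encode(bytes);
    // The `?` relies on the port error converting into anyhow::Error
    // (see the PortError sketch earlier).
    let secret_hash = hasher.hash_secret(&plaintext)?;
    Ok(ApiTokenMaterial {
        plaintext,
        secret_hash,
    })
}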
diff --git a/api/src/application/use_cases/auth/delete_account.rs b/api/crates/application/src/identity/use_cases/auth/delete_account.rs
similarity index 75%
rename from api/src/application/use_cases/auth/delete_account.rs
rename to api/crates/application/src/identity/use_cases/auth/delete_account.rs
index 28bd3d5a..6527d671 100644
--- a/api/src/application/use_cases/auth/delete_account.rs
+++ b/api/crates/application/src/identity/use_cases/auth/delete_account.rs
@@ -3,18 +3,20 @@
 use std::sync::Arc;

 use serde_json;
 use uuid::Uuid;

-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::git_repository::GitRepository;
-use crate::application::ports::git_workspace::GitWorkspacePort;
-use crate::application::ports::plugin_asset_store::PluginAssetStore;
-use crate::application::ports::plugin_installation_repository::PluginInstallationRepository;
-use crate::application::ports::plugin_repository::PluginRepository;
-use crate::application::ports::storage_projection_queue::{
+use crate::core::ports::storage::storage_projection_queue::{
     StorageDeleteJobMetadata, StorageJobReason, StorageProjectionJobKind, StorageProjectionQueue,
 };
-use crate::application::ports::user_repository::UserRepository;
-use crate::domain::workspaces::permissions::PermissionSet;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::files::files_repository::FilesRepository;
+use crate::git::ports::git_repository::GitRepository;
+use crate::git::ports::git_workspace::GitWorkspacePort;
+use crate::identity::ports::user_repository::UserRepository;
+use crate::plugins::ports::plugin_asset_store::PluginAssetStore;
+use crate::plugins::ports::plugin_installation_repository::PluginInstallationRepository;
+use crate::plugins::ports::plugin_repository::PluginRepository;
+use domain::access::permissions::PermissionSet;
+use domain::documents::doc_type::DocumentType;
+use domain::plugins::scope::{PluginRecordScope, PluginScope};

 pub struct DeleteAccount<'a, UR, DR, PIR, PR, GR, GW, SJ, FR>
 where
@@ -75,16 +77,18 @@
             .await?;

         self.plugin_repo
-            .delete_scoped_kv("user", &[user_id])
+            .delete_scoped_kv(PluginScope::User, &[user_id])
             .await?;
         self.plugin_repo
-            .delete_scoped_records("user", &[user_id])
+            .delete_scoped_records(PluginRecordScope::User, &[user_id])
             .await?;

         if !doc_ids.is_empty() {
-            self.plugin_repo.delete_scoped_kv("doc", &doc_ids).await?;
             self.plugin_repo
-                .delete_scoped_records("doc", &doc_ids)
+                .delete_scoped_kv(PluginScope::Doc, &doc_ids)
+                .await?;
+            self.plugin_repo
+                .delete_scoped_records(PluginRecordScope::Doc, &doc_ids)
                 .await?;
         }

@@ -94,7 +98,7 @@
                 .get_meta_for_owner(*doc_id, user_id)
                 .await?
             {
-                let attachment_paths = if meta.doc_type != "folder" {
+                let attachment_paths = if meta.doc_type != DocumentType::Folder {
                     Some(
                         self.files_repo
                             .list_storage_paths_for_document(*doc_id)
@@ -105,8 +109,8 @@
                 };
                 let delete_metadata = StorageDeleteJobMetadata {
                     workspace_id: meta.workspace_id,
-                    repo_path: Some(meta.desired_path.clone()),
-                    doc_type: meta.doc_type.clone(),
+                    repo_path: Some(meta.desired_path.as_str().to_string()),
+                    doc_type: meta.doc_type,
                     attachment_paths,
                     permission_snapshot: PermissionSet::all().to_vec(),
                     actor_id: Some(user_id),
@@ -117,9 +121,10 @@
                 })
                 .ok();
                 let reason_ref = reason.as_deref();
-                let kind = match meta.doc_type.as_str() {
-                    "folder" => StorageProjectionJobKind::DeleteFolder,
-                    _ => StorageProjectionJobKind::DeleteDoc,
+                let kind = if meta.doc_type == DocumentType::Folder {
+                    StorageProjectionJobKind::DeleteFolder
+                } else {
+                    StorageProjectionJobKind::DeleteDoc
                 };
                 if let Err(err) = match kind {
                     StorageProjectionJobKind::DeleteFolder => {
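// Editor's sketch (not part of the diff): `doc_type` changes from a free-form
// string to `domain::documents::doc_type::DocumentType`. Only `Document` and
// `Folder` plus `as_str`/`TryFrom<&str>` are attested by this diff; the
// derives and the error type below are assumptions:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DocumentType {
    Document,
    Folder,
}

impl DocumentType {
    pub fn as_str(self) -> &'static str {
        match self {
            DocumentType::Document => "document",
            DocumentType::Folder => "folder",
        }
    }
}

impl TryFrom<&str> for DocumentType {
    type Error = (); // placeholder; the real error type is not shown

    fn try_from(value: &str) -> Result<Self, Self::Error> {
        match value {
            "document" => Ok(DocumentType::Document),
            "folder" => Ok(DocumentType::Folder),
            _ => Err(()),
        }
    }
}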
diff --git a/api/src/application/use_cases/auth/login.rs b/api/crates/application/src/identity/use_cases/auth/login.rs
similarity index 69%
rename from api/src/application/use_cases/auth/login.rs
rename to api/crates/application/src/identity/use_cases/auth/login.rs
index 3e319312..154980ba 100644
--- a/api/src/application/use_cases/auth/login.rs
+++ b/api/crates/application/src/identity/use_cases/auth/login.rs
@@ -1,12 +1,9 @@
-use argon2::{
-    Argon2,
-    password_hash::{PasswordHash, PasswordVerifier},
-};
-
-use crate::application::ports::user_repository::{UserRepository, UserRow};
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::ports::user_repository::{UserRepository, UserRow};

 pub struct Login<'a, R: UserRepository + ?Sized> {
     pub repo: &'a R,
+    pub hasher: &'a dyn SecretHasher,
 }

 #[derive(Debug, Clone)]
@@ -25,11 +22,7 @@
             Some(hash) if !hash.is_empty() => hash,
             _ => return Ok(None),
         };
-        let parsed = PasswordHash::new(hash).map_err(|e| anyhow::anyhow!(e.to_string()))?;
-        if Argon2::default()
-            .verify_password(req.password.as_bytes(), &parsed)
-            .is_ok()
-        {
+        if self.hasher.verify_secret(&req.password, hash)? {
             Ok(Some(UserRow {
                 id: row.id,
                 email: row.email,
diff --git a/api/src/application/use_cases/auth/me.rs b/api/crates/application/src/identity/use_cases/auth/me.rs
similarity index 63%
rename from api/src/application/use_cases/auth/me.rs
rename to api/crates/application/src/identity/use_cases/auth/me.rs
index 04b70673..d1025d3a 100644
--- a/api/src/application/use_cases/auth/me.rs
+++ b/api/crates/application/src/identity/use_cases/auth/me.rs
@@ -1,6 +1,6 @@
 use uuid::Uuid;

-use crate::application::ports::user_repository::{UserRepository, UserRow};
+use crate::identity::ports::user_repository::{UserRepository, UserRow};

 pub struct GetMe<'a, R: UserRepository + ?Sized> {
     pub repo: &'a R,
@@ -8,6 +8,6 @@
 impl<'a, R: UserRepository + ?Sized> GetMe<'a, R> {
     pub async fn execute(&self, id: Uuid) -> anyhow::Result<Option<UserRow>> {
-        self.repo.find_by_id(id).await
+        self.repo.find_by_id(id).await.map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/auth/mod.rs b/api/crates/application/src/identity/use_cases/auth/mod.rs
similarity index 100%
rename from api/src/application/use_cases/auth/mod.rs
rename to api/crates/application/src/identity/use_cases/auth/mod.rs
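// Editor's sketch (not part of the diff): the argon2 code removed from
// login.rs/register.rs presumably moves behind `SecretHasher` in the
// infrastructure crate. A minimal adapter that reuses exactly the removed
// calls — the struct name and error mapping are assumptions:

use argon2::{
    Argon2,
    password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
};
use password_hash::rand_core::OsRng;

pub struct Argon2SecretHasher;

impl SecretHasher for Argon2SecretHasher {
    fn hash_secret(&self, secret: &str) -> PortResult<String> {
        let salt = SaltString::generate(&mut OsRng);
        let hash = Argon2::default()
            .hash_password(secret.as_bytes(), &salt)
            .map_err(|e| anyhow::anyhow!(e.to_string()))? // assumes From<anyhow::Error>
            .to_string();
        Ok(hash)
    }

    fn verify_secret(&self, secret: &str, secret_hash: &str) -> PortResult<bool> {
        let parsed =
            PasswordHash::new(secret_hash).map_err(|e| anyhow::anyhow!(e.to_string()))?;
        Ok(Argon2::default()
            .verify_password(secret.as_bytes(), &parsed)
            .is_ok())
    }
}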
diff --git a/api/src/application/use_cases/auth/register.rs b/api/crates/application/src/identity/use_cases/auth/register.rs
similarity index 62%
rename from api/src/application/use_cases/auth/register.rs
rename to api/crates/application/src/identity/use_cases/auth/register.rs
index 11aa2447..5ad8edbd 100644
--- a/api/src/application/use_cases/auth/register.rs
+++ b/api/crates/application/src/identity/use_cases/auth/register.rs
@@ -1,14 +1,11 @@
-use argon2::{
-    Argon2,
-    password_hash::{PasswordHasher, SaltString},
-};
-use password_hash::rand_core::OsRng;
 use uuid::Uuid;

-use crate::application::ports::user_repository::{UserRepository, UserRow};
+use crate::identity::ports::secret_hasher::SecretHasher;
+use crate::identity::ports::user_repository::{UserRepository, UserRow};

 pub struct Register<'a, R: UserRepository + ?Sized> {
     pub repo: &'a R,
+    pub hasher: &'a dyn SecretHasher,
 }

 #[derive(Debug, Clone)]
@@ -22,11 +19,7 @@
 impl<'a, R: UserRepository + ?Sized> Register<'a, R> {
     pub async fn execute(&self, req: &RegisterRequest) -> anyhow::Result<UserRow> {
-        let salt = SaltString::generate(&mut OsRng);
-        let hash = Argon2::default()
-            .hash_password(req.password.as_bytes(), &salt)
-            .map_err(|e| anyhow::anyhow!(e.to_string()))?
-            .to_string();
+        let hash = self.hasher.hash_secret(&req.password)?;
         let user = self
             .repo
             .create_user(
diff --git a/api/crates/application/src/identity/use_cases/mod.rs b/api/crates/application/src/identity/use_cases/mod.rs
new file mode 100644
index 00000000..de652f14
--- /dev/null
+++ b/api/crates/application/src/identity/use_cases/mod.rs
@@ -0,0 +1,3 @@
+pub mod api_tokens;
+pub mod auth;
+pub mod user_shortcuts;
diff --git a/api/src/application/use_cases/user_shortcuts/get_shortcuts.rs b/api/crates/application/src/identity/use_cases/user_shortcuts/get_shortcuts.rs
similarity index 73%
rename from api/src/application/use_cases/user_shortcuts/get_shortcuts.rs
rename to api/crates/application/src/identity/use_cases/user_shortcuts/get_shortcuts.rs
index f1a0e6b8..af929d14 100644
--- a/api/src/application/use_cases/user_shortcuts/get_shortcuts.rs
+++ b/api/crates/application/src/identity/use_cases/user_shortcuts/get_shortcuts.rs
@@ -1,7 +1,7 @@
 use uuid::Uuid;

-use crate::application::dto::user_shortcuts::UserShortcutProfileDto;
-use crate::application::ports::user_shortcut_repository::UserShortcutRepository;
+use crate::identity::dtos::UserShortcutProfileDto;
+use crate::identity::ports::user_shortcuts::user_shortcut_repository::UserShortcutRepository;

 pub struct GetUserShortcuts<'a, R: UserShortcutRepository + ?Sized> {
     pub repo: &'a R,
diff --git a/api/src/application/use_cases/user_shortcuts/mod.rs b/api/crates/application/src/identity/use_cases/user_shortcuts/mod.rs
similarity index 100%
rename from api/src/application/use_cases/user_shortcuts/mod.rs
rename to api/crates/application/src/identity/use_cases/user_shortcuts/mod.rs
diff --git a/api/src/application/use_cases/user_shortcuts/update_shortcuts.rs b/api/crates/application/src/identity/use_cases/user_shortcuts/update_shortcuts.rs
similarity index 78%
rename from api/src/application/use_cases/user_shortcuts/update_shortcuts.rs
rename to api/crates/application/src/identity/use_cases/user_shortcuts/update_shortcuts.rs
index 9c1d9b9b..368fc60b 100644
--- a/api/src/application/use_cases/user_shortcuts/update_shortcuts.rs
+++ b/api/crates/application/src/identity/use_cases/user_shortcuts/update_shortcuts.rs
@@ -3,8 +3,8 @@
 use serde_json::{Map, Value};
 use thiserror::Error;
 use uuid::Uuid;

-use crate::application::dto::user_shortcuts::UserShortcutProfileDto;
-use crate::application::ports::user_shortcut_repository::UserShortcutRepository;
+use crate::identity::dtos::UserShortcutProfileDto;
+use crate::identity::ports::user_shortcuts::user_shortcut_repository::UserShortcutRepository;

 #[derive(Debug, Error)]
 pub enum UpdateUserShortcutsError {
@@ -44,12 +44,12 @@
             }
         };

-        if let Some(ref leader) = payload.leader_key {
-            if leader.len() > 16 {
-                return Err(UpdateUserShortcutsError::Validation(
-                    "leader key is too long".into(),
-                ));
-            }
+        if let Some(leader) = payload.leader_key.as_deref()
+            && leader.len() > 16
+        {
+            return Err(UpdateUserShortcutsError::Validation(
+                "leader key is too long".into(),
+            ));
         }

         let encoded = serde_json::to_vec(&bindings)
@@ -65,7 +65,7 @@
             .repo
             .upsert(user_id, bindings, payload.leader_key)
             .await
-            .map_err(UpdateUserShortcutsError::Storage)?;
+            .map_err(|err| UpdateUserShortcutsError::Storage(anyhow::Error::from(err)))?;
         Ok(UserShortcutProfileDto::from(profile))
     }
 }
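// Editor's note (not part of the diff): the `update_shortcuts` change above
// relies on let-chains (`if let ... && cond`), which require the 2024 edition
// this workspace targets. The two forms are equivalent:

fn leader_too_long(leader_key: Option<&str>) -> bool {
    // nested form (before)
    if let Some(leader) = leader_key {
        if leader.len() > 16 {
            return true;
        }
    }
    // chained form (after)
    if let Some(leader) = leader_key
        && leader.len() > 16
    {
        return true;
    }
    false
}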
diff --git a/api/crates/application/src/lib.rs b/api/crates/application/src/lib.rs
new file mode 100644
index 00000000..ca32eb5b
--- /dev/null
+++ b/api/crates/application/src/lib.rs
@@ -0,0 +1,8 @@
+pub mod core;
+pub mod documents;
+pub mod git;
+pub mod identity;
+pub mod plugins;
+pub mod workspaces;
+
+pub use ::domain;
diff --git a/api/crates/application/src/plugins/dtos/mod.rs b/api/crates/application/src/plugins/dtos/mod.rs
new file mode 100644
index 00000000..1e08e1ac
--- /dev/null
+++ b/api/crates/application/src/plugins/dtos/mod.rs
@@ -0,0 +1,3 @@
+mod plugins;
+
+pub use plugins::*;
diff --git a/api/src/application/dto/plugins.rs b/api/crates/application/src/plugins/dtos/plugins.rs
similarity index 100%
rename from api/src/application/dto/plugins.rs
rename to api/crates/application/src/plugins/dtos/plugins.rs
diff --git a/api/crates/application/src/plugins/mod.rs b/api/crates/application/src/plugins/mod.rs
new file mode 100644
index 00000000..2e8e16cf
--- /dev/null
+++ b/api/crates/application/src/plugins/mod.rs
@@ -0,0 +1,4 @@
+pub mod dtos;
+pub mod ports;
+pub mod services;
+pub mod use_cases;
diff --git a/api/crates/application/src/plugins/ports/mod.rs b/api/crates/application/src/plugins/ports/mod.rs
new file mode 100644
index 00000000..470f0fe9
--- /dev/null
+++ b/api/crates/application/src/plugins/ports/mod.rs
@@ -0,0 +1,8 @@
+pub mod plugin_asset_store;
+pub mod plugin_event_publisher;
+pub mod plugin_event_subscriber;
+pub mod plugin_installation_repository;
+pub mod plugin_installer;
+pub mod plugin_package_fetcher;
+pub mod plugin_repository;
+pub mod plugin_runtime;
diff --git a/api/src/application/ports/plugin_asset_store.rs b/api/crates/application/src/plugins/ports/plugin_asset_store.rs
similarity index 64%
rename from api/src/application/ports/plugin_asset_store.rs
rename to api/crates/application/src/plugins/ports/plugin_asset_store.rs
index bbad66cc..f1d39a82 100644
--- a/api/src/application/ports/plugin_asset_store.rs
+++ b/api/crates/application/src/plugins/ports/plugin_asset_store.rs
@@ -2,6 +2,8 @@
 use async_trait::async_trait;
 use serde_json::Value;
 use uuid::Uuid;

+use crate::core::ports::errors::PortResult;
+
 #[derive(Debug, Clone)]
 pub struct PluginAssetPayload {
     pub bytes: Vec<u8>,
@@ -14,6 +16,13 @@ pub enum PluginAssetStoreScope<'a> {
     User { owner_id: &'a Uuid },
 }

+#[derive(Debug, Clone)]
+pub struct LatestGlobalManifest {
+    pub plugin_id: String,
+    pub version: String,
+    pub manifest: Value,
+}
+
 #[async_trait]
 pub trait PluginAssetStore: Send + Sync {
     async fn fetch_asset(
@@ -22,16 +31,16 @@
         plugin_id: &str,
         version: &str,
         relative_path: &str,
-    ) -> anyhow::Result<PluginAssetPayload>;
+    ) -> PortResult<PluginAssetPayload>;

-    async fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> anyhow::Result<()>;
+    async fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> PortResult<()>;

-    async fn list_latest_global_manifests(&self) -> anyhow::Result<Vec<(String, String, Value)>>;
+    async fn list_latest_global_manifests(&self) -> PortResult<Vec<LatestGlobalManifest>>;

     async fn load_user_manifest(
         &self,
         user_id: &Uuid,
         plugin_id: &str,
         version: &str,
-    ) -> anyhow::Result<Option<Value>>;
+    ) -> PortResult<Option<Value>>;
 }
diff --git a/api/src/application/ports/plugin_event_publisher.rs b/api/crates/application/src/plugins/ports/plugin_event_publisher.rs
similarity index 70%
rename from api/src/application/ports/plugin_event_publisher.rs
rename to api/crates/application/src/plugins/ports/plugin_event_publisher.rs
index 5810fc9c..69241ff4 100644
--- a/api/src/application/ports/plugin_event_publisher.rs
+++ b/api/crates/application/src/plugins/ports/plugin_event_publisher.rs
@@ -2,6 +2,8 @@
 use async_trait::async_trait;
 use serde_json::Value;
 use uuid::Uuid;

+use crate::core::ports::errors::PortResult;
+
 #[derive(Debug, Clone)]
 pub struct PluginScopedEvent {
     pub user_id: Option<Uuid>,
@@ -11,5 +13,5 @@
 #[async_trait]
 pub trait PluginEventPublisher: Send + Sync {
-    async fn publish(&self, event: &PluginScopedEvent) -> anyhow::Result<()>;
+    async fn publish(&self, event: &PluginScopedEvent) -> PortResult<()>;
 }
diff --git a/api/crates/application/src/plugins/ports/plugin_event_subscriber.rs b/api/crates/application/src/plugins/ports/plugin_event_subscriber.rs
new file mode 100644
index 00000000..1c50bba8
--- /dev/null
+++ b/api/crates/application/src/plugins/ports/plugin_event_subscriber.rs
@@ -0,0 +1,10 @@
+use async_trait::async_trait;
+use futures_util::stream::BoxStream;
+
+use crate::core::ports::errors::PortResult;
+use crate::plugins::ports::plugin_event_publisher::PluginScopedEvent;
+
+#[async_trait]
+pub trait PluginEventSubscriber: Send + Sync {
+    async fn subscribe(&self) -> PortResult<BoxStream<'static, PluginScopedEvent>>;
+}
diff --git a/api/src/application/ports/plugin_installation_repository.rs b/api/crates/application/src/plugins/ports/plugin_installation_repository.rs
similarity index 58%
rename from api/src/application/ports/plugin_installation_repository.rs
rename to api/crates/application/src/plugins/ports/plugin_installation_repository.rs
index 6c378ba4..8fa05238 100644
--- a/api/src/application/ports/plugin_installation_repository.rs
+++ b/api/crates/application/src/plugins/ports/plugin_installation_repository.rs
@@ -1,14 +1,17 @@
 use async_trait::async_trait;
 use uuid::Uuid;

+use crate::core::ports::errors::PortResult;
+use domain::plugins::scope::{PluginInstallationStatus, PluginScope};
+
 #[derive(Debug, Clone)]
 pub struct PluginInstallation {
     pub workspace_id: Uuid,
     pub plugin_id: String,
     pub version: String,
-    pub scope: String,
+    pub scope: PluginScope,
     pub origin_url: Option<String>,
-    pub status: String,
+    pub status: PluginInstallationStatus,
     pub installed_at: chrono::DateTime<chrono::Utc>,
     pub updated_at: chrono::DateTime<chrono::Utc>,
 }
@@ -20,19 +23,16 @@
 pub trait PluginInstallationRepository: Send + Sync {
     async fn upsert(
         &self,
         workspace_id: Uuid,
         plugin_id: &str,
         version: &str,
-        scope: &str,
+        scope: PluginScope,
         origin_url: Option<&str>,
-        status: &str,
-    ) -> anyhow::Result<()>;
+        status: PluginInstallationStatus,
+    ) -> PortResult<()>;

-    async fn list_for_workspace(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<PluginInstallation>>;
+    async fn list_for_workspace(&self, workspace_id: Uuid) -> PortResult<Vec<PluginInstallation>>;

-    async fn list_all(&self) -> anyhow::Result<Vec<PluginInstallation>>;
+    async fn list_all(&self) -> PortResult<Vec<PluginInstallation>>;

-    async fn remove(&self, workspace_id: Uuid, plugin_id: &str) -> anyhow::Result<bool>;
+    async fn remove(&self, workspace_id: Uuid, plugin_id: &str) -> PortResult<bool>;

-    async fn remove_all_for_workspace(&self, workspace_id: Uuid) -> anyhow::Result<()>;
+    async fn remove_all_for_workspace(&self, workspace_id: Uuid) -> PortResult<()>;
 }
diff --git a/api/src/application/ports/plugin_installer.rs b/api/crates/application/src/plugins/ports/plugin_installer.rs
similarity index 100%
rename from api/src/application/ports/plugin_installer.rs
rename to api/crates/application/src/plugins/ports/plugin_installer.rs
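// Editor's sketch (not part of the diff): a consumer of the new
// `PluginEventSubscriber` port drains the boxed stream; `StreamExt::next`
// comes from futures-util, already a dependency. The forwarding target is
// illustrative, and the `?` assumes the port error converts to anyhow:

use futures_util::StreamExt;

async fn forward_plugin_events(subscriber: &dyn PluginEventSubscriber) -> anyhow::Result<()> {
    let mut events = subscriber.subscribe().await?;
    while let Some(event) = events.next().await {
        // e.g. fan out to websocket sessions scoped to the event's workspace
        tracing::debug!(workspace_id = ?event.workspace_id, "plugin event received");
    }
    Ok(())
}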
diff --git a/api/crates/application/src/plugins/ports/plugin_package_fetcher.rs b/api/crates/application/src/plugins/ports/plugin_package_fetcher.rs
new file mode 100644
index 00000000..65d7abf6
--- /dev/null
+++ b/api/crates/application/src/plugins/ports/plugin_package_fetcher.rs
@@ -0,0 +1,8 @@
+use async_trait::async_trait;
+
+use crate::core::ports::errors::PortResult;
+
+#[async_trait]
+pub trait PluginPackageFetcher: Send + Sync {
+    async fn fetch(&self, url: &str, token: Option<&str>) -> PortResult<Vec<u8>>;
+}
diff --git a/api/src/application/ports/plugin_repository.rs b/api/crates/application/src/plugins/ports/plugin_repository.rs
similarity index 56%
rename from api/src/application/ports/plugin_repository.rs
rename to api/crates/application/src/plugins/ports/plugin_repository.rs
index bf228b95..2bd064eb 100644
--- a/api/src/application/ports/plugin_repository.rs
+++ b/api/crates/application/src/plugins/ports/plugin_repository.rs
@@ -2,11 +2,14 @@
 use async_trait::async_trait;
 use serde_json::Value as JsonValue;
 use uuid::Uuid;

+use crate::core::ports::errors::PortResult;
+use domain::plugins::scope::{PluginRecordScope, PluginScope};
+
 #[derive(Debug, Clone)]
 pub struct PluginRecord {
     pub id: Uuid,
     pub plugin: String,
-    pub scope: String,
+    pub scope: PluginRecordScope,
     pub scope_id: Uuid,
     pub kind: String,
     pub data: JsonValue,
@@ -20,50 +23,54 @@
 pub trait PluginRepository: Send + Sync {
     async fn kv_get(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginScope,
         scope_id: Option<Uuid>,
         key: &str,
-    ) -> anyhow::Result<Option<JsonValue>>;
+    ) -> PortResult<Option<JsonValue>>;

     async fn kv_set(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginScope,
         scope_id: Option<Uuid>,
         key: &str,
         value: &JsonValue,
-    ) -> anyhow::Result<()>;
+    ) -> PortResult<()>;

     // Records
     async fn insert_record(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginRecordScope,
         scope_id: Uuid,
         kind: &str,
         data: &JsonValue,
-    ) -> anyhow::Result<PluginRecord>;
+    ) -> PortResult<PluginRecord>;

     async fn update_record_data(
         &self,
         record_id: Uuid,
         patch: &JsonValue,
-    ) -> anyhow::Result<Option<PluginRecord>>;
+    ) -> PortResult<Option<PluginRecord>>;

-    async fn delete_record(&self, record_id: Uuid) -> anyhow::Result<bool>;
+    async fn delete_record(&self, record_id: Uuid) -> PortResult<bool>;

-    async fn get_record(&self, record_id: Uuid) -> anyhow::Result<Option<PluginRecord>>;
+    async fn get_record(&self, record_id: Uuid) -> PortResult<Option<PluginRecord>>;

     async fn list_records(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginRecordScope,
         scope_id: Uuid,
         kind: &str,
         limit: i64,
         offset: i64,
-    ) -> anyhow::Result<Vec<PluginRecord>>;
+    ) -> PortResult<Vec<PluginRecord>>;

-    async fn delete_scoped_kv(&self, scope: &str, scope_ids: &[Uuid]) -> anyhow::Result<()>;
+    async fn delete_scoped_kv(&self, scope: PluginScope, scope_ids: &[Uuid]) -> PortResult<()>;

-    async fn delete_scoped_records(&self, scope: &str, scope_ids: &[Uuid]) -> anyhow::Result<()>;
+    async fn delete_scoped_records(
+        &self,
+        scope: PluginRecordScope,
+        scope_ids: &[Uuid],
+    ) -> PortResult<()>;
 }
diff --git a/api/src/application/ports/plugin_runtime.rs b/api/crates/application/src/plugins/ports/plugin_runtime.rs
similarity index 70%
rename from api/src/application/ports/plugin_runtime.rs
rename to api/crates/application/src/plugins/ports/plugin_runtime.rs
index f76f6888..71be21d6 100644
--- a/api/src/application/ports/plugin_runtime.rs
+++ b/api/crates/application/src/plugins/ports/plugin_runtime.rs
@@ -1,8 +1,10 @@
 use async_trait::async_trait;

-use crate::application::dto::plugins::ExecResult;
+use crate::plugins::dtos::ExecResult;
 use uuid::Uuid;

+use crate::core::ports::errors::PortResult;
+
 #[async_trait]
 pub trait PluginRuntime: Send + Sync {
     async fn execute(
@@ -11,7 +13,7 @@
         plugin: &str,
         action: &str,
         payload: &serde_json::Value,
-    ) -> anyhow::Result<Option<ExecResult>>;
+    ) -> PortResult<Option<ExecResult>>;

     async fn render_placeholder(
         &self,
@@ -19,11 +21,11 @@
         plugin: &str,
         function: &str,
         request: &serde_json::Value,
-    ) -> anyhow::Result<Option<String>>;
+    ) -> PortResult<Option<String>>;

     async fn permissions(
         &self,
         user_id: Option<Uuid>,
         plugin: &str,
-    ) -> anyhow::Result<Option<HashSet<String>>>;
+    ) -> PortResult<Option<HashSet<String>>>;
 }
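// Editor's sketch (not part of the diff): the stringly-typed scopes become
// enums in `domain::plugins::scope`. The variants below are the ones this
// diff actually uses (`Global`/`User`/`Doc`, installation status `Enabled`);
// extra variants and the derives are assumptions:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PluginScope {
    Global,
    User,
    Doc,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PluginRecordScope {
    User,
    Doc,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PluginInstallationStatus {
    Enabled,
    Disabled, // assumed counterpart; only `Enabled` appears in this diff
}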
diff --git a/api/src/application/services/plugins/asset_signer.rs b/api/crates/application/src/plugins/services/asset_signer.rs
similarity index 100%
rename from api/src/application/services/plugins/asset_signer.rs
rename to api/crates/application/src/plugins/services/asset_signer.rs
diff --git a/api/crates/application/src/plugins/services/data.rs b/api/crates/application/src/plugins/services/data.rs
new file mode 100644
index 00000000..ec95f913
--- /dev/null
+++ b/api/crates/application/src/plugins/services/data.rs
@@ -0,0 +1,227 @@
+use std::sync::Arc;
+
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use crate::plugins::ports::plugin_repository::{PluginRecord, PluginRepository};
+use crate::plugins::use_cases::kv::{GetPluginKv, PutPluginKv};
+use crate::plugins::use_cases::records::{
+    CreatePluginRecord, DeletePluginRecord, GetPluginRecord, ListPluginRecords, UpdatePluginRecord,
+};
+use async_trait::async_trait;
+use domain::plugins::scope::{PluginRecordScope, PluginScope};
+
+pub struct PluginDataService {
+    repo: Arc<dyn PluginRepository>,
+}
+
+#[async_trait]
+pub trait PluginDataServiceFacade: Send + Sync {
+    async fn list_records(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<PluginRecord>, ServiceError>;
+
+    async fn create_record(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        data: &serde_json::Value,
+    ) -> Result<PluginRecord, ServiceError>;
+
+    async fn get_record(&self, record_id: Uuid) -> Result<Option<PluginRecord>, ServiceError>;
+
+    async fn update_record(
+        &self,
+        record_id: Uuid,
+        patch: &serde_json::Value,
+    ) -> Result<Option<PluginRecord>, ServiceError>;
+
+    async fn delete_record(&self, record_id: Uuid) -> Result<bool, ServiceError>;
+
+    async fn get_kv(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+    ) -> Result<Option<serde_json::Value>, ServiceError>;
+
+    async fn put_kv(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+        value: &serde_json::Value,
+    ) -> Result<(), ServiceError>;
+}
+
+#[async_trait]
+impl PluginDataServiceFacade for PluginDataService {
+    async fn list_records(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<PluginRecord>, ServiceError> {
+        self.list_records(plugin, scope, scope_id, kind, limit, offset)
+            .await
+    }
+
+    async fn create_record(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        data: &serde_json::Value,
+    ) -> Result<PluginRecord, ServiceError> {
+        self.create_record(plugin, scope, scope_id, kind, data)
+            .await
+    }
+
+    async fn get_record(&self, record_id: Uuid) -> Result<Option<PluginRecord>, ServiceError> {
+        self.get_record(record_id).await
+    }
+
+    async fn update_record(
+        &self,
+        record_id: Uuid,
+        patch: &serde_json::Value,
+    ) -> Result<Option<PluginRecord>, ServiceError> {
+        self.update_record(record_id, patch).await
+    }
+
+    async fn delete_record(&self, record_id: Uuid) -> Result<bool, ServiceError> {
+        self.delete_record(record_id).await
+    }
+
+    async fn get_kv(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+    ) -> Result<Option<serde_json::Value>, ServiceError> {
+        self.get_kv(plugin, scope, scope_id, key).await
+    }
+
+    async fn put_kv(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+        value: &serde_json::Value,
+    ) -> Result<(), ServiceError> {
+        self.put_kv(plugin, scope, scope_id, key, value).await
+    }
+}
+
+impl PluginDataService {
+    pub fn new(repo: Arc<dyn PluginRepository>) -> Self {
+        Self { repo }
+    }
+
+    pub async fn list_records(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<PluginRecord>, ServiceError> {
+        let uc = ListPluginRecords {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(plugin, scope, scope_id, kind, limit, offset)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn create_record(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        data: &serde_json::Value,
+    ) -> Result<PluginRecord, ServiceError> {
+        let uc = CreatePluginRecord {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(plugin, scope, scope_id, kind, data)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn get_record(&self, record_id: Uuid) -> Result<Option<PluginRecord>, ServiceError> {
+        let uc = GetPluginRecord {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(record_id).await.map_err(ServiceError::from)
+    }
+
+    pub async fn update_record(
+        &self,
+        record_id: Uuid,
+        patch: &serde_json::Value,
+    ) -> Result<Option<PluginRecord>, ServiceError> {
+        let uc = UpdatePluginRecord {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(record_id, patch)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn delete_record(&self, record_id: Uuid) -> Result<bool, ServiceError> {
+        let uc = DeletePluginRecord {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(record_id).await.map_err(ServiceError::from)
+    }
+
+    pub async fn get_kv(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+    ) -> Result<Option<serde_json::Value>, ServiceError> {
+        let uc = GetPluginKv {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(plugin, scope, scope_id, key)
+            .await
+            .map_err(ServiceError::from)
+    }
+
+    pub async fn put_kv(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+        value: &serde_json::Value,
+    ) -> Result<(), ServiceError> {
+        let uc = PutPluginKv {
+            repo: self.repo.as_ref(),
+        };
+        uc.execute(plugin, scope, scope_id, key, value)
+            .await
+            .map_err(ServiceError::from)
+    }
+}
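// Editor's sketch (not part of the diff): wiring and using the new
// `PluginDataService`; repository construction is elided and the plugin id
// and key are illustrative:

use std::sync::Arc;

async fn demo(repo: Arc<dyn PluginRepository>, doc_id: uuid::Uuid) -> Result<(), ServiceError> {
    let data = PluginDataService::new(repo);
    data.put_kv(
        "com.example.todo", // hypothetical plugin id
        PluginScope::Doc,
        Some(doc_id),
        "state",
        &serde_json::json!({ "open": 3 }),
    )
    .await?;
    let state = data
        .get_kv("com.example.todo", PluginScope::Doc, Some(doc_id), "state")
        .await?;
    assert!(state.is_some());
    Ok(())
}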
diff --git a/api/crates/application/src/plugins/services/execution.rs b/api/crates/application/src/plugins/services/execution.rs
new file mode 100644
index 00000000..fb50bbef
--- /dev/null
+++ b/api/crates/application/src/plugins/services/execution.rs
@@ -0,0 +1,111 @@
+use std::sync::Arc;
+
+use uuid::Uuid;
+
+use crate::core::services::errors::ServiceError;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::plugins::dtos::ExecResult;
+use crate::plugins::ports::plugin_repository::PluginRepository;
+use crate::plugins::ports::plugin_runtime::PluginRuntime;
+use crate::plugins::use_cases::exec_action::ExecutePluginAction;
+use async_trait::async_trait;
+use domain::access::permissions::PermissionSet;
+
+pub struct PluginExecutionService {
+    plugin_repo: Arc<dyn PluginRepository>,
+    document_repo: Arc<dyn DocumentRepository>,
+    runtime: Arc<dyn PluginRuntime>,
+    authorization: Arc,
+}
+
+#[async_trait]
+pub trait PluginExecutionServiceFacade: Send + Sync {
+    #[allow(clippy::too_many_arguments)]
+    async fn execute_action(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        plugin: &str,
+        action: &str,
+        payload: Option<serde_json::Value>,
+        allowed_doc_id: Option<Uuid>,
+        actor: &crate::core::services::access::Actor,
+    ) -> Result<Option<ExecResult>, ServiceError>;
+}
+
+#[async_trait]
+impl PluginExecutionServiceFacade for PluginExecutionService {
+    #[allow(clippy::too_many_arguments)]
+    async fn execute_action(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        plugin: &str,
+        action: &str,
+        payload: Option<serde_json::Value>,
+        allowed_doc_id: Option<Uuid>,
+        actor: &crate::core::services::access::Actor,
+    ) -> Result<Option<ExecResult>, ServiceError> {
+        self.execute_action(
+            workspace_id,
+            user_id,
+            permissions,
+            plugin,
+            action,
+            payload,
+            allowed_doc_id,
+            actor,
+        )
+        .await
+    }
+}
+
+impl PluginExecutionService {
+    pub fn new(
+        plugin_repo: Arc<dyn PluginRepository>,
+        document_repo: Arc<dyn DocumentRepository>,
+        runtime: Arc<dyn PluginRuntime>,
+        authorization: Arc,
+    ) -> Self {
+        Self {
+            plugin_repo,
+            document_repo,
+            runtime,
+            authorization,
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    pub async fn execute_action(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        plugin: &str,
+        action: &str,
+        payload: Option<serde_json::Value>,
+        allowed_doc_id: Option<Uuid>,
+        actor: &crate::core::services::access::Actor,
+    ) -> Result<Option<ExecResult>, ServiceError> {
+        let uc = ExecutePluginAction {
+            runtime: self.runtime.as_ref(),
+            plugin_repo: self.plugin_repo.as_ref(),
+            document_repo: self.document_repo.as_ref(),
+            authorization: self.authorization.as_ref(),
+        };
+        uc.execute(
+            workspace_id,
+            user_id,
+            permissions,
+            plugin,
+            action,
+            payload,
+            allowed_doc_id,
+            actor,
+        )
+        .await
+        .map_err(ServiceError::from)
+    }
+}
diff --git a/api/src/application/services/plugins/management.rs b/api/crates/application/src/plugins/services/management.rs
similarity index 74%
rename from api/src/application/services/plugins/management.rs
rename to api/crates/application/src/plugins/services/management.rs
index 96a02181..36e09465 100644
--- a/api/src/application/services/plugins/management.rs
+++ b/api/crates/application/src/plugins/services/management.rs
@@ -4,26 +4,27 @@
 use serde_json::{Value, json};
 use tracing::warn;
 use uuid::Uuid;

-use crate::application::ports::plugin_asset_store::{
-    PluginAssetPayload, PluginAssetStore, PluginAssetStoreScope,
+use crate::core::services::errors::ServiceError;
+use crate::plugins::ports::plugin_asset_store::{
+    LatestGlobalManifest, PluginAssetPayload, PluginAssetStore, PluginAssetStoreScope,
 };
-use crate::application::ports::plugin_event_publisher::{PluginEventPublisher, PluginScopedEvent};
-use crate::application::ports::plugin_installation_repository::PluginInstallationRepository;
-use crate::application::ports::plugin_installer::{InstalledPlugin, PluginInstaller};
-use crate::application::ports::plugin_package_fetcher::PluginPackageFetcher;
-use crate::application::services::errors::ServiceError;
-use crate::application::services::plugins::asset_signer::{AssetScope, AssetSigner};
-use crate::application::use_cases::plugins::install_from_url::{
-    InstallPluginError, InstallPluginFromUrl,
-};
-use crate::domain::workspaces::permissions::PermissionSet;
+use crate::plugins::ports::plugin_event_publisher::{PluginEventPublisher, PluginScopedEvent};
+use crate::plugins::ports::plugin_installation_repository::PluginInstallationRepository;
+use crate::plugins::ports::plugin_installer::{InstalledPlugin, PluginInstaller};
+use crate::plugins::ports::plugin_package_fetcher::PluginPackageFetcher;
+use crate::plugins::services::asset_signer::{AssetScope, AssetSigner};
+use crate::plugins::use_cases::install_from_url::{InstallPluginError, InstallPluginFromUrl};
+use async_trait::async_trait;
+use domain::access::permissions::PermissionSet;
+use domain::plugins::events::PluginEventKind;
+use domain::plugins::scope::{PluginInstallationStatus, PluginScope};

 #[derive(Debug, Clone)]
 pub struct PluginManifestItem {
     pub id: String,
     pub name: Option<String>,
     pub version: String,
-    pub scope: String,
+    pub scope: PluginScope,
     pub mounts: Vec,
     pub frontend: Value,
     pub permissions: Vec,
@@ -61,6 +62,78 @@ pub struct PluginManagementService {
     plugin_installer: Arc<dyn PluginInstaller>,
 }

+#[async_trait]
+pub trait PluginManagementServiceFacade: Send + Sync {
+    async fn install_from_url(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        url: &str,
+        token: Option<&str>,
+    ) -> Result<InstalledPlugin, ServiceError>;
+
+    async fn uninstall(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        plugin_id: &str,
+    ) -> Result<(), ServiceError>;
+
+    async fn manifests_for_workspace(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+    ) -> Result<Vec<PluginManifestItem>, ServiceError>;
+
+    async fn fetch_asset(
+        &self,
+        request: PluginAssetRequest<'_>,
+    ) -> Result<PluginAssetPayload, ServiceError>;
+}
+
+#[async_trait]
+impl PluginManagementServiceFacade for PluginManagementService {
+    async fn install_from_url(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        url: &str,
+        token: Option<&str>,
+    ) -> Result<InstalledPlugin, ServiceError> {
+        self.install_from_url(workspace_id, user_id, permissions, url, token)
+            .await
+    }
+
+    async fn uninstall(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+        permissions: &PermissionSet,
+        plugin_id: &str,
+    ) -> Result<(), ServiceError> {
+        self.uninstall(workspace_id, user_id, permissions, plugin_id)
+            .await
+    }
+
+    async fn manifests_for_workspace(
+        &self,
+        workspace_id: Uuid,
+        user_id: Uuid,
+    ) -> Result<Vec<PluginManifestItem>, ServiceError> {
+        self.manifests_for_workspace(workspace_id, user_id).await
+    }
+
+    async fn fetch_asset(
+        &self,
+        request: PluginAssetRequest<'_>,
+    ) -> Result<PluginAssetPayload, ServiceError> {
+        self.fetch_asset(request).await
+    }
+}
+
 impl PluginManagementService {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
@@ -112,7 +185,12 @@
             .list_latest_global_manifests()
             .await
             .map_err(ServiceError::from)?;
-        for (plugin_id, version, manifest) in global {
+        for LatestGlobalManifest {
+            plugin_id,
+            version,
+            manifest,
+        } in global
+        {
             if let Some(item) = self.build_manifest_item(
                 &plugin_id,
                 &version,
@@ -129,7 +207,10 @@
             .list_for_workspace(workspace_id)
             .await
             .map_err(ServiceError::from)?;
-        for inst in installs.into_iter().filter(|i| i.status == "enabled") {
+        for inst in installs
+            .into_iter()
+            .filter(|i| i.status == PluginInstallationStatus::Enabled)
+        {
             match self
                 .assets
                 .load_user_manifest(&workspace_id, &inst.plugin_id, &inst.version)
@@ -160,8 +241,8 @@
         }

         items.sort_by(|a, b| {
-            let scope_a = if a.scope == "user" { 0 } else { 1 };
-            let scope_b = if b.scope == "user" { 0 } else { 1 };
+            let scope_a = if a.scope == PluginScope::User { 0 } else { 1 };
+            let scope_b = if b.scope == PluginScope::User { 0 } else { 1 };
             scope_a
                 .cmp(&scope_b)
                 .then_with(|| a.id.cmp(&b.id))
@@ -201,7 +282,7 @@
             user_id: Some(user_id),
             workspace_id: Some(workspace_id),
             payload: json!({
-                "event": "uninstalled",
+                "event": PluginEventKind::Uninstalled.as_str(),
                 "id": plugin_id,
                 "workspace_id": workspace_id,
             }),
@@ -261,7 +342,7 @@
                 if err.downcast_ref::().is_some() {
                     ServiceError::NotFound
                 } else {
-                    ServiceError::Unexpected(err)
+                    ServiceError::Unexpected(err.into())
                 }
             })
     }
@@ -320,7 +401,6 @@
             .and_then(|x| x.as_str())
             .map(|s| s.to_string());

-        let scope_label = scope.as_str().to_string();
         let signer_scope = match scope {
             ManifestScope::Global => AssetScope::Global,
             ManifestScope::User { user_id } => AssetScope::User {
@@ -357,7 +437,7 @@
             id: id.to_string(),
             name,
             version: version.to_string(),
-            scope: scope_label,
+            scope: scope.as_plugin_scope(),
             mounts,
             frontend,
             permissions,
@@ -376,10 +456,10 @@
 }

 impl ManifestScope {
-    fn as_str(&self) -> &'static str {
+    fn as_plugin_scope(&self) -> PluginScope {
         match self {
-            ManifestScope::Global => "global",
-            ManifestScope::User { .. } => "user",
+            ManifestScope::Global => PluginScope::Global,
+            ManifestScope::User { .. } => PluginScope::User,
         }
     }
 }
@@ -387,7 +467,7 @@
 pub fn validate_plugin_id(id: &str) -> Result<(), ServiceError> {
     const MAX_LEN: usize = 128;
     if id.is_empty() || id.len() > MAX_LEN {
-        return Err(ServiceError::BadRequest("invalid plugin id"));
+        return Err(ServiceError::BadRequest("invalid_plugin_id"));
     }
     if id
         .chars()
@@ -395,14 +475,14 @@
     {
         Ok(())
     } else {
-        Err(ServiceError::BadRequest("invalid plugin id"))
+        Err(ServiceError::BadRequest("invalid_plugin_id"))
     }
 }

 pub fn validate_plugin_version(version: &str) -> Result<(), ServiceError> {
     const MAX_LEN: usize = 128;
     if version.is_empty() || version.len() > MAX_LEN {
-        return Err(ServiceError::BadRequest("invalid plugin version"));
+        return Err(ServiceError::BadRequest("invalid_plugin_version"));
     }
     if version
         .chars()
@@ -410,7 +490,7 @@
     {
         Ok(())
     } else {
-        Err(ServiceError::BadRequest("invalid plugin version"))
+        Err(ServiceError::BadRequest("invalid_plugin_version"))
     }
 }

@@ -421,13 +501,13 @@
     }
     trimmed = trimmed.trim_start_matches('/');
     if trimmed.is_empty() || trimmed.contains("..") || trimmed.contains('\\') {
-        return Err(ServiceError::BadRequest("invalid manifest path"));
+        return Err(ServiceError::BadRequest("invalid_manifest_path"));
     }
     if trimmed
         .split('/')
         .any(|segment| segment.is_empty() || segment == "." || segment == "..")
     {
-        return Err(ServiceError::BadRequest("invalid manifest path"));
+        return Err(ServiceError::BadRequest("invalid_manifest_path"));
     }
     Ok(trimmed.to_string())
 }
diff --git a/api/src/application/services/plugins/mod.rs b/api/crates/application/src/plugins/services/mod.rs
similarity index 100%
rename from api/src/application/services/plugins/mod.rs
rename to api/crates/application/src/plugins/services/mod.rs
diff --git a/api/src/application/services/plugins/permissions.rs b/api/crates/application/src/plugins/services/permissions.rs
similarity index 54%
rename from api/src/application/services/plugins/permissions.rs
rename to api/crates/application/src/plugins/services/permissions.rs
index 1a421f67..3e1129c5 100644
--- a/api/src/application/services/plugins/permissions.rs
+++ b/api/crates/application/src/plugins/services/permissions.rs
@@ -2,13 +2,36 @@
 use std::sync::Arc;

 use uuid::Uuid;

-use crate::application::ports::plugin_runtime::PluginRuntime;
-use crate::application::services::errors::ServiceError;
+use crate::core::services::errors::ServiceError;
+use crate::plugins::ports::plugin_runtime::PluginRuntime;
+use async_trait::async_trait;

 pub struct PluginPermissionService {
     runtime: Arc<dyn PluginRuntime>,
 }

+#[async_trait]
+pub trait PluginPermissionServiceFacade: Send + Sync {
+    async fn ensure(
+        &self,
+        workspace_id: Option<Uuid>,
+        plugin_id: &str,
+        permission: &str,
+    ) -> Result<(), ServiceError>;
+}
+
+#[async_trait]
+impl PluginPermissionServiceFacade for PluginPermissionService {
+    async fn ensure(
+        &self,
+        workspace_id: Option<Uuid>,
+        plugin_id: &str,
+        permission: &str,
+    ) -> Result<(), ServiceError> {
+        self.ensure(workspace_id, plugin_id, permission).await
+    }
+}
+
 impl PluginPermissionService {
     pub fn new(runtime: Arc<dyn PluginRuntime>) -> Self {
         Self { runtime }
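// Editor's note (not part of the diff): quick behavioral examples for the
// validators above, assuming they are in scope. The character-whitelist line
// falls between hunks and is not shown, so only checks visible in the diff
// are exercised here:

fn validator_examples() {
    assert!(validate_plugin_id("").is_err()); // empty -> BadRequest("invalid_plugin_id")
    assert!(validate_plugin_id(&"x".repeat(200)).is_err()); // > 128 chars
    assert!(validate_plugin_version("").is_err()); // -> BadRequest("invalid_plugin_version")
    assert_eq!(
        normalize_manifest_path("/assets/app.js").unwrap(),
        "assets/app.js" // leading slashes are trimmed
    );
    assert!(normalize_manifest_path("a/../b").is_err()); // ".." is rejected
    assert!(normalize_manifest_path("a\\b").is_err()); // backslashes are rejected
}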
diff --git a/api/src/application/use_cases/plugins/exec_action.rs b/api/crates/application/src/plugins/use_cases/exec_action.rs
similarity index 65%
rename from api/src/application/use_cases/plugins/exec_action.rs
rename to api/crates/application/src/plugins/use_cases/exec_action.rs
index 7f112cdb..3550a0a5 100644
--- a/api/src/application/use_cases/plugins/exec_action.rs
+++ b/api/crates/application/src/plugins/use_cases/exec_action.rs
@@ -2,14 +2,19 @@
 use std::collections::HashSet;

 use uuid::Uuid;

-use crate::application::dto::plugins::ExecResult;
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::plugin_repository::PluginRepository;
-use crate::application::ports::plugin_runtime::PluginRuntime;
-use crate::domain::workspaces::permissions::{PERM_DOC_CREATE, PERM_DOC_EDIT, PermissionSet};
-use crate::{application::access, application::services::authorization::AuthorizationService};
-
-const PERMISSION_DOC_WRITE: &str = "doc.write";
+use crate::core::services::access;
+use crate::core::services::authorization::AuthorizationService;
+use crate::documents::ports::document_repository::DocumentRepository;
+use crate::documents::ports::document_repository::DocumentRepositoryError;
+use crate::documents::use_cases::create_document::CreateDocument;
+use crate::plugins::dtos::ExecResult;
+use crate::plugins::ports::plugin_repository::PluginRepository;
+use crate::plugins::ports::plugin_runtime::PluginRuntime;
+use domain::access::permissions::{PERM_DOC_EDIT, PermissionSet};
+use domain::documents::doc_type::DocumentType;
+use domain::documents::title::Title;
+use domain::plugins::policy;
+use domain::plugins::scope::{PluginRecordScope, PluginScope};

 enum PluginEffectError {
     PermissionDenied { permission: String },
@@ -22,6 +27,22 @@ impl From for PluginEffectError {
     }
 }

+impl From<DocumentRepositoryError> for PluginEffectError {
+    fn from(err: DocumentRepositoryError) -> Self {
+        Self::Other(err.into())
+    }
+}
+
+impl From<policy::PluginPolicyError> for PluginEffectError {
+    fn from(err: policy::PluginPolicyError) -> Self {
+        match err {
+            policy::PluginPolicyError::PermissionDenied { permission } => {
+                PluginEffectError::PermissionDenied { permission }
+            }
+        }
+    }
+}
+
 pub struct ExecutePluginAction<'a, RT, PR, DR>
 where
     RT: PluginRuntime + ?Sized,
@@ -40,6 +61,7 @@
     PR: PluginRepository + ?Sized,
     DR: DocumentRepository + ?Sized,
 {
+    #[allow(clippy::too_many_arguments)]
     pub async fn execute(
         &self,
         workspace_id: Uuid,
@@ -113,6 +135,7 @@
         Ok(Some(res))
     }

+    #[allow(clippy::too_many_arguments)]
     async fn apply_server_effects(
         &self,
         workspace_id: Uuid,
@@ -138,45 +161,69 @@
                     self.log_effect(effect);
                 }
                 "createDocument" => {
-                    self.ensure_permission(permissions, PERMISSION_DOC_WRITE)?;
-                    if !workspace_permissions.allows(PERM_DOC_CREATE) {
-                        return Err(PluginEffectError::PermissionDenied {
-                            permission: PERM_DOC_CREATE.to_string(),
-                        });
-                    }
+                    policy::ensure_plugin_permission(
+                        permissions,
+                        policy::PLUGIN_PERMISSION_DOC_WRITE,
+                    )?;
+                    policy::ensure_workspace_can_create_documents(workspace_permissions)?;
                     let title = effect
                         .get("title")
                         .and_then(|v| v.as_str())
                         .unwrap_or("Untitled");
-                    let doc_type = effect
+                    let title = Title::from_user_input(title);
+                    let doc_type_str = effect
                         .get("docType")
                         .and_then(|v| v.as_str())
-                        .unwrap_or("document");
+                        .unwrap_or(DocumentType::Document.as_str());
+                    let doc_type = DocumentType::try_from(doc_type_str).map_err(|_| {
+                        PluginEffectError::Other(anyhow::anyhow!("invalid_document_type"))
+                    })?;
                     let parent_id = effect
                         .get("parentId")
                         .and_then(|v| v.as_str())
                         .and_then(|s| Uuid::parse_str(s).ok());
-                    let doc = self
-                        .document_repo
-                        .create_for_user(
+                    let parent_desired_path = if let Some(pid) = parent_id {
+                        let meta = self
+                            .document_repo
+                            .get_meta_for_owner(pid, workspace_id)
+                            .await
+                            .map_err(PluginEffectError::from)?
+                            .ok_or_else(|| {
+                                PluginEffectError::Other(anyhow::anyhow!(
+                                    "parent_document_not_found"
+                                ))
+                            })?;
+                        if meta.archived_at.is_some() {
+                            return Err(PluginEffectError::Other(anyhow::anyhow!(
+                                "parent_document_archived"
+                            )));
+                        }
+                        Some(meta.desired_path)
+                    } else {
+                        None
+                    };
+                    let mut repo = self.document_repo;
+                    let mut uc = CreateDocument { repo: &mut repo };
+                    let doc = uc
+                        .execute(
                             workspace_id,
                             user_id,
-                            title,
+                            &title,
                             parent_id,
+                            parent_desired_path.as_ref(),
                             doc_type,
                             Some(plugin),
                         )
                         .await
                         .map_err(PluginEffectError::from)?;
-                    doc_id_created = Some(doc.id);
+                    doc_id_created = Some(doc.id());
                 }
                 "putKv" => {
-                    self.ensure_permission(permissions, PERMISSION_DOC_WRITE)?;
-                    if !workspace_permissions.allows(PERM_DOC_EDIT) {
-                        return Err(PluginEffectError::PermissionDenied {
-                            permission: PERM_DOC_EDIT.to_string(),
-                        });
-                    }
+                    policy::ensure_plugin_permission(
+                        permissions,
+                        policy::PLUGIN_PERMISSION_DOC_WRITE,
+                    )?;
+                    policy::ensure_workspace_can_edit_documents(workspace_permissions)?;
                     let Some(key) = effect.get("key").and_then(|v| v.as_str()) else {
                         continue;
                     };
@@ -200,18 +247,17 @@
                         .await?;
                     if let Some(did) = doc_id {
                         self.plugin_repo
-                            .kv_set(plugin, "doc", Some(did), key, &value)
+                            .kv_set(plugin, PluginScope::Doc, Some(did), key, &value)
                             .await
-                            .map_err(PluginEffectError::from)?;
+                            .map_err(|err| PluginEffectError::from(anyhow::Error::from(err)))?;
                     }
                 }
                 "createRecord" => {
-                    self.ensure_permission(permissions, PERMISSION_DOC_WRITE)?;
-                    if !workspace_permissions.allows(PERM_DOC_EDIT) {
-                        return Err(PluginEffectError::PermissionDenied {
-                            permission: PERM_DOC_EDIT.to_string(),
-                        });
-                    }
+                    policy::ensure_plugin_permission(
+                        permissions,
+                        policy::PLUGIN_PERMISSION_DOC_WRITE,
+                    )?;
+                    policy::ensure_workspace_can_edit_documents(workspace_permissions)?;
                     let Some(kind) = effect.get("kind").and_then(|v| v.as_str()) else {
                         continue;
                     };
@@ -236,18 +282,17 @@
                     if let Some(did) = doc_id {
                         let _ = self
                             .plugin_repo
-                            .insert_record(plugin, "doc", did, kind, &data)
+                            .insert_record(plugin, PluginRecordScope::Doc, did, kind, &data)
                             .await
-                            .map_err(PluginEffectError::from)?;
+                            .map_err(|err| PluginEffectError::from(anyhow::Error::from(err)))?;
                     }
                 }
                 "updateRecord" => {
-                    self.ensure_permission(permissions, PERMISSION_DOC_WRITE)?;
-                    if !workspace_permissions.allows(PERM_DOC_EDIT) {
-                        return Err(PluginEffectError::PermissionDenied {
-                            permission: PERM_DOC_EDIT.to_string(),
-                        });
-                    }
+                    policy::ensure_plugin_permission(
+                        permissions,
+                        policy::PLUGIN_PERMISSION_DOC_WRITE,
+                    )?;
+                    policy::ensure_workspace_can_edit_documents(workspace_permissions)?;
                     if let Some(record_id) = effect
                         .get("recordId")
                         .and_then(|v| v.as_str())
@@ -257,12 +302,11 @@
                             .plugin_repo
                             .get_record(record_id)
                             .await
-                            .map_err(PluginEffectError::from)?
+                            .map_err(|err| PluginEffectError::from(anyhow::Error::from(err)))?
                         {
-                            if rec.plugin != plugin {
-                                return Err(PluginEffectError::PermissionDenied {
-                                    permission: PERM_DOC_EDIT.to_string(),
-                                });
+                            policy::ensure_record_owned_by_plugin(&rec.plugin, plugin)?;
+                            if rec.scope != PluginRecordScope::Doc {
+                                continue;
                             }
                             self.validate_doc_scope(
                                 workspace_id,
@@ -282,16 +326,15 @@
                             .plugin_repo
                             .update_record_data(record_id, &patch)
                             .await
-                            .map_err(PluginEffectError::from)?;
+                            .map_err(|err| PluginEffectError::from(anyhow::Error::from(err)))?;
                     }
                 }
                 "deleteRecord" => {
-                    self.ensure_permission(permissions, PERMISSION_DOC_WRITE)?;
-                    if !workspace_permissions.allows(PERM_DOC_EDIT) {
-                        return Err(PluginEffectError::PermissionDenied {
-                            permission: PERM_DOC_EDIT.to_string(),
-                        });
-                    }
+                    policy::ensure_plugin_permission(
+                        permissions,
+                        policy::PLUGIN_PERMISSION_DOC_WRITE,
+                    )?;
+                    policy::ensure_workspace_can_edit_documents(workspace_permissions)?;
                     if let Some(record_id) = effect
                         .get("recordId")
                         .and_then(|v| v.as_str())
@@ -301,12 +344,11 @@
                             .plugin_repo
                             .get_record(record_id)
                             .await
-                            .map_err(PluginEffectError::from)?
+                            .map_err(|err| PluginEffectError::from(anyhow::Error::from(err)))?
                         {
-                            if rec.plugin != plugin {
-                                return Err(PluginEffectError::PermissionDenied {
-                                    permission: PERM_DOC_EDIT.to_string(),
-                                });
+                            policy::ensure_record_owned_by_plugin(&rec.plugin, plugin)?;
+                            if rec.scope != PluginRecordScope::Doc {
+                                continue;
                             }
                             self.validate_doc_scope(
                                 workspace_id,
@@ -322,26 +364,25 @@
                             .plugin_repo
                             .delete_record(record_id)
                             .await
-                            .map_err(PluginEffectError::from)?;
+                            .map_err(|err| PluginEffectError::from(anyhow::Error::from(err)))?;
                     }
                 }
                 "navigate" => {
-                    if let Some(doc_id) = doc_id_created {
-                        if let Some(to) = effect.get("to").and_then(|v| v.as_str()) {
-                            if to.contains(":createdDocId") {
-                                let mut cloned = effect.clone();
-                                if let Some(obj) = cloned.as_object_mut() {
-                                    obj.insert(
-                                        "to".into(),
-                                        serde_json::Value::String(
-                                            to.replace(":createdDocId", &doc_id.to_string()),
-                                        ),
-                                    );
-                                }
-                                passthrough.push(cloned);
-                                continue;
-                            }
+                    if let Some(doc_id) = doc_id_created
+                        && let Some(to) = effect.get("to").and_then(|v| v.as_str())
+                        && to.contains(":createdDocId")
+                    {
+                        let mut cloned = effect.clone();
+                        if let Some(obj) = cloned.as_object_mut() {
+                            obj.insert(
+                                "to".into(),
+                                serde_json::Value::String(
+                                    to.replace(":createdDocId", &doc_id.to_string()),
+                                ),
+                            );
                         }
+                        passthrough.push(cloned);
+                        continue;
                     }
                     passthrough.push(effect.clone());
                 }
@@ -370,16 +411,16 @@
             return Ok(None);
         };
         if let Some(allowed) = allowed_doc_id {
-            if doc_id != allowed {
-                return Err(PluginEffectError::PermissionDenied {
-                    permission: PERM_DOC_EDIT.to_string(),
-                });
-            }
+            policy::ensure_doc_id_within_allowed_scope(doc_id, allowed)?;
         }
         if Some(doc_id) == doc_id_created {
             return Ok(Some(doc_id));
         }
-        let capability = self.authorization.resolve_document(actor, doc_id).await;
+        let capability = self
+            .authorization
+            .resolve_document(actor, doc_id)
+            .await
+            .map_err(|err| PluginEffectError::Other(anyhow::Error::new(err)))?;
         let has_access = if require_edit {
             capability >= access::Capability::Edit
         } else {
@@ -394,20 +435,6 @@
         }
     }

-    fn ensure_permission(
-        &self,
-        permissions: &HashSet<String>,
-        permission: &str,
-    ) -> Result<(), PluginEffectError> {
-        if permissions.iter().any(|p| p == permission) {
-            Ok(())
-        } else {
-            Err(PluginEffectError::PermissionDenied {
-                permission: permission.to_string(),
-            })
-        }
-    }
-
     fn log_only(&self, effects: &[serde_json::Value]) {
         for effect in effects {
             if effect.get("type").and_then(|v| v.as_str()) == Some("log") {
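// Editor's sketch (not part of the diff): a plausible shape for
// `domain::plugins::policy` as used above. Only the function names, the
// `PluginPolicyError::PermissionDenied { permission }` variant, and the
// "doc.write" constant (the removed `PERMISSION_DOC_WRITE`) are attested by
// the diff — the bodies are assumptions modeled on the removed inline checks:

use std::collections::HashSet;

pub const PLUGIN_PERMISSION_DOC_WRITE: &str = "doc.write";

#[derive(Debug)]
pub enum PluginPolicyError {
    PermissionDenied { permission: String },
}

pub fn ensure_plugin_permission(
    granted: &HashSet<String>,
    permission: &str,
) -> Result<(), PluginPolicyError> {
    if granted.contains(permission) {
        Ok(())
    } else {
        Err(PluginPolicyError::PermissionDenied {
            permission: permission.to_string(),
        })
    }
}

pub fn ensure_workspace_can_edit_documents(
    permissions: &PermissionSet,
) -> Result<(), PluginPolicyError> {
    // Mirrors the removed `!workspace_permissions.allows(PERM_DOC_EDIT)` check.
    if permissions.allows(PERM_DOC_EDIT) {
        Ok(())
    } else {
        Err(PluginPolicyError::PermissionDenied {
            permission: PERM_DOC_EDIT.to_string(),
        })
    }
}

// ensure_workspace_can_create_documents, ensure_record_owned_by_plugin, and
// ensure_doc_id_within_allowed_scope would follow the same pattern.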
diff --git a/api/src/application/use_cases/plugins/install_from_url.rs b/api/crates/application/src/plugins/use_cases/install_from_url.rs
similarity index 73%
rename from api/src/application/use_cases/plugins/install_from_url.rs
rename to api/crates/application/src/plugins/use_cases/install_from_url.rs
index 8332ae18..ab14b1f1 100644
--- a/api/src/application/use_cases/plugins/install_from_url.rs
+++ b/api/crates/application/src/plugins/use_cases/install_from_url.rs
@@ -1,9 +1,11 @@
-use crate::application::ports::plugin_event_publisher::{PluginEventPublisher, PluginScopedEvent};
-use crate::application::ports::plugin_installation_repository::PluginInstallationRepository;
-use crate::application::ports::plugin_installer::{
+use crate::plugins::ports::plugin_event_publisher::{PluginEventPublisher, PluginScopedEvent};
+use crate::plugins::ports::plugin_installation_repository::PluginInstallationRepository;
+use crate::plugins::ports::plugin_installer::{
     InstalledPlugin, PluginInstallError, PluginInstaller,
 };
-use crate::application::ports::plugin_package_fetcher::PluginPackageFetcher;
+use crate::plugins::ports::plugin_package_fetcher::PluginPackageFetcher;
+use domain::plugins::events::PluginEventKind;
+use domain::plugins::scope::{PluginInstallationStatus, PluginScope};
 use uuid::Uuid;

 #[derive(thiserror::Error, Debug)]
@@ -49,7 +51,7 @@
             .fetcher
             .fetch(url, token)
             .await
-            .map_err(InstallPluginError::Download)?;
+            .map_err(|err| InstallPluginError::Download(err.into()))?;
         let installed = self
             .installer
             .install_for_user(workspace_id, &bytes)
@@ -61,18 +63,18 @@
                 workspace_id,
                 &installed.id,
                 &installed.version,
-                "user",
+                PluginScope::User,
                 Some(url),
-                "enabled",
+                PluginInstallationStatus::Enabled,
             )
             .await
-            .map_err(InstallPluginError::Persist)?;
+            .map_err(|err| InstallPluginError::Persist(err.into()))?;

         let event = PluginScopedEvent {
             user_id: Some(user_id),
             workspace_id: Some(workspace_id),
             payload: serde_json::json!({
-                "event": "installed",
+                "event": PluginEventKind::Installed.as_str(),
                 "id": installed.id,
                 "version": installed.version,
                 "workspace_id": workspace_id,
@@ -81,7 +83,7 @@
         self.events
             .publish(&event)
             .await
-            .map_err(InstallPluginError::Event)?;
+            .map_err(|err| InstallPluginError::Event(err.into()))?;
         Ok(installed)
     }
 }
diff --git a/api/src/application/use_cases/plugins/kv.rs b/api/crates/application/src/plugins/use_cases/kv.rs
similarity index 62%
rename from api/src/application/use_cases/plugins/kv.rs
rename to api/crates/application/src/plugins/use_cases/kv.rs
index f125cc26..7e99f9dc 100644
--- a/api/src/application/use_cases/plugins/kv.rs
+++ b/api/crates/application/src/plugins/use_cases/kv.rs
@@ -1,6 +1,7 @@
 use uuid::Uuid;

-use crate::application::ports::plugin_repository::PluginRepository;
+use crate::plugins::ports::plugin_repository::PluginRepository;
+use domain::plugins::scope::PluginScope;

 pub struct GetPluginKv<'a, R: PluginRepository + ?Sized> {
     pub repo: &'a R,
@@ -10,11 +11,14 @@ impl<'a, R: PluginRepository + ?Sized> GetPluginKv<'a, R> {
     pub async fn execute(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginScope,
         scope_id: Option<Uuid>,
         key: &str,
     ) -> anyhow::Result<Option<serde_json::Value>> {
-        self.repo.kv_get(plugin, scope, scope_id, key).await
+        self.repo
+            .kv_get(plugin, scope, scope_id, key)
+            .await
+            .map_err(Into::into)
     }
 }

@@ -26,11 +30,14 @@ impl<'a, R: PluginRepository + ?Sized> PutPluginKv<'a, R> {
     pub async fn execute(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginScope,
         scope_id: Option<Uuid>,
         key: &str,
         value: &serde_json::Value,
     ) -> anyhow::Result<()> {
-        self.repo.kv_set(plugin, scope, scope_id, key, value).await
+        self.repo
+            .kv_set(plugin, scope, scope_id, key, value)
+            .await
+            .map_err(Into::into)
     }
 }
diff --git a/api/src/application/use_cases/plugins/mod.rs b/api/crates/application/src/plugins/use_cases/mod.rs
similarity index 100%
rename from api/src/application/use_cases/plugins/mod.rs
rename to api/crates/application/src/plugins/use_cases/mod.rs
diff --git a/api/src/application/use_cases/plugins/records.rs b/api/crates/application/src/plugins/use_cases/records.rs
similarity index 77%
rename from api/src/application/use_cases/plugins/records.rs
rename to api/crates/application/src/plugins/use_cases/records.rs
index 01d2feda..473c7d81 100644
--- a/api/src/application/use_cases/plugins/records.rs
+++ b/api/crates/application/src/plugins/use_cases/records.rs
@@ -1,6 +1,7 @@
 use uuid::Uuid;

-use crate::application::ports::plugin_repository::{PluginRecord, PluginRepository};
+use crate::plugins::ports::plugin_repository::{PluginRecord, PluginRepository};
+use domain::plugins::scope::PluginRecordScope;

 pub struct ListPluginRecords<'a, R: PluginRepository + ?Sized> {
     pub repo: &'a R,
@@ -10,7 +11,7 @@ impl<'a, R: PluginRepository + ?Sized> ListPluginRecords<'a, R> {
     pub async fn execute(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginRecordScope,
         scope_id: Uuid,
         kind: &str,
         limit: i64,
@@ -19,6 +20,7 @@
         self.repo
             .list_records(plugin, scope, scope_id, kind, limit, offset)
             .await
+            .map_err(Into::into)
     }
 }

@@ -30,7 +32,7 @@ impl<'a, R: PluginRepository + ?Sized> CreatePluginRecord<'a, R> {
     pub async fn execute(
         &self,
         plugin: &str,
-        scope: &str,
+        scope: PluginRecordScope,
         scope_id: Uuid,
         kind: &str,
         data: &serde_json::Value,
@@ -38,6 +40,7 @@
         self.repo
             .insert_record(plugin, scope, scope_id, kind, data)
             .await
+            .map_err(Into::into)
     }
 }

@@ -51,7 +54,10 @@
         record_id: Uuid,
         patch: &serde_json::Value,
     ) -> anyhow::Result<Option<PluginRecord>> {
-        self.repo.update_record_data(record_id, patch).await
+        self.repo
+            .update_record_data(record_id, patch)
+            .await
+            .map_err(Into::into)
     }
 }

@@ -61,7 +67,7 @@ pub struct DeletePluginRecord<'a, R: PluginRepository + ?Sized> {

 impl<'a, R: PluginRepository + ?Sized> DeletePluginRecord<'a, R> {
     pub async fn execute(&self, record_id: Uuid) -> anyhow::Result<bool> {
-        self.repo.delete_record(record_id).await
+        self.repo.delete_record(record_id).await.map_err(Into::into)
     }
 }

@@ -71,6 +77,6 @@ pub struct GetPluginRecord<'a, R: PluginRepository + ?Sized> {

 impl<'a, R: PluginRepository + ?Sized> GetPluginRecord<'a, R> {
     pub async fn execute(&self, record_id: Uuid) -> anyhow::Result<Option<PluginRecord>> {
-        self.repo.get_record(record_id).await
+        self.repo.get_record(record_id).await.map_err(Into::into)
     }
 }
diff --git a/api/crates/application/src/workspaces/dtos/mod.rs b/api/crates/application/src/workspaces/dtos/mod.rs
new file mode 100644
index 00000000..33573843
--- /dev/null
+++ b/api/crates/application/src/workspaces/dtos/mod.rs
@@ -0,0 +1 @@
+// Intentionally left empty for now.
diff --git a/api/crates/application/src/workspaces/mod.rs b/api/crates/application/src/workspaces/mod.rs new file mode 100644 index 00000000..2e8e16cf --- /dev/null +++ b/api/crates/application/src/workspaces/mod.rs @@ -0,0 +1,4 @@ +pub mod dtos; +pub mod ports; +pub mod services; +pub mod use_cases; diff --git a/api/crates/application/src/workspaces/ports/mod.rs b/api/crates/application/src/workspaces/ports/mod.rs new file mode 100644 index 00000000..895c5f07 --- /dev/null +++ b/api/crates/application/src/workspaces/ports/mod.rs @@ -0,0 +1 @@ +pub mod workspace_repository; diff --git a/api/src/application/ports/workspace_repository.rs b/api/crates/application/src/workspaces/ports/workspace_repository.rs similarity index 62% rename from api/src/application/ports/workspace_repository.rs rename to api/crates/application/src/workspaces/ports/workspace_repository.rs index e2b2a5c6..c892c914 100644 --- a/api/src/application/ports/workspace_repository.rs +++ b/api/crates/application/src/workspaces/ports/workspace_repository.rs @@ -3,6 +3,10 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use uuid::Uuid; +use crate::core::ports::errors::PortResult; +use domain::access::permissions::PermissionOverride; +use domain::workspaces::roles::{WorkspaceBaseRole, WorkspaceRoleKind, WorkspaceSystemRole}; + #[derive(Debug, Clone)] pub struct WorkspaceRow { pub id: Uuid, @@ -21,8 +25,8 @@ pub struct WorkspaceListItem { pub icon: Option, pub description: Option, pub is_personal: bool, - pub role_kind: String, - pub system_role: Option, + pub role_kind: WorkspaceRoleKind, + pub system_role: Option, pub custom_role_id: Option, pub is_default: bool, } @@ -31,8 +35,8 @@ pub struct WorkspaceListItem { pub struct WorkspaceMemberRow { pub workspace_id: Uuid, pub user_id: Uuid, - pub role_kind: String, - pub system_role: Option, + pub role_kind: WorkspaceRoleKind, + pub system_role: Option, pub custom_role_id: Option, pub is_default: bool, } @@ -41,8 +45,8 @@ pub struct WorkspaceMemberRow { pub struct WorkspaceMemberDetail { pub workspace_id: Uuid, pub user_id: Uuid, - pub role_kind: String, - pub system_role: Option, + pub role_kind: WorkspaceRoleKind, + pub system_role: Option, pub custom_role_id: Option, pub is_default: bool, pub user_email: String, @@ -53,11 +57,11 @@ pub struct WorkspaceMemberDetail { pub struct WorkspacePermissionRecord { pub workspace_id: Uuid, pub user_id: Uuid, - pub role_kind: String, - pub system_role: Option, + pub role_kind: WorkspaceRoleKind, + pub system_role: Option, pub custom_role_id: Option, - pub custom_base_role: Option, - pub overrides: Vec<(String, bool)>, + pub custom_base_role: Option, + pub overrides: Vec, } #[derive(Debug, Clone)] @@ -66,9 +70,9 @@ pub struct WorkspaceRoleRecord { pub workspace_id: Uuid, pub name: String, pub description: Option, - pub base_role: String, + pub base_role: WorkspaceBaseRole, pub priority: i32, - pub overrides: Vec<(String, bool)>, + pub overrides: Vec, } #[derive(Debug, Clone)] @@ -76,8 +80,8 @@ pub struct WorkspaceInvitationRecord { pub id: Uuid, pub workspace_id: Uuid, pub email: String, - pub role_kind: String, - pub system_role: Option, + pub role_kind: WorkspaceRoleKind, + pub system_role: Option, pub custom_role_id: Option, pub invited_by: Uuid, pub token: String, @@ -96,7 +100,7 @@ pub enum WorkspaceSetDefaultError { #[async_trait] pub trait WorkspaceRepository: Send + Sync { - async fn list_for_user(&self, user_id: Uuid) -> anyhow::Result>; + async fn list_for_user(&self, user_id: Uuid) -> PortResult>; async fn 
create_workspace( &self, creator_id: Uuid, @@ -105,8 +109,9 @@ pub trait WorkspaceRepository: Send + Sync { icon: Option<&str>, description: Option<&str>, is_personal: bool, - ) -> anyhow::Result; - async fn get_workspace(&self, workspace_id: Uuid) -> anyhow::Result>; + ) -> PortResult; + async fn get_workspace(&self, workspace_id: Uuid) -> PortResult>; + #[allow(clippy::too_many_arguments)] async fn create_workspace_with_id( &self, workspace_id: Uuid, @@ -116,120 +121,122 @@ pub trait WorkspaceRepository: Send + Sync { icon: Option<&str>, description: Option<&str>, is_personal: bool, - ) -> anyhow::Result; + ) -> PortResult; async fn add_member( &self, workspace_id: Uuid, user_id: Uuid, - role_kind: &str, - system_role: Option<&str>, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, - ) -> anyhow::Result; + ) -> PortResult; async fn set_default_workspace( &self, user_id: Uuid, workspace_id: Uuid, ) -> Result; - async fn list_members(&self, workspace_id: Uuid) -> anyhow::Result>; + async fn list_members(&self, workspace_id: Uuid) -> PortResult>; async fn get_member_detail( &self, workspace_id: Uuid, user_id: Uuid, - ) -> anyhow::Result>; + ) -> PortResult>; async fn update_member_role( &self, workspace_id: Uuid, user_id: Uuid, - role_kind: &str, - system_role: Option<&str>, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, - ) -> anyhow::Result; + ) -> PortResult; async fn get_member_with_permissions( &self, workspace_id: Uuid, user_id: Uuid, - ) -> anyhow::Result>; + ) -> PortResult>; async fn count_system_role_members( &self, workspace_id: Uuid, - system_role: &str, - ) -> anyhow::Result; + system_role: WorkspaceSystemRole, + ) -> PortResult; - async fn list_roles(&self, workspace_id: Uuid) -> anyhow::Result>; + async fn list_roles(&self, workspace_id: Uuid) -> PortResult>; async fn create_role( &self, workspace_id: Uuid, name: &str, - base_role: &str, + base_role: WorkspaceBaseRole, description: Option<&str>, priority: i32, - overrides: &[(String, bool)], - ) -> anyhow::Result; + overrides: &[PermissionOverride], + ) -> PortResult; + #[allow(clippy::too_many_arguments)] async fn update_role( &self, workspace_id: Uuid, role_id: Uuid, name: Option<&str>, - base_role: Option<&str>, + base_role: Option, description: Option<&str>, priority: Option, - overrides: Option<&[(String, bool)]>, - ) -> anyhow::Result; + overrides: Option<&[PermissionOverride]>, + ) -> PortResult; - async fn delete_role(&self, workspace_id: Uuid, role_id: Uuid) -> anyhow::Result; - async fn delete_workspace(&self, workspace_id: Uuid) -> anyhow::Result; + async fn delete_role(&self, workspace_id: Uuid, role_id: Uuid) -> PortResult; + async fn delete_workspace(&self, workspace_id: Uuid) -> PortResult; async fn get_role( &self, workspace_id: Uuid, role_id: Uuid, - ) -> anyhow::Result>; + ) -> PortResult>; - async fn delete_member(&self, workspace_id: Uuid, user_id: Uuid) -> anyhow::Result; + async fn delete_member(&self, workspace_id: Uuid, user_id: Uuid) -> PortResult; async fn update_workspace( &self, workspace_id: Uuid, name: Option<&str>, icon: Option<&str>, description: Option<&str>, - ) -> anyhow::Result>; + ) -> PortResult>; + #[allow(clippy::too_many_arguments)] async fn create_invitation( &self, workspace_id: Uuid, email: &str, - role_kind: &str, - system_role: Option<&str>, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, invited_by: Uuid, token: &str, expires_at: Option>, - ) -> anyhow::Result; + ) -> PortResult; async fn 
list_invitations( &self, workspace_id: Uuid, - ) -> anyhow::Result>; + ) -> PortResult>; async fn accept_invitation( &self, token: &str, user_id: Uuid, user_email: &str, - ) -> anyhow::Result; + ) -> PortResult; async fn revoke_invitation( &self, workspace_id: Uuid, invitation_id: Uuid, - ) -> anyhow::Result>; + ) -> PortResult>; - async fn list_all_workspace_ids(&self) -> anyhow::Result>; + async fn list_all_workspace_ids(&self) -> PortResult>; } diff --git a/api/src/application/services/workspaces/mod.rs b/api/crates/application/src/workspaces/services/mod.rs similarity index 54% rename from api/src/application/services/workspaces/mod.rs rename to api/crates/application/src/workspaces/services/mod.rs index 32baf9d0..d52b15cc 100644 --- a/api/src/application/services/workspaces/mod.rs +++ b/api/crates/application/src/workspaces/services/mod.rs @@ -5,17 +5,18 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use uuid::Uuid; -use crate::domain::workspaces::permissions::{ - PermissionSet, apply_custom_overrides, system_role_permissions, +use domain::access::permissions::{ + PermissionOverride, PermissionSet, apply_custom_overrides, system_role_permissions, }; +use domain::workspaces::roles::{WorkspaceBaseRole, WorkspaceRoleKind, WorkspaceSystemRole}; pub mod permission_snapshot; mod slug; -use crate::application::ports::workspace_repository::{ +use crate::core::services::errors::ServiceError; +use crate::workspaces::ports::workspace_repository::{ WorkspaceInvitationRecord, WorkspaceListItem, WorkspaceMemberDetail, WorkspaceMemberRow, WorkspaceRepository, WorkspaceRoleRecord, WorkspaceRow, WorkspaceSetDefaultError, }; -use crate::application::services::errors::ServiceError; #[async_trait] pub trait WorkspacePermissionResolver: Send + Sync { @@ -30,9 +31,361 @@ pub struct WorkspaceService { repo: Arc, } +#[async_trait] +pub trait WorkspaceServiceFacade: Send + Sync { + async fn list_for_user(&self, user_id: Uuid) -> Result, ServiceError>; + + async fn create_workspace( + &self, + creator_id: Uuid, + name: &str, + icon: Option<&str>, + description: Option<&str>, + ) -> Result; + + async fn create_personal_workspace_shell( + &self, + user_id: Uuid, + name: &str, + ) -> Result; + + async fn get_workspace(&self, workspace_id: Uuid) + -> Result, ServiceError>; + + async fn update_workspace( + &self, + workspace_id: Uuid, + name: Option<&str>, + icon: Option<&str>, + description: Option<&str>, + ) -> Result, ServiceError>; + + async fn delete_workspace(&self, workspace_id: Uuid) -> Result; + + async fn list_members( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError>; + + async fn remove_member( + &self, + workspace_id: Uuid, + member_id: Uuid, + requested_by: Option, + ) -> Result<(), ServiceError>; + + async fn leave_workspace(&self, workspace_id: Uuid, user_id: Uuid) -> Result<(), ServiceError>; + + async fn set_default_workspace( + &self, + user_id: Uuid, + workspace_id: Uuid, + ) -> Result; + + async fn resolve_permission_set( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> Result, ServiceError>; + + async fn update_member_role( + &self, + workspace_id: Uuid, + user_id: Uuid, + requested_by: Uuid, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + ) -> Result; + + async fn list_roles( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError>; + + #[allow(clippy::too_many_arguments)] + async fn create_role( + &self, + workspace_id: Uuid, + requested_by: Uuid, + name: &str, + base_role: WorkspaceBaseRole, + description: Option<&str>, + 
priority: i32, + overrides: &[PermissionOverride], + ) -> Result; + + #[allow(clippy::too_many_arguments)] + async fn update_role( + &self, + workspace_id: Uuid, + requested_by: Uuid, + role_id: Uuid, + name: Option<&str>, + base_role: Option, + description: Option<&str>, + priority: Option, + overrides: Option<&[PermissionOverride]>, + ) -> Result; + + async fn delete_role(&self, workspace_id: Uuid, role_id: Uuid) -> Result; + + async fn list_invitations( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError>; + + #[allow(clippy::too_many_arguments)] + async fn create_invitation( + &self, + workspace_id: Uuid, + invited_by: Uuid, + email: &str, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + expires_at: Option>, + ) -> Result; + + async fn revoke_invitation( + &self, + workspace_id: Uuid, + invitation_id: Uuid, + ) -> Result; + + async fn accept_invitation( + &self, + token: &str, + user_id: Uuid, + user_email: &str, + ) -> Result; + + async fn ensure_owner_membership( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> Result<(), ServiceError>; +} + +#[async_trait] +impl WorkspaceServiceFacade for WorkspaceService { + async fn list_for_user(&self, user_id: Uuid) -> Result, ServiceError> { + self.list_for_user(user_id).await + } + + async fn create_workspace( + &self, + creator_id: Uuid, + name: &str, + icon: Option<&str>, + description: Option<&str>, + ) -> Result { + self.create_workspace(creator_id, name, icon, description) + .await + } + + async fn create_personal_workspace_shell( + &self, + user_id: Uuid, + name: &str, + ) -> Result { + self.create_personal_workspace_shell(user_id, name).await + } + + async fn get_workspace( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.get_workspace(workspace_id).await + } + + async fn update_workspace( + &self, + workspace_id: Uuid, + name: Option<&str>, + icon: Option<&str>, + description: Option<&str>, + ) -> Result, ServiceError> { + self.update_workspace(workspace_id, name, icon, description) + .await + } + + async fn delete_workspace(&self, workspace_id: Uuid) -> Result { + self.delete_workspace(workspace_id).await + } + + async fn list_members( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.list_members(workspace_id).await + } + + async fn remove_member( + &self, + workspace_id: Uuid, + member_id: Uuid, + requested_by: Option, + ) -> Result<(), ServiceError> { + self.remove_member(workspace_id, member_id, requested_by) + .await + } + + async fn leave_workspace(&self, workspace_id: Uuid, user_id: Uuid) -> Result<(), ServiceError> { + self.leave_workspace(workspace_id, user_id).await + } + + async fn set_default_workspace( + &self, + user_id: Uuid, + workspace_id: Uuid, + ) -> Result { + self.set_default_workspace(user_id, workspace_id).await + } + + async fn resolve_permission_set( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> Result, ServiceError> { + self.resolve_permission_set(workspace_id, user_id).await + } + + async fn update_member_role( + &self, + workspace_id: Uuid, + user_id: Uuid, + requested_by: Uuid, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + ) -> Result { + self.update_member_role( + workspace_id, + user_id, + requested_by, + role_kind, + system_role, + custom_role_id, + ) + .await + } + + async fn list_roles( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.list_roles(workspace_id).await + } + + async fn create_role( + &self, + workspace_id: Uuid, + requested_by: Uuid, + 
name: &str, + base_role: WorkspaceBaseRole, + description: Option<&str>, + priority: i32, + overrides: &[PermissionOverride], + ) -> Result { + self.create_role( + workspace_id, + requested_by, + name, + base_role, + description, + priority, + overrides, + ) + .await + } + + async fn update_role( + &self, + workspace_id: Uuid, + requested_by: Uuid, + role_id: Uuid, + name: Option<&str>, + base_role: Option, + description: Option<&str>, + priority: Option, + overrides: Option<&[PermissionOverride]>, + ) -> Result { + self.update_role( + workspace_id, + requested_by, + role_id, + name, + base_role, + description, + priority, + overrides, + ) + .await + } + + async fn delete_role(&self, workspace_id: Uuid, role_id: Uuid) -> Result { + self.delete_role(workspace_id, role_id).await + } + + async fn list_invitations( + &self, + workspace_id: Uuid, + ) -> Result, ServiceError> { + self.list_invitations(workspace_id).await + } + + async fn create_invitation( + &self, + workspace_id: Uuid, + invited_by: Uuid, + email: &str, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + expires_at: Option>, + ) -> Result { + self.create_invitation( + workspace_id, + invited_by, + email, + role_kind, + system_role, + custom_role_id, + expires_at, + ) + .await + } + + async fn revoke_invitation( + &self, + workspace_id: Uuid, + invitation_id: Uuid, + ) -> Result { + self.revoke_invitation(workspace_id, invitation_id).await + } + + async fn accept_invitation( + &self, + token: &str, + user_id: Uuid, + user_email: &str, + ) -> Result { + self.accept_invitation(token, user_id, user_email).await + } + + async fn ensure_owner_membership( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> Result<(), ServiceError> { + self.ensure_owner_membership(workspace_id, user_id).await + } +} + struct NormalizedRoleSelection { - role_kind: String, - system_role: Option, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, permissions: PermissionSet, } @@ -68,7 +421,13 @@ impl WorkspaceService { // Creator becomes owner in the new workspace (default selection handled separately) let _member = self .repo - .add_member(workspace.id, creator_id, "system", Some("owner"), None) + .add_member( + workspace.id, + creator_id, + WorkspaceRoleKind::System, + Some(WorkspaceSystemRole::Owner), + None, + ) .await .map_err(ServiceError::from)?; Ok(workspace) @@ -102,7 +461,13 @@ impl WorkspaceService { user_id: Uuid, ) -> Result<(), ServiceError> { self.repo - .add_member(workspace_id, user_id, "system", Some("owner"), None) + .add_member( + workspace_id, + user_id, + WorkspaceRoleKind::System, + Some(WorkspaceSystemRole::Owner), + None, + ) .await .map_err(ServiceError::from)?; self.repo @@ -132,13 +497,14 @@ impl WorkspaceService { .map_err(ServiceError::from) } + #[allow(clippy::too_many_arguments)] pub async fn create_invitation( &self, workspace_id: Uuid, invited_by: Uuid, email: &str, - role_kind: &str, - system_role: Option<&str>, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, expires_at: Option>, ) -> Result { @@ -159,8 +525,8 @@ impl WorkspaceService { .create_invitation( workspace_id, &normalized_email, - &selection.role_kind, - selection.system_role.as_deref(), + selection.role_kind, + selection.system_role, selection.custom_role_id, invited_by, &token, @@ -242,16 +608,15 @@ impl WorkspaceService { let Some(record) = record else { return Ok(None); }; - let mut set = match record.role_kind.as_str() { - "system" => { - let role = 
record.system_role.as_deref().unwrap_or("viewer"); - system_role_permissions(role) + let mut set = match record.role_kind { + WorkspaceRoleKind::System => { + let role = record.system_role.unwrap_or(WorkspaceSystemRole::Viewer); + system_role_permissions(role.as_str()) } - "custom" => { - let base_role = record.custom_base_role.as_deref().unwrap_or("viewer"); - system_role_permissions(base_role) + WorkspaceRoleKind::Custom => { + let base_role = record.custom_base_role.unwrap_or(WorkspaceBaseRole::Viewer); + system_role_permissions(base_role.as_str()) } - _ => system_role_permissions("viewer"), }; if !record.overrides.is_empty() { set = apply_custom_overrides(set, record.overrides.clone()); @@ -307,12 +672,12 @@ impl WorkspaceService { if member.workspace_id == member.user_id { return Err(ServiceError::BadRequest("cannot_remove_owner")); } - let removing_owner = - member.role_kind == "system" && member.system_role.as_deref() == Some("owner"); + let removing_owner = member.role_kind == WorkspaceRoleKind::System + && member.system_role == Some(WorkspaceSystemRole::Owner); if removing_owner { let owner_count = self .repo - .count_system_role_members(workspace_id, "owner") + .count_system_role_members(workspace_id, WorkspaceSystemRole::Owner) .await .map_err(ServiceError::from)?; if owner_count <= 1 { @@ -343,8 +708,8 @@ impl WorkspaceService { workspace_id: Uuid, user_id: Uuid, requested_by: Uuid, - role_kind: &str, - system_role: Option<&str>, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, ) -> Result { let actor_permissions = self @@ -359,8 +724,8 @@ impl WorkspaceService { .update_member_role( workspace_id, user_id, - &selection.role_kind, - selection.system_role.as_deref(), + selection.role_kind, + selection.system_role, selection.custom_role_id, ) .await @@ -377,29 +742,28 @@ impl WorkspaceService { .map_err(ServiceError::from) } + #[allow(clippy::too_many_arguments)] pub async fn create_role( &self, workspace_id: Uuid, requested_by: Uuid, name: &str, - base_role: &str, + base_role: WorkspaceBaseRole, description: Option<&str>, priority: i32, - overrides: &[(String, bool)], + overrides: &[PermissionOverride], ) -> Result { - let normalized_base_role = base_role.trim(); let actor_permissions = self .resolve_permission_set(workspace_id, requested_by) .await? .ok_or(ServiceError::Forbidden)?; - let role_permissions = - Self::permission_set_from_definition(normalized_base_role, overrides)?; + let role_permissions = Self::permission_set_from_definition(base_role, overrides)?; Self::ensure_role_grant_allowed(&actor_permissions, &role_permissions)?; self.repo .create_role( workspace_id, name, - normalized_base_role, + base_role, description, priority, overrides, @@ -408,16 +772,17 @@ impl WorkspaceService { .map_err(ServiceError::from) } + #[allow(clippy::too_many_arguments)] pub async fn update_role( &self, workspace_id: Uuid, requested_by: Uuid, role_id: Uuid, name: Option<&str>, - base_role: Option<&str>, + base_role: Option, description: Option<&str>, priority: Option, - overrides: Option<&[(String, bool)]>, + overrides: Option<&[PermissionOverride]>, ) -> Result { let actor_permissions = self .resolve_permission_set(workspace_id, requested_by) @@ -429,10 +794,7 @@ impl WorkspaceService { .await .map_err(ServiceError::from)? 
.ok_or(ServiceError::NotFound)?; - let owned_base_role = base_role.map(|value| value.trim().to_string()); - let effective_base_role = owned_base_role - .as_deref() - .unwrap_or(existing.base_role.as_str()); + let effective_base_role = base_role.unwrap_or(existing.base_role); let effective_overrides = overrides .map(|items| items.to_vec()) .unwrap_or_else(|| existing.overrides.clone()); @@ -444,7 +806,7 @@ impl WorkspaceService { workspace_id, role_id, name, - owned_base_role.as_deref(), + base_role, description, priority, overrides, @@ -485,26 +847,29 @@ impl WorkspaceService { async fn resolve_role_selection( &self, workspace_id: Uuid, - role_kind: &str, - system_role: Option<&str>, + role_kind: WorkspaceRoleKind, + system_role: Option, custom_role_id: Option, ) -> Result { match role_kind { - "system" => { + WorkspaceRoleKind::System => { + if custom_role_id.is_some() { + return Err(ServiceError::BadRequest("unexpected_custom_role")); + } let Some(role) = system_role else { return Err(ServiceError::BadRequest("missing_system_role")); }; - if !matches!(role, "owner" | "admin" | "editor" | "viewer") { - return Err(ServiceError::BadRequest("invalid_system_role")); - } Ok(NormalizedRoleSelection { - role_kind: "system".to_string(), - system_role: Some(role.to_string()), + role_kind: WorkspaceRoleKind::System, + system_role: Some(role), custom_role_id: None, - permissions: system_role_permissions(role), + permissions: system_role_permissions(role.as_str()), }) } - "custom" => { + WorkspaceRoleKind::Custom => { + if system_role.is_some() { + return Err(ServiceError::BadRequest("unexpected_system_role")); + } let Some(role_id) = custom_role_id else { return Err(ServiceError::BadRequest("missing_custom_role")); }; @@ -521,33 +886,25 @@ impl WorkspaceService { permissions = apply_custom_overrides(permissions, record.overrides.clone()); } Ok(NormalizedRoleSelection { - role_kind: "custom".to_string(), + role_kind: WorkspaceRoleKind::Custom, system_role: None, custom_role_id: Some(role_id), permissions, }) } - _ => Err(ServiceError::BadRequest("invalid_role_kind")), } } fn permission_set_from_definition( - base_role: &str, - overrides: &[(String, bool)], + base_role: WorkspaceBaseRole, + overrides: &[PermissionOverride], ) -> Result { - if !Self::is_valid_base_role(base_role) { - return Err(ServiceError::BadRequest("invalid_base_role")); - } - let mut permissions = system_role_permissions(base_role); + let mut permissions = system_role_permissions(base_role.as_str()); if !overrides.is_empty() { permissions = apply_custom_overrides(permissions, overrides.to_vec()); } Ok(permissions) } - - fn is_valid_base_role(base_role: &str) -> bool { - matches!(base_role, "viewer" | "editor" | "admin") - } } #[async_trait] diff --git a/api/src/application/services/workspaces/permission_snapshot.rs b/api/crates/application/src/workspaces/services/permission_snapshot.rs similarity index 96% rename from api/src/application/services/workspaces/permission_snapshot.rs rename to api/crates/application/src/workspaces/services/permission_snapshot.rs index 4774c9c7..6ac23589 100644 --- a/api/src/application/services/workspaces/permission_snapshot.rs +++ b/api/crates/application/src/workspaces/services/permission_snapshot.rs @@ -3,7 +3,7 @@ use uuid::Uuid; use tracing::warn; -use crate::domain::workspaces::permissions::PermissionSet; +use domain::access::permissions::PermissionSet; pub fn permission_set_from_snapshot(snapshot: &[String]) -> PermissionSet { permission_set_from_snapshot_or_else(snapshot, 
PermissionSet::default) diff --git a/api/src/application/services/workspaces/slug.rs b/api/crates/application/src/workspaces/services/slug.rs similarity index 100% rename from api/src/application/services/workspaces/slug.rs rename to api/crates/application/src/workspaces/services/slug.rs diff --git a/api/crates/application/src/workspaces/use_cases/mod.rs b/api/crates/application/src/workspaces/use_cases/mod.rs new file mode 100644 index 00000000..33573843 --- /dev/null +++ b/api/crates/application/src/workspaces/use_cases/mod.rs @@ -0,0 +1 @@ +// Intentionally left empty for now. diff --git a/api/crates/bootstrap/Cargo.toml b/api/crates/bootstrap/Cargo.toml new file mode 100644 index 00000000..946068f6 --- /dev/null +++ b/api/crates/bootstrap/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "bootstrap" +version = "0.1.0" +edition = "2024" + +[dependencies] +application = { path = "../application" } +domain = { path = "../domain" } +infrastructure = { path = "../infrastructure" } +presentation = { path = "../presentation" } + +anyhow = "1" +axum = { version = "0.7", features = ["macros", "json", "multipart", "ws"] } +chrono = { version = "0.4", features = ["serde", "clock"] } +dotenvy = "0.15" +futures-util = { version = "0.3", features = ["sink"] } +http = "1" +once_cell = "1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +serde_yaml = "0.9" +sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "uuid", "chrono", "macros"] } +tokio = { version = "1.46", features = ["rt-multi-thread", "macros", "signal", "process"] } +tokio-stream = { version = "0.1", features = ["sync"] } +tower-http = { version = "0.6", features = ["cors", "trace", "fs"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } +utoipa = { version = "4", features = ["axum_extras", "chrono", "uuid"] } +utoipa-swagger-ui = { version = "7", features = ["axum"] } +uuid = { version = "1", features = ["v4", "serde"] } diff --git a/api/crates/bootstrap/src/app/build_runtime.rs b/api/crates/bootstrap/src/app/build_runtime.rs new file mode 100644 index 00000000..167036f5 --- /dev/null +++ b/api/crates/bootstrap/src/app/build_runtime.rs @@ -0,0 +1,493 @@ +use std::sync::Arc; + +use tokio::time::Duration; +use tracing::info; + +use application::core::ports::storage::storage_ingest_queue::StorageIngestQueue; +use application::core::ports::storage::storage_projection_queue::StorageProjectionQueue; +use application::core::ports::storage::storage_reconcile_jobs::StorageReconcileJobs; +use application::core::services::authorization::AuthorizationService; +use application::core::services::doc_events::{ + DocEventSubscriber, FanoutDocEventSubscriber, LoggingDocEventSubscriber, +}; +use application::core::services::health::HealthService; +use application::core::services::markdown_render::MarkdownRenderService; +use application::core::services::metrics::MetricsRegistry; +use application::core::services::storage::ingest::StorageIngestService; +use application::core::services::storage::ingest_enqueue::StorageIngestEnqueueService; +use application::core::services::storage::reconcile::StorageReconcileService; +use application::core::services::storage::reconcile_scheduler::StorageReconcileScheduler; +use application::documents::ports::doc_event_log::DocEventLog; +use application::documents::services::DocumentService; +use application::documents::services::files::FileService; +use application::documents::services::publishing::PublicService; +use 
application::documents::services::realtime::snapshot::MarkdownExportProvider; +use application::documents::services::sharing::ShareService; +use application::documents::services::tagging::TagService; +use application::identity::ports::secret_hasher::SecretHasher; +use application::identity::services::api_tokens::ApiTokenService; +use application::identity::services::auth::account::AccountService; +use application::identity::services::auth::token_validation::TokenValidationService; +use application::identity::services::user_shortcuts::UserShortcutService; +use application::plugins::ports::plugin_event_publisher::PluginEventPublisher; +use application::plugins::ports::plugin_event_subscriber::PluginEventSubscriber; +use application::plugins::services::asset_signer::AssetSigner; +use application::plugins::services::data::PluginDataService; +use application::plugins::services::execution::PluginExecutionService; +use application::plugins::services::management::PluginManagementService; +use application::plugins::services::permissions::PluginPermissionService; +use application::workspaces::services::{WorkspacePermissionResolver, WorkspaceService}; +use infrastructure::core::storage::{ + FsIngestWatcher, PgStorageIngestQueue, PgStorageReconcileJobs, StorageConsistencyMonitor, + StorageIngestWorker, StorageProjectionWorker, +}; +use infrastructure::documents::doc_event_log::PgDocEventLog; +use infrastructure::documents::event_poller::DocEventPoller; +use infrastructure::documents::exporter::DefaultDocumentExporter; +use infrastructure::documents::git_dirty_subscriber::GitDirtyDocEventSubscriber; +use infrastructure::identity::crypto::Argon2SecretHasher; +use presentation::context::{ + AppContext, AppServices, AppServicesDeps, CoreServicesDeps, DocumentServicesDeps, + GitServicesDeps, IdentityServicesDeps, PluginServicesDeps, PresentationConfig, + WorkspaceServicesDeps, +}; + +use crate::app::AppRuntime; +use crate::config::{Config, StorageBackend}; +use crate::jobs::{self, Jobs}; +use crate::{auth, git, plugins, realtime}; + +pub async fn build_runtime( + cfg: Config, + spawn_background_tasks: bool, +) -> anyhow::Result { + info!(?cfg, "Starting RefMD backend"); + + // Database + let pool = infrastructure::core::db::connect_pool(&cfg.database_url).await?; + infrastructure::core::db::migrate(&pool).await?; + + let secret_hasher: Arc = Arc::new(Argon2SecretHasher); + + let asset_signer = Arc::new(AssetSigner::new(&cfg.plugin_asset_sign_key)); + let uploads_root = std::path::PathBuf::from(&cfg.storage_root); + let (storage_resolver, storage_projection, reconcile_backend, reconcile_ingest_known_paths) = + crate::storage::build_storage_ports(&cfg, &pool).await?; + + let storage_job_queue: Arc = + crate::storage::build_storage_projection_queue(&pool); + let storage_ingest_queue: Arc = + Arc::new(PgStorageIngestQueue::new(pool.clone())); + let storage_ingest_enqueuer: Arc = Arc::new( + StorageIngestEnqueueService::new(storage_ingest_queue.clone()), + ); + let mut jobs = Jobs::new(); + + if cfg.storage_monitor_enabled { + let monitor = Arc::new(StorageConsistencyMonitor::new( + pool.clone(), + storage_resolver.clone(), + storage_job_queue.clone(), + storage_ingest_queue.clone(), + Duration::from_secs(cfg.storage_monitor_interval_secs), + cfg.storage_monitor_batch_size, + )); + tracing::info!( + interval_secs = cfg.storage_monitor_interval_secs, + batch_size = cfg.storage_monitor_batch_size, + "storage_consistency_monitor_enabled" + ); + jobs::spawn_storage_consistency_monitor(&mut jobs, true, 
spawn_background_tasks, monitor); + } else { + tracing::info!("storage_consistency_monitor_disabled"); + } + + let snapshot_archive_repo: Arc< + dyn application::documents::ports::document_snapshot_archive_repository::DocumentSnapshotArchiveRepository, + > = Arc::new( + infrastructure::documents::db::repositories::document_snapshot_archive_repository_sqlx::SqlxDocumentSnapshotArchiveRepository::new( + pool.clone(), + ), + ); + let document_repo = Arc::new( + infrastructure::documents::db::repositories::document_repository_sqlx::SqlxDocumentRepository::new( + pool.clone(), + ), + ); + let linkgraph_repo: Arc = + Arc::new( + infrastructure::documents::db::repositories::linkgraph_repository_sqlx::SqlxLinkGraphRepository::new( + pool.clone(), + ), + ); + let doc_event_log: Arc = Arc::new(PgDocEventLog::new(pool.clone())); + let metrics = Arc::new(MetricsRegistry::default()); + let storage_reconcile_jobs: Arc = + Arc::new(PgStorageReconcileJobs::new(pool.clone())); + let logging_subscriber: Arc = LoggingDocEventSubscriber::new(); + let git_dirty_subscriber: Arc = + GitDirtyDocEventSubscriber::new(pool.clone()); + let doc_event_subscriber: Arc = + FanoutDocEventSubscriber::new(vec![logging_subscriber.clone(), git_dirty_subscriber]); + if matches!(cfg.storage_backend, StorageBackend::Filesystem) { + use domain::storage::ingest_backend::StorageIngestBackend; + let watcher = Arc::new(FsIngestWatcher::new( + uploads_root.clone(), + storage_ingest_queue.clone(), + StorageIngestBackend::FsWatcher, + )); + jobs::spawn_fs_ingest_watcher(&mut jobs, spawn_background_tasks, watcher); + } + { + let poller = Arc::new(DocEventPoller::new( + pool.clone(), + doc_event_subscriber.clone(), + Duration::from_millis(500), + 200, + "doc_event_poller", + )); + jobs::spawn_doc_event_poller(&mut jobs, spawn_background_tasks, poller); + } + let shares_repo_impl = Arc::new( + infrastructure::documents::db::repositories::shares_repository_sqlx::SqlxSharesRepository::new( + pool.clone(), + ), + ); + let share_service = Arc::new(ShareService::new(shares_repo_impl.clone())); + let access_repo = Arc::new( + infrastructure::documents::db::repositories::access_repository_sqlx::SqlxAccessRepository::new( + pool.clone(), + ), + ); + let authorization_service = Arc::new(AuthorizationService::new( + access_repo.clone(), + shares_repo_impl.clone(), + )); + let files_repo = Arc::new( + infrastructure::documents::db::repositories::files_repository_sqlx::SqlxFilesRepository::new( + pool.clone(), + ), + ); + let documents_tx_runner: Arc = + Arc::new( + infrastructure::documents::tx_runner_sqlx::SqlxDocumentsTxRunner::new( + pool.clone(), + document_repo.clone(), + files_repo.clone(), + ), + ); + let public_repo = Arc::new( + infrastructure::documents::db::repositories::public_repository_sqlx::SqlxPublicRepository::new( + pool.clone(), + ), + ); + let user_repo = Arc::new( + infrastructure::identity::db::repositories::user_repository_sqlx::SqlxUserRepository::new( + pool.clone(), + ), + ); + let workspace_repo = Arc::new( + infrastructure::workspaces::db::repositories::workspace_repository_sqlx::SqlxWorkspaceRepository::new( + pool.clone(), + ), + ); + let workspace_service = Arc::new(WorkspaceService::new(workspace_repo.clone())); + let workspace_permissions: Arc = workspace_service.clone(); + { + let reconcile_service = Arc::new(StorageReconcileService::new( + storage_reconcile_jobs.clone(), + document_repo.clone(), + files_repo.clone(), + storage_ingest_queue.clone(), + storage_job_queue.clone(), + reconcile_backend.clone(), + 
reconcile_ingest_known_paths, + )); + jobs::spawn_storage_reconcile_worker(&mut jobs, spawn_background_tasks, reconcile_service); + let scheduler = + StorageReconcileScheduler::new(storage_reconcile_jobs.clone(), workspace_repo.clone()); + jobs::spawn_storage_reconcile_scheduler( + &mut jobs, + spawn_background_tasks, + scheduler, + Duration::from_secs(60 * 60), + ); + } + let tag_repo = Arc::new( + infrastructure::documents::db::repositories::tag_repository_sqlx::SqlxTagRepository::new( + pool.clone(), + ), + ); + let tag_service = Arc::new(TagService::new(tag_repo.clone())); + let api_token_repo = Arc::new( + infrastructure::identity::db::repositories::api_token_repository_sqlx::SqlxApiTokenRepository::new( + pool.clone(), + ), + ); + let api_token_service = Arc::new(ApiTokenService::new( + api_token_repo.clone(), + secret_hasher.clone(), + )); + let token_validation_service = Arc::new(TokenValidationService::new( + api_token_repo.clone(), + secret_hasher.clone(), + )); + let user_session_repo = Arc::new( + infrastructure::identity::db::repositories::user_session_repository_sqlx::SqlxUserSessionRepository::new( + pool.clone(), + ), + ); + let auth_stack = auth::build_auth_stack( + &cfg, + token_validation_service, + secret_hasher.clone(), + user_session_repo.clone(), + ) + .await?; + + jobs::spawn_session_cleanup( + &mut jobs, + spawn_background_tasks, + user_session_repo.clone(), + jobs::SESSION_CLEANUP_INTERVAL_SECS, + jobs::SESSION_CLEANUP_BATCH_SIZE, + ); + let user_shortcuts = Arc::new( + infrastructure::identity::db::repositories::user_shortcut_repository_sqlx::SqlxUserShortcutRepository::new( + pool.clone(), + ), + ); + let user_shortcut_service = + Arc::new(UserShortcutService::new(user_shortcuts.clone(), 32 * 1024)); + let realtime_stack = realtime::build_realtime_stack( + &cfg, + &pool, + storage_resolver.clone(), + storage_job_queue.clone(), + snapshot_archive_repo.clone(), + ) + .await?; + let local_hub = realtime_stack.local_hub.clone(); + let realtime_engine = realtime_stack.engine.clone(); + let snapshot_service_arc = realtime_stack.snapshot_service.clone(); + + let recent_projection_cache = Arc::new( + application::core::services::storage::projection_cache::RecentProjectionCache::new( + Duration::from_secs(5), + ), + ); + + { + let markdown_exporter: Arc = snapshot_service_arc.clone(); + let worker = Arc::new(StorageProjectionWorker::new( + storage_job_queue.clone(), + storage_projection.clone(), + storage_resolver.clone(), + markdown_exporter, + doc_event_log.clone(), + metrics.clone(), + workspace_permissions.clone(), + recent_projection_cache.clone(), + )); + jobs::spawn_storage_projection_worker(&mut jobs, spawn_background_tasks, worker); + } + + let crate::git::GitStack { + workspace: git_workspace, + service: git_service, + repo: git_repo, + rebuild, + rebuild_jobs: git_rebuild_jobs, + } = git::build_git_stack( + &cfg, + &pool, + storage_resolver.clone(), + snapshot_service_arc.clone(), + realtime_engine.clone(), + document_repo.clone(), + document_repo.clone(), + files_repo.clone(), + workspace_permissions.clone(), + metrics.clone(), + ) + .await?; + + jobs::spawn_git_rebuild_jobs(&mut jobs, spawn_background_tasks, rebuild); + let plugin_repo = Arc::new( + infrastructure::plugins::db::repositories::plugin_repository_sqlx::SqlxPluginRepository::new( + pool.clone(), + ), + ); + let plugin_data_service = Arc::new(PluginDataService::new(plugin_repo.clone())); + let plugin_installations = Arc::new( + 
infrastructure::plugins::db::repositories::plugin_installation_repository_sqlx::SqlxPluginInstallationRepository::new( + pool.clone(), + ), + ); + let plugin_limits = plugins::build_plugin_execution_limits(&cfg); + let (plugin_runtime, plugin_installer, plugin_assets, s3_plugin_store, plugin_fetcher) = + plugins::build_plugin_stack(&cfg, plugin_limits).await?; + let plugin_permission_service = Arc::new(PluginPermissionService::new(plugin_runtime.clone())); + let plugin_execution_service = Arc::new(PluginExecutionService::new( + plugin_repo.clone(), + document_repo.clone(), + plugin_runtime.clone(), + authorization_service.clone(), + )); + let account_service = Arc::new(AccountService::new( + user_repo.clone(), + secret_hasher.clone(), + document_repo.clone(), + files_repo.clone(), + plugin_installations.clone(), + plugin_repo.clone(), + plugin_assets.clone(), + git_repo.clone(), + git_workspace.clone(), + storage_job_queue.clone(), + workspace_service.clone(), + )); + let plugin_event_bus = Arc::new( + infrastructure::plugins::event_bus_pg::PgPluginEventBus::new(pool.clone(), "plugin_events"), + ); + if let Some(store) = &s3_plugin_store { + store.spawn_event_listener(plugin_event_bus.clone()); + + let installations = plugin_installations.clone(); + let assets = store.clone(); + jobs::spawn_plugin_prefetch(&mut jobs, spawn_background_tasks, installations, assets); + } + let plugin_event_publisher: Arc = plugin_event_bus.clone(); + let plugin_event_subscriber: Arc = plugin_event_bus.clone(); + + let document_exporter = Arc::new(DefaultDocumentExporter::new()); + + let document_service = Arc::new(DocumentService::new( + documents_tx_runner, + document_repo.clone(), + files_repo.clone(), + access_repo.clone(), + shares_repo_impl.clone(), + linkgraph_repo.clone(), + storage_resolver.clone(), + doc_event_log.clone(), + realtime_engine.clone(), + snapshot_service_arc.clone(), + document_exporter.clone(), + )); + + { + let handler = Arc::new(StorageIngestService::new( + document_repo.clone(), + document_repo.clone(), + files_repo.clone(), + realtime_engine.clone(), + storage_resolver.clone(), + storage_projection.clone(), + doc_event_log.clone(), + document_service.clone(), + workspace_permissions.clone(), + recent_projection_cache.clone(), + )); + let worker = Arc::new(StorageIngestWorker::new( + storage_ingest_queue.clone(), + handler, + metrics.clone(), + )); + jobs::spawn_storage_ingest_worker(&mut jobs, spawn_background_tasks, worker); + } + let file_service = Arc::new(FileService::new( + files_repo.clone(), + storage_resolver.clone(), + access_repo.clone(), + shares_repo_impl.clone(), + doc_event_log.clone(), + )); + let public_service = Arc::new(PublicService::new( + public_repo.clone(), + realtime_engine.clone(), + )); + let plugin_management_service = Arc::new(PluginManagementService::new( + plugin_installations.clone(), + plugin_assets.clone(), + plugin_event_publisher.clone(), + asset_signer.clone(), + cfg.plugin_asset_url_ttl_secs, + plugin_fetcher.clone(), + plugin_installer.clone(), + )); + let markdown_renderer = Arc::new(infrastructure::core::markdown::ComrakMarkdownRenderer::new()); + let markdown_render_service = Arc::new(MarkdownRenderService::new( + plugin_assets.clone(), + plugin_installations.clone(), + plugin_runtime.clone(), + markdown_renderer, + asset_signer.clone(), + cfg.plugin_asset_url_ttl_secs, + )); + + let health_probe = + infrastructure::core::health::db_probe::DatabaseHealthProbe::new(pool.clone()); + let health_service = 
Arc::new(HealthService::new(health_probe)); + + let external_auth_registry = auth_stack.external_auth.clone(); + + let services = AppServices::new(AppServicesDeps { + core: CoreServicesDeps { + authorization: authorization_service, + markdown_render_service: markdown_render_service.clone(), + storage_ingest_queue: storage_ingest_queue.clone(), + storage_ingest_enqueuer: storage_ingest_enqueuer.clone(), + health_service: health_service.clone(), + }, + documents: DocumentServicesDeps { + document_service: document_service.clone(), + share_service: share_service.clone(), + file_service: file_service.clone(), + public_service: public_service.clone(), + tag_service: tag_service.clone(), + realtime_engine: realtime_engine.clone(), + }, + git: GitServicesDeps { + git_service: git_service.clone(), + }, + identity: IdentityServicesDeps { + api_token_service: api_token_service.clone(), + user_shortcut_service: user_shortcut_service.clone(), + account_service: account_service.clone(), + auth_service: auth_stack.auth_service.clone(), + session_service: auth_stack.session_service.clone(), + external_auth: external_auth_registry.clone(), + }, + plugins: PluginServicesDeps { + plugin_execution_service: plugin_execution_service.clone(), + plugin_management_service: plugin_management_service.clone(), + plugin_permission_service: plugin_permission_service.clone(), + plugin_data_service: plugin_data_service.clone(), + plugin_event_subscriber, + }, + workspaces: WorkspaceServicesDeps { + workspace_service: workspace_service.clone(), + }, + }); + + let presentation_cfg = PresentationConfig { + frontend_url: cfg.frontend_url.clone(), + upload_max_bytes: cfg.upload_max_bytes, + public_base_url: cfg.public_base_url.clone(), + session_cookie_secure: auth_stack.cookie_secure, + }; + let ctx = AppContext::new(presentation_cfg, services, metrics.clone()); + + Ok(AppRuntime { + cfg, + pool, + ctx, + local_hub, + jobs, + storage_job_queue, + storage_reconcile_jobs, + git_rebuild_jobs, + plugin_assets, + }) +} diff --git a/api/crates/bootstrap/src/app/builder.rs b/api/crates/bootstrap/src/app/builder.rs new file mode 100644 index 00000000..bf3c26ff --- /dev/null +++ b/api/crates/bootstrap/src/app/builder.rs @@ -0,0 +1,39 @@ +use dotenvy::dotenv; + +use crate::{app, telemetry}; + +pub struct AppBuilder { + cfg: crate::config::Config, + spawn_background_tasks: bool, +} + +impl AppBuilder { + pub fn from_env() -> anyhow::Result<Self> { + dotenv().ok(); + + telemetry::init_tracing(); + + let cfg = crate::config::Config::from_env()?; + Ok(Self { + cfg, + spawn_background_tasks: true, + }) + } + + pub fn new(cfg: crate::config::Config) -> Self { + Self { + cfg, + spawn_background_tasks: true, + } + } + + /// Enable or disable background tasks (useful for CLI/tests).
+ pub fn with_background_tasks(mut self, enabled: bool) -> Self { + self.spawn_background_tasks = enabled; + self + } + + pub async fn build(self) -> anyhow::Result<app::AppRuntime> { + app::build_runtime(self.cfg, self.spawn_background_tasks).await + } +} diff --git a/api/crates/bootstrap/src/app/mod.rs b/api/crates/bootstrap/src/app/mod.rs new file mode 100644 index 00000000..7f929441 --- /dev/null +++ b/api/crates/bootstrap/src/app/mod.rs @@ -0,0 +1,38 @@ +mod build_runtime; +mod builder; +mod runtime; + +pub use builder::AppBuilder; + +use std::sync::Arc; + +use application::core::ports::storage::storage_projection_queue::StorageProjectionQueue; +use application::core::ports::storage::storage_reconcile_jobs::StorageReconcileJobs; +use application::git::ports::git_rebuild_job_queue::GitRebuildJobQueue; +use application::plugins::ports::plugin_asset_store::PluginAssetStore; +use infrastructure::core::db::PgPool; +use presentation::context::AppContext; + +pub struct AppRuntime { + cfg: crate::config::Config, + pool: PgPool, + ctx: AppContext, + local_hub: Option, + jobs: crate::jobs::Jobs, + storage_job_queue: Arc<dyn StorageProjectionQueue>, + storage_reconcile_jobs: Arc<dyn StorageReconcileJobs>, + git_rebuild_jobs: Arc<dyn GitRebuildJobQueue>, + plugin_assets: Arc<dyn PluginAssetStore>, +} + +pub async fn run() -> anyhow::Result<()> { + AppBuilder::from_env()?.build().await?.serve().await +} + +/// Build the application runtime (infrastructure + services) without starting servers. +pub async fn build_runtime( + cfg: crate::config::Config, + spawn_background_tasks: bool, +) -> anyhow::Result<AppRuntime> { + build_runtime::build_runtime(cfg, spawn_background_tasks).await +} diff --git a/api/crates/bootstrap/src/app/runtime.rs b/api/crates/bootstrap/src/app/runtime.rs new file mode 100644 index 00000000..b4dc2709 --- /dev/null +++ b/api/crates/bootstrap/src/app/runtime.rs @@ -0,0 +1,88 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tracing::{error, info}; + +use application::core::ports::storage::storage_projection_queue::StorageProjectionQueue; +use application::core::ports::storage::storage_reconcile_jobs::StorageReconcileJobs; +use application::git::ports::git_rebuild_job_queue::GitRebuildJobQueue; +use application::plugins::ports::plugin_asset_store::PluginAssetStore; +use infrastructure::core::db::PgPool; +use presentation::context::AppContext; + +use crate::jobs::{self, Jobs}; +use crate::{app::AppRuntime, http}; + +type AppRuntimeParts = ( + crate::config::Config, + PgPool, + AppContext, + Option, + Jobs, + Arc<dyn StorageProjectionQueue>, + Arc<dyn StorageReconcileJobs>, + Arc<dyn GitRebuildJobQueue>, + Arc<dyn PluginAssetStore>, +); + +impl AppRuntime { + /// Consume the runtime and return owned parts for reuse.
+ pub fn into_parts(self) -> AppRuntimeParts { + ( + self.cfg, + self.pool, + self.ctx, + self.local_hub, + self.jobs, + self.storage_job_queue, + self.storage_reconcile_jobs, + self.git_rebuild_jobs, + self.plugin_assets, + ) + } + + pub async fn serve(self) -> anyhow::Result<()> { + let AppRuntime { + cfg, + ctx, + pool, + local_hub, + mut jobs, + storage_job_queue: _, + storage_reconcile_jobs: _, + git_rebuild_jobs: _, + plugin_assets: _, + } = self; + + let api_router = http::build_api_router(&cfg, ctx.clone()).await?; + + // Mount WS endpoint on the same port as HTTP + + // Compose final app for HTTP + let api_addr = SocketAddr::from(([0, 0, 0, 0], cfg.api_port)); + info!(%api_addr, "HTTP API listening"); + let listener = tokio::net::TcpListener::bind(api_addr).await?; + let ws_router = http::build_ws_router(ctx.clone()); + + let app = api_router.merge(ws_router); + + jobs::spawn_snapshot_loop( + &mut jobs, + true, + local_hub.clone(), + cfg.clone(), + pool.clone(), + ); + + let server = + axum::serve(listener, app).with_graceful_shutdown(jobs::wait_for_shutdown_signal()); + match server.await { + Ok(()) => {} + Err(e) => error!(?e, "API server failed"), + } + + // Abort background jobs on exit. + jobs.shutdown().await; + Ok(()) + } +} diff --git a/api/crates/bootstrap/src/auth.rs b/api/crates/bootstrap/src/auth.rs new file mode 100644 index 00000000..2f063e0d --- /dev/null +++ b/api/crates/bootstrap/src/auth.rs @@ -0,0 +1,115 @@ +use std::sync::Arc; + +use tracing::{info, warn}; + +use crate::config::Config; +use application::identity::ports::jwt_codec::JwtCodec; +use application::identity::ports::secret_hasher::SecretHasher; +use application::identity::services::auth::auth_service::AuthService; +use application::identity::services::auth::external::{ExternalAuthRegistry, ExternalAuthVerifier}; +use application::identity::services::auth::token_validation::TokenValidationService; +use application::identity::services::auth::user_sessions::UserSessionService; +use infrastructure::identity::auth::github::GithubOAuthProvider; +use infrastructure::identity::auth::google::GoogleIdentityProvider; +use infrastructure::identity::auth::oidc::{OidcIdentityProvider, OidcOAuthProviderConfig}; +use infrastructure::identity::jwt::Hs256JwtCodec; + +pub struct AuthStack { + pub auth_service: Arc, + pub session_service: Arc, + pub external_auth: Arc, + pub cookie_secure: bool, +} + +pub async fn build_auth_stack( + cfg: &Config, + token_validation_service: Arc, + secret_hasher: Arc, + user_session_repo: Arc< + dyn application::identity::ports::user_session_repository::UserSessionRepository, + >, +) -> anyhow::Result { + let external_auth = build_external_auth_registry(cfg).await?; + + let cookie_secure = cfg + .frontend_url + .as_deref() + .map(|u| u.starts_with("https://")) + .unwrap_or(false); + + let jwt: Arc = Arc::new(Hs256JwtCodec::new(cfg.jwt_secret_pem.clone())); + let auth_service = Arc::new(AuthService::new( + jwt, + token_validation_service, + cfg.jwt_expires_secs as usize, + )); + let session_service = Arc::new(UserSessionService::new( + user_session_repo, + secret_hasher, + auth_service.clone(), + cfg.session_refresh_ttl_secs, + cfg.session_refresh_remember_ttl_secs, + )); + + Ok(AuthStack { + auth_service, + session_service, + external_auth, + cookie_secure, + }) +} + +async fn build_external_auth_registry(cfg: &Config) -> anyhow::Result> { + let mut external_auth_providers: Vec> = Vec::new(); + + if let Some(google_cfg) = cfg.google_oauth.clone() { + match 
GoogleIdentityProvider::new(google_cfg.client_ids.clone()) { + Ok(provider) => { + info!("google_oauth_provider_enabled"); + external_auth_providers.push(Arc::new(provider)); + } + Err(err) => { + warn!(error = ?err, "google_oauth_provider_init_failed"); + } + } + } + + if let Some(github_cfg) = cfg.github_oauth.clone() { + match GithubOAuthProvider::new( + github_cfg.client_id.clone(), + github_cfg.client_secret.clone(), + github_cfg.redirect_uri.clone(), + ) { + Ok(provider) => { + info!("github_oauth_provider_enabled"); + external_auth_providers.push(Arc::new(provider)); + } + Err(err) => { + warn!(error = ?err, "github_oauth_provider_init_failed"); + } + } + } + + if let Some(oidc_cfg) = cfg.oidc_oauth.clone() { + let cfg = OidcOAuthProviderConfig { + issuer_url: oidc_cfg.issuer_url, + discovery_url: oidc_cfg.discovery_url, + client_id: oidc_cfg.client_id, + client_secret: oidc_cfg.client_secret, + redirect_uri: oidc_cfg.redirect_uri, + scopes: oidc_cfg.scopes, + display_name: oidc_cfg.display_name, + }; + match OidcIdentityProvider::discover(cfg).await { + Ok(provider) => { + info!("oidc_oauth_provider_enabled"); + external_auth_providers.push(Arc::new(provider)); + } + Err(err) => { + warn!(error = ?err, "oidc_oauth_provider_init_failed"); + } + } + } + + Ok(Arc::new(ExternalAuthRegistry::new(external_auth_providers))) +} diff --git a/api/src/bootstrap/config.rs b/api/crates/bootstrap/src/config.rs similarity index 74% rename from api/src/bootstrap/config.rs rename to api/crates/bootstrap/src/config.rs index b1a48547..ae5f70a7 100644 --- a/api/src/bootstrap/config.rs +++ b/api/crates/bootstrap/src/config.rs @@ -1,12 +1,13 @@ use std::env; +use std::fmt; use std::str::FromStr; fn env_var(keys: &[&str]) -> Option<String> { for key in keys { - if let Ok(value) = env::var(key) { - if !value.trim().is_empty() { - return Some(value); - } + if let Ok(value) = env::var(key) + && !value.trim().is_empty() + { + return Some(value); } } None @@ -30,7 +31,7 @@ impl FromStr for StorageBackend { } } -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct Config { pub api_port: u16, pub frontend_url: Option<String>, @@ -78,19 +79,19 @@ pub struct Config { pub oidc_oauth: Option<OidcOAuthConfig>, } -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct GoogleOAuthConfig { pub client_ids: Vec<String>, } -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct GithubOAuthConfig { pub client_id: String, pub client_secret: String, pub redirect_uri: Option<String>, } -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct OidcOAuthConfig { pub issuer_url: String, pub discovery_url: Option<String>, @@ -101,6 +102,114 @@ pub struct OidcOAuthConfig { pub display_name: Option<String>, } +fn redact() -> &'static str { + "<redacted>" +} + +impl fmt::Debug for GoogleOAuthConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GoogleOAuthConfig") + .field("client_ids_count", &self.client_ids.len()) + .finish() + } +} + +impl fmt::Debug for GithubOAuthConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GithubOAuthConfig") + .field("client_id", &self.client_id) + .field("client_secret", &redact()) + .field("redirect_uri", &self.redirect_uri) + .finish() + } +} + +impl fmt::Debug for OidcOAuthConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OidcOAuthConfig") + .field("issuer_url", &self.issuer_url) + .field("discovery_url", &self.discovery_url) + .field("client_id", &self.client_id) + .field("client_secret", &redact()) + .field("redirect_uri", &self.redirect_uri) + .field("scopes",
&self.scopes) + .field("display_name", &self.display_name) + .finish() + } +} + +impl fmt::Debug for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Config") + .field("api_port", &self.api_port) + .field("frontend_url", &self.frontend_url) + .field("database_url", &redact()) + .field("jwt_secret_pem", &redact()) + .field("jwt_expires_secs", &self.jwt_expires_secs) + .field("session_refresh_ttl_secs", &self.session_refresh_ttl_secs) + .field( + "session_refresh_remember_ttl_secs", + &self.session_refresh_remember_ttl_secs, + ) + .field("snapshot_interval_secs", &self.snapshot_interval_secs) + .field("snapshot_keep_versions", &self.snapshot_keep_versions) + .field("updates_keep_window", &self.updates_keep_window) + .field("storage_backend", &self.storage_backend) + .field("storage_root", &self.storage_root) + .field("storage_monitor_enabled", &self.storage_monitor_enabled) + .field( + "storage_monitor_interval_secs", + &self.storage_monitor_interval_secs, + ) + .field( + "storage_monitor_batch_size", + &self.storage_monitor_batch_size, + ) + .field("s3_endpoint", &self.s3_endpoint) + .field("s3_bucket", &self.s3_bucket) + .field("s3_region", &self.s3_region) + .field( + "s3_access_key", + &self.s3_access_key.as_ref().map(|_| redact()), + ) + .field( + "s3_secret_key", + &self.s3_secret_key.as_ref().map(|_| redact()), + ) + .field("s3_use_path_style", &self.s3_use_path_style) + .field("plugin_dir", &self.plugin_dir) + .field("plugin_timeout_secs", &self.plugin_timeout_secs) + .field("plugin_memory_max_mb", &self.plugin_memory_max_mb) + .field("plugin_fuel_limit", &self.plugin_fuel_limit) + .field("plugin_asset_sign_key", &redact()) + .field("plugin_asset_url_ttl_secs", &self.plugin_asset_url_ttl_secs) + .field("encryption_key", &redact()) + .field("upload_max_bytes", &self.upload_max_bytes) + .field("public_base_url", &self.public_base_url) + .field("is_production", &self.is_production) + .field("cluster_mode", &self.cluster_mode) + .field("redis_url", &self.redis_url.as_ref().map(|_| redact())) + .field("redis_stream_prefix", &self.redis_stream_prefix) + .field( + "redis_min_message_lifetime_ms", + &self.redis_min_message_lifetime_ms, + ) + .field("redis_task_debounce_ms", &self.redis_task_debounce_ms) + .field("redis_awareness_ttl_ms", &self.redis_awareness_ttl_ms) + .field("redis_stream_max_len", &self.redis_stream_max_len) + .field( + "snapshot_archive_interval_secs", + &self.snapshot_archive_interval_secs, + ) + .field("git_rebuild_enabled", &self.git_rebuild_enabled) + .field("git_rebuild_interval_secs", &self.git_rebuild_interval_secs) + .field("google_oauth", &self.google_oauth) + .field("github_oauth", &self.github_oauth) + .field("oidc_oauth", &self.oidc_oauth) + .finish() + } +} + impl Config { pub fn from_env() -> anyhow::Result { let api_port = env_var(&["API_PORT", "PORT"]) @@ -275,11 +384,9 @@ impl Config { // Production hardening: require proper FRONTEND_URL and robust secrets if is_production { - if frontend_url + if !frontend_url .as_deref() - .map(|u| u.starts_with("http")) - .unwrap_or(false) - == false + .is_some_and(|u| u.starts_with("http")) { anyhow::bail!( "FRONTEND_URL must be set to a full origin in production (e.g., https://app.example.com)" diff --git a/api/crates/bootstrap/src/git.rs b/api/crates/bootstrap/src/git.rs new file mode 100644 index 00000000..229d224c --- /dev/null +++ b/api/crates/bootstrap/src/git.rs @@ -0,0 +1,144 @@ +use std::sync::Arc; + +use anyhow::Context; +use tracing::info; + +use 
diff --git a/api/crates/bootstrap/src/git.rs b/api/crates/bootstrap/src/git.rs
new file mode 100644
index 00000000..229d224c
--- /dev/null
+++ b/api/crates/bootstrap/src/git.rs
@@ -0,0 +1,144 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+use tracing::info;
+
+use crate::config::{Config, StorageBackend};
+use application::core::ports::storage::storage_port::StorageResolverPort;
+use application::core::services::metrics::MetricsRegistry;
+use application::documents::services::realtime::snapshot::SnapshotService;
+use application::git::ports::git_rebuild_job_queue::GitRebuildJobQueue;
+use application::git::services::GitService;
+use application::git::services::rebuild::GitRebuildService;
+use application::git::services::rebuild_scheduler::GitRebuildScheduler;
+use application::workspaces::services::WorkspacePermissionResolver;
+use infrastructure::core::db::PgPool;
+use infrastructure::git::PgGitRebuildJobQueue;
+use infrastructure::git::storage::{GitStorageDriverConfig, build_git_storage};
+use infrastructure::git::workspace::GitWorkspaceService;
+
+pub struct GitRebuildStack {
+    pub service: Arc<GitRebuildService>,
+    pub scheduler: GitRebuildScheduler,
+    pub interval: std::time::Duration,
+}
+
+pub struct GitStack {
+    pub workspace: Arc<GitWorkspaceService>,
+    pub service: Arc<GitService>,
+    pub repo: Arc<infrastructure::git::db::repositories::git_repository_sqlx::SqlxGitRepository>,
+    pub rebuild: Option<GitRebuildStack>,
+    pub rebuild_jobs: Arc<dyn GitRebuildJobQueue>,
+}
+
+pub fn git_storage_driver_config(cfg: &Config) -> anyhow::Result<GitStorageDriverConfig> {
+    let uploads_root = std::path::PathBuf::from(&cfg.storage_root);
+    let config = match cfg.storage_backend {
+        StorageBackend::Filesystem => GitStorageDriverConfig::Filesystem {
+            root: uploads_root.clone(),
+        },
+        StorageBackend::S3 => {
+            let s3_settings = infrastructure::git::storage::S3GitStorageConfig {
+                storage_root_prefix: cfg.storage_root.clone(),
+                bucket: cfg
+                    .s3_bucket
+                    .clone()
+                    .context("S3_BUCKET must be configured when using S3 storage backend")?,
+                region: cfg.s3_region.clone(),
+                endpoint: cfg.s3_endpoint.clone(),
+                access_key: cfg.s3_access_key.clone(),
+                secret_key: cfg.s3_secret_key.clone(),
+                use_path_style: cfg.s3_use_path_style,
+            };
+            GitStorageDriverConfig::S3(s3_settings)
+        }
+    };
+    Ok(config)
+}
+
+#[allow(clippy::too_many_arguments)]
+pub async fn build_git_stack(
+    cfg: &Config,
+    pool: &PgPool,
+    storage_resolver: Arc<dyn StorageResolverPort>,
+    snapshot_service: Arc<SnapshotService>,
+    realtime_engine: Arc<
+        dyn application::documents::ports::realtime::realtime_port::RealtimeEngine,
+    >,
+    document_repo: Arc<dyn application::documents::ports::document_repository::DocumentRepository>,
+    document_paths: Arc<
+        dyn application::documents::ports::document_path_repository::DocumentPathRepository,
+    >,
+    files_repo: Arc<dyn application::documents::ports::files_repository::FilesRepository>,
+    workspace_permissions: Arc<WorkspacePermissionResolver>,
+    metrics: Arc<MetricsRegistry>,
+) -> anyhow::Result<GitStack> {
+    let git_rebuild_jobs: Arc<dyn GitRebuildJobQueue> =
+        Arc::new(PgGitRebuildJobQueue::new(pool.clone()));
+
+    let git_repo = Arc::new(
+        infrastructure::git::db::repositories::git_repository_sqlx::SqlxGitRepository::new(
+            pool.clone(),
+            cfg.encryption_key.clone(),
+        ),
+    );
+    let git_pull_sessions = Arc::new(
+        infrastructure::git::db::repositories::git_pull_session_repository_sqlx::GitPullSessionRepositorySqlx::new(
+            pool.clone(),
+        ),
+    );
+    let git_storage_cfg = git_storage_driver_config(cfg)?;
+    let git_storage = build_git_storage(git_storage_cfg).await?;
+    let gitignore_port = Arc::new(infrastructure::core::storage::gitignore::FsGitignorePort);
+    let git_workspace = Arc::new(GitWorkspaceService::new(
+        pool.clone(),
+        git_storage.clone(),
+        storage_resolver.clone(),
+        snapshot_service.clone(),
+        realtime_engine.clone(),
+        document_repo.clone(),
+        document_paths.clone(),
+    )?);
+    let git_service = Arc::new(GitService::new(
+        git_repo.clone(),
+        storage_resolver.clone(),
+        files_repo.clone(),
+        document_repo.clone(),
+        gitignore_port.clone(),
+        git_workspace.clone(),
+        git_pull_sessions.clone(),
+    ));
+
+    let rebuild = if cfg.git_rebuild_enabled {
+        let rebuild_service = Arc::new(GitRebuildService::new(
+            git_rebuild_jobs.clone(),
+            git_workspace.clone(),
+            git_repo.clone(),
+            metrics.clone(),
+            workspace_permissions,
+        ));
+        let interval = std::time::Duration::from_secs(cfg.git_rebuild_interval_secs);
+        let rebuild_scheduler = GitRebuildScheduler::new(
+            git_rebuild_jobs.clone(),
+            git_repo.clone(),
+            git_workspace.clone(),
+        );
+        info!("git_rebuild_scheduler_enabled");
+        Some(GitRebuildStack {
+            service: rebuild_service,
+            scheduler: rebuild_scheduler,
+            interval,
+        })
+    } else {
+        info!("git_rebuild_scheduler_disabled");
+        None
+    };
+
+    Ok(GitStack {
+        workspace: git_workspace,
+        service: git_service,
+        repo: git_repo,
+        rebuild,
+        rebuild_jobs: git_rebuild_jobs,
+    })
+}
diff --git a/api/crates/bootstrap/src/http.rs b/api/crates/bootstrap/src/http.rs
new file mode 100644
index 00000000..2b682ae9
--- /dev/null
+++ b/api/crates/bootstrap/src/http.rs
@@ -0,0 +1,193 @@
+use axum::extract::FromRef;
+use axum::extract::{DefaultBodyLimit, MatchedPath};
+use axum::{Router, middleware, routing::get};
+use http::HeaderValue;
+use tower_http::cors::{AllowOrigin, CorsLayer};
+use tower_http::trace::TraceLayer;
+use utoipa_swagger_ui::SwaggerUi;
+
+use crate::config::Config;
+use presentation::context::{AppContext, IdentityContext};
+use presentation::openapi::ApiDoc;
+use utoipa::OpenApi;
+
+pub async fn build_api_router(cfg: &Config, ctx: AppContext) -> anyhow::Result<Router> {
+    let cors = build_cors(cfg)?;
+    let identity_ctx = IdentityContext::from_ref(&ctx);
+
+    // Ensure uploads dir exists even when using S3 backend (local staging is still required)
+    if let Err(e) = tokio::fs::create_dir_all(&cfg.storage_root).await {
+        tracing::warn!(error=?e, dir=%cfg.storage_root, "Failed to create uploads dir");
+    }
+
+    // Build upload router with state
+    let upload_router = Router::new()
+        .route(
+            "/*path",
+            get(presentation::http::documents::files::serve_upload),
+        )
+        .with_state(ctx.clone());
+
+    // Build API router
+    let api_router = Router::new()
+        .nest(
+            "/api",
+            presentation::http::core::health::routes(ctx.clone()),
+        )
+        .nest("/api", presentation::http::documents::routes(ctx.clone()))
+        .nest(
+            "/api/auth",
+            presentation::http::identity::auth::routes(ctx.clone()),
+        )
+        .nest(
+            "/api",
+            presentation::http::documents::sharing::routes(ctx.clone()),
+        )
+        .nest(
+            "/api",
+            presentation::http::documents::files::routes(ctx.clone()),
+        )
+        .nest(
+            "/api",
+            presentation::http::documents::tagging::routes(ctx.clone()),
+        )
+        .nest("/api", presentation::http::git::routes(ctx.clone()))
+        .nest(
+            "/api",
+            presentation::http::core::markdown::routes(ctx.clone()),
+        )
+        .nest("/api", presentation::http::plugins::routes(ctx.clone()))
+        .nest(
+            "/api",
+            presentation::http::identity::api_tokens::routes(ctx.clone()),
+        )
+        .nest(
+            "/api",
+            presentation::http::core::storage_ingest::routes(ctx.clone()),
+        )
+        .nest("/api", presentation::http::workspaces::routes(ctx.clone()))
+        .nest(
+            "/api",
+            presentation::http::identity::shortcuts::routes(ctx.clone()),
+        )
+        .nest(
+            "/api/public",
+            presentation::http::documents::publishing::routes(ctx.clone()),
+        )
+        .merge(SwaggerUi::new("/api/docs").url("/api/openapi.json", ApiDoc::openapi()))
+        .layer(middleware::from_fn_with_state(
+            identity_ctx.clone(),
+            presentation::http::identity::auth::refresh_middleware,
+        ))
+        .layer(middleware::from_fn(
+            presentation::http::identity::auth::request_status::middleware,
+        ))
+        .layer(cors)
+        // Global body size limit for uploads (configurable)
+        .layer(DefaultBodyLimit::max(cfg.upload_max_bytes))
+        .layer(
+            TraceLayer::new_for_http().make_span_with(|req: &http::Request<_>| {
+                let method = req.method().clone();
+                let uri = req.uri().clone();
+                let matched = req
+                    .extensions()
+                    .get::<MatchedPath>()
+                    .map(|p| p.as_str().to_string())
+                    .unwrap_or_default();
+                tracing::info_span!("http", %method, %uri, matched_path = %matched)
+            }),
+        );
+
+    let metrics_router = Router::new()
+        .route(
+            "/metrics",
+            get(presentation::http::core::metrics::metrics_handler),
+        )
+        .with_state(ctx.clone());
+    let api_router = api_router.merge(metrics_router);
+
+    let api_router = api_router.nest("/api/uploads", upload_router);
+
+    Ok(api_router)
+}
+
+pub fn build_ws_router(ctx: AppContext) -> Router {
+    let identity_ctx = IdentityContext::from_ref(&ctx);
+    Router::new()
+        .route(
+            "/api/yjs/:id",
+            get(presentation::ws::documents::yjs::axum_ws_entry),
+        )
+        .with_state(ctx.clone())
+        .layer(middleware::from_fn_with_state(
+            identity_ctx.clone(),
+            presentation::http::identity::auth::refresh_middleware,
+        ))
+        .layer(middleware::from_fn(
+            presentation::http::identity::auth::request_status::middleware,
+        ))
+}
+
+fn build_cors(cfg: &Config) -> anyhow::Result<CorsLayer> {
+    let frontend_origin = if let Some(origin) = cfg.frontend_url.clone() {
+        Some(HeaderValue::from_str(&origin).map_err(|_| {
+            anyhow::anyhow!("FRONTEND_URL must be a valid origin (e.g., https://app.example.com)")
+        })?)
+    } else {
+        None
+    };
+
+    let cors_allow_headers = [
+        http::header::CONTENT_TYPE,
+        http::header::AUTHORIZATION,
+        http::header::HeaderName::from_static("x-workspace-id"),
+    ];
+    let cors_expose_headers = [http::header::WWW_AUTHENTICATE];
+    let cors = if let Some(origin) = frontend_origin.clone() {
+        CorsLayer::new()
+            .allow_origin(origin)
+            .allow_methods([
+                http::Method::GET,
+                http::Method::POST,
+                http::Method::PUT,
+                http::Method::DELETE,
+                http::Method::PATCH,
+                http::Method::OPTIONS,
+            ])
+            .allow_headers(cors_allow_headers.clone())
+            .expose_headers(cors_expose_headers.clone())
+            .allow_credentials(true)
+    } else if cfg.is_production {
+        // In production, FRONTEND_URL is mandatory (enforced earlier), but fallback defensively to deny all
+        CorsLayer::new()
+            .allow_origin(AllowOrigin::exact(HeaderValue::from_static(
+                "http://invalid",
+            )))
+            .allow_methods([
+                http::Method::GET,
+                http::Method::POST,
+                http::Method::PUT,
+                http::Method::DELETE,
+                http::Method::PATCH,
+                http::Method::OPTIONS,
+            ])
+            .allow_headers(cors_allow_headers.clone())
+            .expose_headers(cors_expose_headers.clone())
+    } else {
+        // Development convenience
+        CorsLayer::new()
+            .allow_origin(AllowOrigin::mirror_request())
+            .allow_methods([
+                http::Method::GET,
+                http::Method::POST,
+                http::Method::PUT,
+                http::Method::DELETE,
+                http::Method::PATCH,
+                http::Method::OPTIONS,
+            ])
+            .allow_headers(cors_allow_headers.clone())
+            .expose_headers(cors_expose_headers.clone())
+            .allow_credentials(true)
+    };
+    Ok(cors)
+}
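For orientation, the two routers above are meant to be merged and served by the binary. A minimal wiring sketch (assumed caller, not part of the patch; the real entry point lives in the bootstrap `app` module, where `cfg` and `ctx` come from):

    // Sketch only: compose the HTTP and WebSocket routers and serve them (axum 0.7).
    let app = build_api_router(&cfg, ctx.clone())
        .await?
        .merge(build_ws_router(ctx));
    let listener = tokio::net::TcpListener::bind(("0.0.0.0", cfg.api_port)).await?;
    axum::serve(listener, app).await?;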
diff --git a/api/crates/bootstrap/src/jobs.rs b/api/crates/bootstrap/src/jobs.rs
new file mode 100644
index 00000000..0b229fdc
--- /dev/null
+++ b/api/crates/bootstrap/src/jobs.rs
@@ -0,0 +1,354 @@
+use std::panic::AssertUnwindSafe;
+use std::sync::Arc;
+use std::time::Duration;
+
+use chrono::Utc;
+use futures_util::FutureExt;
+use tokio::task::JoinHandle;
+use tokio::time::sleep;
+use tracing::{debug, error, info, warn};
+
+use crate::config::Config;
+use crate::git::GitRebuildStack;
+use application::core::services::storage::reconcile::StorageReconcileService;
+use application::core::services::storage::reconcile_scheduler::StorageReconcileScheduler;
+use application::core::services::worker::WorkerTick;
+use application::identity::ports::user_session_repository::UserSessionRepository;
+use application::plugins::ports::plugin_asset_store::PluginAssetStore;
+use application::plugins::ports::plugin_installation_repository::PluginInstallationRepository;
+use domain::plugins::scope::PluginInstallationStatus;
+use infrastructure::core::db::PgPool;
+use infrastructure::core::db::advisory_lock::AdvisoryLock;
+use infrastructure::core::storage::{
+    FsIngestWatcher, StorageConsistencyMonitor, StorageIngestWorker, StorageProjectionWorker,
+};
+use infrastructure::documents::event_poller::DocEventPoller;
+use infrastructure::documents::realtime::Hub;
+use infrastructure::plugins::s3_store::S3BackedPluginStore;
+
+/// Handle to a background task.
+pub struct JobHandle {
+    pub name: &'static str,
+    pub handle: JoinHandle<()>,
+}
+
+/// Small registry to keep track of spawned background jobs.
+#[derive(Default)]
+pub struct Jobs {
+    handles: Vec<JobHandle>,
+}
+
+impl Jobs {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Spawn a background task and record its handle.
+    pub fn spawn<F>(&mut self, name: &'static str, fut: F)
+    where
+        F: std::future::Future<Output = ()> + Send + 'static,
+    {
+        let handle = tokio::spawn(async move {
+            if let Err(panic) = AssertUnwindSafe(fut).catch_unwind().await {
+                error!(?panic, job = name, "background_job_panicked");
+            }
+        });
+        self.handles.push(JobHandle { name, handle });
+    }
+
+    /// Expose handles for inspection or later coordination.
+    pub fn handles(&self) -> &[JobHandle] {
+        &self.handles
+    }
+
+    /// Abort all tracked jobs and await their termination.
+    pub async fn shutdown(self) {
+        for JobHandle { name, handle } in self.handles {
+            handle.abort();
+            match handle.await {
+                Ok(()) => {}
+                Err(err) if err.is_cancelled() => {
+                    debug!(job = name, "background_job_cancelled");
+                }
+                Err(err) => {
+                    error!(job = name, error = ?err, "background_job_join_failed");
+                }
+            }
+        }
+    }
+}
+
+/// Wait for Ctrl+C or SIGTERM and log which signal was received.
+pub async fn wait_for_shutdown_signal() {
+    #[cfg(unix)]
+    {
+        let mut sigterm = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
+            .expect("create SIGTERM listener");
+        tokio::select! {
+            _ = tokio::signal::ctrl_c() => {
+                info!("shutdown_signal_received: ctrl_c");
+            }
+            _ = sigterm.recv() => {
+                info!("shutdown_signal_received: sigterm");
+            }
+        }
+    }
+
+    #[cfg(not(unix))]
+    {
+        let _ = tokio::signal::ctrl_c().await;
+        info!("shutdown_signal_received: ctrl_c");
+    }
+}
+
+pub const SESSION_CLEANUP_INTERVAL_SECS: u64 = 15 * 60;
+pub const SESSION_CLEANUP_BATCH_SIZE: i64 = 500;
+const SNAPSHOT_LOCK_KEY: i64 = i64::from_be_bytes(*b"REFSNAP1");
+
+pub fn spawn_storage_consistency_monitor(
+    jobs: &mut Jobs,
+    enabled: bool,
+    spawn_background_tasks: bool,
+    monitor: Arc<StorageConsistencyMonitor>,
+) {
+    if enabled && spawn_background_tasks {
+        jobs.spawn("storage_consistency_monitor", async move {
+            monitor.run().await;
+        });
+    }
+}
+
+pub fn spawn_fs_ingest_watcher(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    watcher: Arc<FsIngestWatcher>,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("fs_ingest_watcher", async move {
+            watcher.run().await;
+        });
+    }
+}
+
+pub fn spawn_doc_event_poller(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    poller: Arc<DocEventPoller>,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("doc_event_poller", async move {
+            poller.run().await;
+        });
+    }
+}
+
+pub fn spawn_storage_reconcile_worker(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    svc: Arc<StorageReconcileService>,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("storage_reconcile_worker", async move {
+            let idle = Duration::from_secs(2);
+            loop {
+                match svc.tick().await {
+                    Ok(WorkerTick::Processed) => continue,
+                    Ok(WorkerTick::Idle) => sleep(idle).await,
+                    Err(err) => {
+                        error!(error = ?err, "storage_reconcile_worker_tick_failed");
+                        sleep(idle).await;
+                    }
+                }
+            }
+        });
+    }
+}
+
+pub fn spawn_storage_reconcile_scheduler(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    scheduler: StorageReconcileScheduler,
+    interval: Duration,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("storage_reconcile_scheduler", async move {
+            loop {
+                scheduler.tick().await;
+                sleep(interval).await;
+            }
+        });
+    }
+}
+
+pub fn spawn_storage_projection_worker(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    worker: Arc<StorageProjectionWorker>,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("storage_projection_worker", async move {
+            worker.run().await;
+        });
+    }
+}
+
+pub fn spawn_storage_ingest_worker(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    worker: Arc<StorageIngestWorker>,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("storage_ingest_worker", async move {
+            worker.run().await;
+        });
+    }
+}
+
+pub fn spawn_git_rebuild_jobs(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    rebuild: Option<GitRebuildStack>,
+) {
+    if !spawn_background_tasks {
+        return;
+    }
+    if let Some(rebuild) = rebuild {
+        let svc = rebuild.service.clone();
+        jobs.spawn("git_rebuild_worker", async move {
+            let idle = Duration::from_secs(1);
+            loop {
+                match svc.tick().await {
+                    Ok(WorkerTick::Processed) => continue,
+                    Ok(WorkerTick::Idle) => sleep(idle).await,
+                    Err(err) => {
+                        error!(error = ?err, "git_rebuild_worker_tick_failed");
+                        sleep(idle).await;
+                    }
+                }
+            }
+        });
+        jobs.spawn("git_rebuild_scheduler", async move {
+            loop {
+                rebuild.scheduler.tick().await;
+                sleep(rebuild.interval).await;
+            }
+        });
+    }
+}
+
+pub fn spawn_plugin_prefetch(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    installations: Arc<dyn PluginInstallationRepository>,
+    assets: Arc<S3BackedPluginStore>,
+) {
+    if spawn_background_tasks {
+        jobs.spawn("plugin_prefetch", async move {
+            match installations.list_all().await {
+                Ok(installs) => {
+                    for inst in installs
+                        .into_iter()
+                        .filter(|i| i.status == PluginInstallationStatus::Enabled)
+                    {
+                        if let Err(err) = assets
+                            .load_user_manifest(&inst.workspace_id, &inst.plugin_id, &inst.version)
+                            .await
+                        {
+                            warn!(
+                                error = ?err,
+                                workspace_id = %inst.workspace_id,
+                                plugin = inst.plugin_id.as_str(),
+                                version = inst.version.as_str(),
+                                "prefetch_user_plugin_failed"
+                            );
+                        }
+                    }
+                }
+                Err(err) => {
+                    warn!(error = ?err, "list_all_plugin_installations_failed");
+                }
+            }
+        });
+    }
+}
+
+pub fn spawn_session_cleanup(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    repo: Arc<dyn UserSessionRepository>,
+    interval_secs: u64,
+    batch_size: i64,
+) {
+    if !spawn_background_tasks {
+        return;
+    }
+    jobs.spawn("session_cleanup", async move {
+        let mut ticker = tokio::time::interval(Duration::from_secs(interval_secs));
+        loop {
+            ticker.tick().await;
+            let cutoff = Utc::now();
+            let mut total_removed: u64 = 0;
+            loop {
+                match repo.delete_expired(cutoff, batch_size).await {
+                    Ok(removed) => {
+                        if removed == 0 {
+                            break;
+                        }
+                        total_removed += removed;
+                        if removed < batch_size as u64 {
+                            break;
+                        }
+                    }
+                    Err(err) => {
+                        warn!(error = ?err, "user_session_cleanup_failed");
+                        break;
+                    }
+                }
+            }
+            if total_removed > 0 {
+                debug!(removed = total_removed, "user_session_cleanup_deleted");
+            }
+        }
+    });
+}
+
+pub fn spawn_snapshot_loop(
+    jobs: &mut Jobs,
+    spawn_background_tasks: bool,
+    hub: Option<Hub>,
+    cfg: Config,
+    pool: PgPool,
+) {
+    if !spawn_background_tasks {
+        return;
+    }
+    if let Some(hub_for_snap) = hub {
+        jobs.spawn("snapshot_loop", async move {
+            let interval = Duration::from_secs(cfg.snapshot_interval_secs);
+            loop {
+                match AdvisoryLock::try_acquire(&pool, SNAPSHOT_LOCK_KEY).await {
+                    Ok(Some(lock)) => {
+                        let snapshot_result = hub_for_snap
+                            .snapshot_all(cfg.snapshot_keep_versions, cfg.updates_keep_window)
+                            .await;
+
+                        if let Err(e) = lock.release().await {
+                            error!(error = ?e, "snapshot_lock_release_failed");
+                        }
+
+                        if let Err(e) = snapshot_result {
+                            error!(error = ?e, "snapshot_loop_failed");
+                        }
+                    }
+                    Ok(None) => {
+                        debug!("snapshot_loop_skipped_lock_held");
+                    }
+                    Err(e) => {
+                        error!(error = ?e, "snapshot_lock_error");
+                    }
+                }
+                sleep(interval).await;
+            }
+        });
+    }
+}
diff --git a/api/crates/bootstrap/src/lib.rs b/api/crates/bootstrap/src/lib.rs
new file mode 100644
index 00000000..495507b7
--- /dev/null
+++ b/api/crates/bootstrap/src/lib.rs
@@ -0,0 +1,15 @@
+pub mod app;
+pub mod auth;
+pub mod config;
+pub mod git;
+pub mod http;
+pub mod jobs;
+pub mod plugins;
+pub mod realtime;
+pub mod storage;
+pub mod telemetry;
+
+pub use application;
+pub use domain;
+pub use infrastructure;
+pub use presentation;
diff --git a/api/crates/bootstrap/src/plugins.rs b/api/crates/bootstrap/src/plugins.rs
new file mode 100644
index 00000000..8e299946
--- /dev/null
+++ b/api/crates/bootstrap/src/plugins.rs
@@ -0,0 +1,155 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+
+use crate::config::{Config, StorageBackend};
+use application::plugins::ports::plugin_asset_store::PluginAssetStore;
+use application::plugins::ports::plugin_installer::PluginInstaller;
+use application::plugins::ports::plugin_package_fetcher::PluginPackageFetcher;
+use application::plugins::ports::plugin_runtime::PluginRuntime;
+use infrastructure::plugins::filesystem_store::{FilesystemPluginStore, PluginExecutionLimits};
+
+pub type PluginStack = (
+    Arc<dyn PluginRuntime>,
+    Arc<dyn PluginInstaller>,
+    Arc<dyn PluginAssetStore>,
+    Option<Arc<infrastructure::plugins::s3_store::S3BackedPluginStore>>,
+    Arc<dyn PluginPackageFetcher>,
+);
+
+pub fn build_plugin_execution_limits(cfg: &Config) -> PluginExecutionLimits {
+    let timeout = if cfg.plugin_timeout_secs == 0 {
+        None
+    } else {
+        Some(std::time::Duration::from_secs(cfg.plugin_timeout_secs))
+    };
+    let memory_pages_raw = cfg.plugin_memory_max_mb.saturating_mul(16);
+    let memory_max_pages = if memory_pages_raw == 0 {
+        None
+    } else {
+        Some(memory_pages_raw.min(u32::MAX as u64) as u32)
+    };
+    let fuel_limit = cfg
+        .plugin_fuel_limit
+        .and_then(|limit| if limit == 0 { None } else { Some(limit) });
+    PluginExecutionLimits::new(timeout, memory_max_pages, fuel_limit)
+}
+
+pub async fn build_plugin_stack(
+    cfg: &Config,
+    plugin_limits: PluginExecutionLimits,
+) -> anyhow::Result<PluginStack> {
+    let mut s3_plugin_store: Option<Arc<infrastructure::plugins::s3_store::S3BackedPluginStore>> =
+        None;
+
+    let (plugin_runtime, plugin_installer, plugin_assets): (
+        Arc<dyn PluginRuntime>,
+        Arc<dyn PluginInstaller>,
+        Arc<dyn PluginAssetStore>,
+    ) = match cfg.storage_backend {
+        StorageBackend::Filesystem => {
+            let store = Arc::new(FilesystemPluginStore::new(&cfg.plugin_dir, plugin_limits)?);
+            let runtime: Arc<dyn PluginRuntime> = store.clone();
+            let installer: Arc<dyn PluginInstaller> = store.clone();
+            let assets: Arc<dyn PluginAssetStore> = store.clone();
+            (runtime, installer, assets)
+        }
+        StorageBackend::S3 => {
+            let s3_store_cfg = infrastructure::plugins::s3_store::S3PluginStoreConfig {
+                plugin_dir: cfg.plugin_dir.clone(),
+                bucket: cfg
+                    .s3_bucket
+                    .clone()
+                    .context("S3_BUCKET must be configured when using S3 storage backend")?,
+                region: cfg.s3_region.clone(),
+                endpoint: cfg.s3_endpoint.clone(),
+                access_key: cfg.s3_access_key.clone(),
+                secret_key: cfg.s3_secret_key.clone(),
+                use_path_style: cfg.s3_use_path_style,
+            };
+            let store = Arc::new(
+                infrastructure::plugins::s3_store::S3BackedPluginStore::new(
+                    &s3_store_cfg,
+                    plugin_limits,
+                )
+                .await?,
+            );
+            s3_plugin_store = Some(store.clone());
+            let runtime: Arc<dyn PluginRuntime> = store.clone();
+            let installer: Arc<dyn PluginInstaller> = store.clone();
+            let assets: Arc<dyn PluginAssetStore> = store.clone();
+            (runtime, installer, assets)
+        }
+    };
+    let plugin_fetcher: Arc<dyn PluginPackageFetcher> = Arc::new(
+        infrastructure::plugins::package_fetcher_reqwest::ReqwestPluginPackageFetcher::new(),
+    );
+
+    Ok((
+        plugin_runtime,
+        plugin_installer,
+        plugin_assets,
+        s3_plugin_store,
+        plugin_fetcher,
+    ))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn limits_allow_zero_values() {
+        let cfg = Config {
+            plugin_timeout_secs: 0,
+            plugin_memory_max_mb: 0,
+            plugin_fuel_limit: Some(0),
+            // irrelevant defaults
+            api_port: 0,
+            frontend_url: None,
+            database_url: "".into(),
+            jwt_secret_pem: "".into(),
+            jwt_expires_secs: 0,
+            session_refresh_ttl_secs: 0,
+            session_refresh_remember_ttl_secs: 0,
+            snapshot_interval_secs: 0,
+            snapshot_keep_versions: 0,
+            updates_keep_window: 0,
+            storage_backend: StorageBackend::Filesystem,
+            storage_root: "".into(),
+            storage_monitor_enabled: false,
+            storage_monitor_interval_secs: 0,
+            storage_monitor_batch_size: 0,
+            s3_endpoint: None,
+            s3_bucket: None,
+            s3_region: None,
+            s3_access_key: None,
+            s3_secret_key: None,
+            s3_use_path_style: false,
+            plugin_dir: "".into(),
+            plugin_asset_sign_key: "".into(),
+            plugin_asset_url_ttl_secs: 0,
+            encryption_key: "".into(),
+            upload_max_bytes: 0,
+            public_base_url: None,
+            is_production: false,
+            cluster_mode: false,
+            redis_url: None,
+            redis_stream_prefix: "".into(),
+            redis_min_message_lifetime_ms: 0,
+            redis_task_debounce_ms: 0,
+            redis_awareness_ttl_ms: 0,
+            redis_stream_max_len: 0,
+            snapshot_archive_interval_secs: 0,
+            git_rebuild_enabled: false,
+            git_rebuild_interval_secs: 0,
+            google_oauth: None,
+            github_oauth: None,
+            oidc_oauth: None,
+        };
+        let limits = build_plugin_execution_limits(&cfg);
+        assert_eq!(limits.timeout, None);
+        assert_eq!(limits.memory_max_pages, None);
+        assert_eq!(limits.fuel_limit, None);
+    }
+}
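The `saturating_mul(16)` above converts megabytes to WebAssembly pages: one wasm page is 64 KiB, so 1 MiB is exactly 16 pages. A small worked check of the conversion and its clamping (sketch, mirroring the arithmetic in `build_plugin_execution_limits`):

    // 64 KiB per wasm page → MB × 16 pages.
    assert_eq!(128u64.saturating_mul(16), 2048); // PLUGIN_MEMORY_MAX_MB=128 → 2048 pages
    // Oversized values are clamped to u32::MAX before the cast, never wrapped.
    let pages = u64::MAX.saturating_mul(16).min(u32::MAX as u64) as u32;
    assert_eq!(pages, u32::MAX);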
diff --git a/api/crates/bootstrap/src/realtime.rs b/api/crates/bootstrap/src/realtime.rs
new file mode 100644
index 00000000..21fe9f0a
--- /dev/null
+++ b/api/crates/bootstrap/src/realtime.rs
@@ -0,0 +1,113 @@
+use std::sync::Arc;
+use std::time::Duration;
+
+use anyhow::Context;
+use tracing::info;
+
+use crate::config::Config;
+use application::core::ports::storage::storage_port::StorageResolverPort;
+use application::core::ports::storage::storage_projection_queue::StorageProjectionQueue;
+use application::documents::ports::document_snapshot_archive_repository::DocumentSnapshotArchiveRepository;
+use application::documents::ports::linkgraph_repository::LinkGraphRepository;
+use application::documents::ports::realtime::realtime_port::RealtimeEngine;
+use application::documents::services::realtime::doc_hydration::DocHydrationService;
+use application::documents::services::realtime::snapshot::SnapshotService;
+use infrastructure::core::db::PgPool;
+
+pub struct RealtimeStack {
+    pub engine: Arc<dyn RealtimeEngine>,
+    pub snapshot_service: Arc<SnapshotService>,
+    pub local_hub: Option<infrastructure::documents::realtime::Hub>,
+}
+
+#[allow(clippy::too_many_arguments)]
+pub async fn build_realtime_stack(
+    cfg: &Config,
+    pool: &PgPool,
+    storage_resolver: Arc<dyn StorageResolverPort>,
+    storage_job_queue: Arc<dyn StorageProjectionQueue>,
+    snapshot_archive_repo: Arc<dyn DocumentSnapshotArchiveRepository>,
+) -> anyhow::Result<RealtimeStack> {
+    let auto_archive_interval = Duration::from_secs(cfg.snapshot_archive_interval_secs);
+
+    if cfg.cluster_mode {
+        info!("cluster_mode_enabled");
+        let redis_settings = infrastructure::documents::realtime::RedisRealtimeConfig {
+            redis_url: cfg
+                .redis_url
+                .clone()
+                .context("REDIS_URL must be set when CLUSTER_MODE=1")?,
+            stream_prefix: cfg.redis_stream_prefix.clone(),
+            stream_max_len: cfg.redis_stream_max_len,
+            task_debounce_ms: cfg.redis_task_debounce_ms,
+            min_message_lifetime_ms: cfg.redis_min_message_lifetime_ms,
+            awareness_ttl_ms: cfg.redis_awareness_ttl_ms,
+            snapshot_archive_interval_secs: cfg.snapshot_archive_interval_secs,
+            spawn_persistence_worker: true,
+        };
+        let engine = Arc::new(
+            infrastructure::documents::realtime::RedisRealtimeEngine::from_config(
+                redis_settings,
+                pool.clone(),
+                storage_resolver.clone(),
+                storage_job_queue.clone(),
+            )?,
+        );
+        let snapshot_service = engine.snapshot_service();
+        let engine_trait: Arc<dyn RealtimeEngine> = engine.clone();
+        return Ok(RealtimeStack {
+            engine: engine_trait,
+            snapshot_service,
+            local_hub: None,
+        });
+    }
+
+    info!("cluster_mode_disabled_using_local_hub");
+    let doc_state_reader: Arc<
+        dyn application::documents::ports::realtime::realtime_hydration_port::DocStateReader,
+    > = Arc::new(infrastructure::documents::realtime::SqlxDocStateReader::new(pool.clone()));
+    let backlog_reader: Arc<
+        dyn application::documents::ports::realtime::realtime_hydration_port::RealtimeBacklogReader,
+    > = Arc::new(infrastructure::documents::realtime::NoopBacklogReader);
+    let doc_persistence: Arc<
+        dyn application::documents::ports::realtime::realtime_persistence_port::DocPersistencePort,
+    > = Arc::new(infrastructure::documents::realtime::SqlxDocPersistenceAdapter::new(pool.clone()));
+    let linkgraph_repo: Arc<dyn LinkGraphRepository> = Arc::new(
+        infrastructure::documents::db::repositories::linkgraph_repository_sqlx::SqlxLinkGraphRepository::new(
+            pool.clone(),
+        ),
+    );
+    let tagging_repo: Arc<dyn application::documents::ports::tagging_repository::TaggingRepository> =
+        Arc::new(
+            infrastructure::documents::db::repositories::tagging_repository_sqlx::SqlxTaggingRepository::new(
+                pool.clone(),
+            ),
+        );
+    let hydration_service = Arc::new(DocHydrationService::new(
+        doc_state_reader.clone(),
+        backlog_reader,
+        storage_resolver.clone(),
+    ));
+    let snapshot_service = Arc::new(SnapshotService::new(
+        doc_state_reader.clone(),
+        doc_persistence.clone(),
+        linkgraph_repo,
+        tagging_repo,
+        snapshot_archive_repo.clone(),
+        storage_job_queue.clone(),
+    ));
+    let hub = infrastructure::documents::realtime::Hub::new(
+        hydration_service,
+        snapshot_service.clone(),
+        doc_persistence,
+        auto_archive_interval,
+    );
+    let engine =
+        Arc::new(infrastructure::documents::realtime::LocalRealtimeEngine { hub: hub.clone() });
+    let engine_trait: Arc<dyn RealtimeEngine> = engine.clone();
+    Ok(RealtimeStack {
+        engine: engine_trait,
+        snapshot_service,
+        local_hub: Some(hub),
+    })
+}
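Both branches above return the same `RealtimeStack` shape; callers only need to know that `local_hub` is populated in single-node mode. A consuming sketch (assumed caller, not part of the patch):

    // Sketch only: cluster mode gets no local hub, so the in-process snapshot loop is skipped there.
    let stack = build_realtime_stack(&cfg, &pool, storage_resolver, storage_job_queue, snapshot_archive_repo)
        .await?;
    match &stack.local_hub {
        Some(_hub) => tracing::info!("single-node: snapshot loop runs in-process"),
        None => tracing::info!("cluster mode: Redis engine owns persistence"),
    }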
diff --git a/api/crates/bootstrap/src/storage.rs b/api/crates/bootstrap/src/storage.rs
new file mode 100644
index 00000000..ac13b696
--- /dev/null
+++ b/api/crates/bootstrap/src/storage.rs
@@ -0,0 +1,82 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+
+use crate::config::{Config, StorageBackend};
+use application::core::ports::storage::storage_port::{StorageProjectionPort, StorageResolverPort};
+use application::core::ports::storage::storage_reconcile_backend::StorageReconcileBackend;
+use infrastructure::core::db::PgPool;
+use infrastructure::core::storage::s3::S3StorageConfig;
+use infrastructure::core::storage::s3::S3StoragePort;
+use infrastructure::core::storage::{
+    FsReconcileBackend, PgStorageProjectionQueue, S3ReconcileBackend,
+};
+
+pub type StoragePorts = (
+    Arc<dyn StorageResolverPort>,
+    Arc<dyn StorageProjectionPort>,
+    Arc<dyn StorageReconcileBackend>,
+    bool,
+);
+
+pub async fn build_storage_ports(cfg: &Config, pool: &PgPool) -> anyhow::Result<StoragePorts> {
+    let uploads_root = std::path::PathBuf::from(&cfg.storage_root);
+    let ports = match cfg.storage_backend {
+        StorageBackend::Filesystem => {
+            let port = Arc::new(infrastructure::core::storage::port_impl::FsStoragePort {
+                pool: pool.clone(),
+                uploads_root: uploads_root.clone(),
+            });
+            let backend: Arc<dyn StorageReconcileBackend> =
+                FsReconcileBackend::new(uploads_root) as Arc<dyn StorageReconcileBackend>;
+            (
+                port.clone() as Arc<dyn StorageResolverPort>,
+                port as Arc<dyn StorageProjectionPort>,
+                backend,
+                false,
+            )
+        }
+        StorageBackend::S3 => {
+            let s3_settings = S3StorageConfig {
+                uploads_root: uploads_root.clone(),
+                bucket: cfg
+                    .s3_bucket
+                    .clone()
+                    .context("S3_BUCKET must be configured when using S3 storage backend")?,
+                region: cfg.s3_region.clone(),
+                endpoint: cfg.s3_endpoint.clone(),
+                access_key: cfg.s3_access_key.clone(),
+                secret_key: cfg.s3_secret_key.clone(),
+                use_path_style: cfg.s3_use_path_style,
+            };
+            let port = Arc::new(S3StoragePort::new(pool.clone(), &s3_settings).await?);
+            let backend: Arc<dyn StorageReconcileBackend> =
+                S3ReconcileBackend::new(&s3_settings).await? as Arc<dyn StorageReconcileBackend>;
+            (
+                port.clone() as Arc<dyn StorageResolverPort>,
+                port as Arc<dyn StorageProjectionPort>,
+                backend,
+                true,
+            )
+        }
+    };
+    Ok(ports)
+}
+
+pub fn build_storage_projection_queue(
+    pool: &PgPool,
+) -> Arc<dyn application::core::ports::storage::storage_projection_queue::StorageProjectionQueue> {
+    Arc::new(PgStorageProjectionQueue::new(pool.clone()))
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::config::StorageBackend;
+
+    #[test]
+    fn storage_backend_parse() {
+        assert_eq!(
+            "fs".parse::<StorageBackend>().unwrap(),
+            StorageBackend::Filesystem
+        );
+        assert!("s3".parse::<StorageBackend>().is_ok());
+    }
+}
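A consuming sketch for the `StoragePorts` tuple above; the trailing boolean flags the S3 backend, which a caller might use to gate S3-only wiring (assumed usage, not shown in this patch):

    // Sketch only: destructure the tuple returned by build_storage_ports.
    let (resolver, projection, reconcile_backend, is_s3) =
        build_storage_ports(&cfg, &pool).await?;
    if is_s3 {
        tracing::info!("storage backend: S3");
    }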
diff --git a/api/crates/bootstrap/src/telemetry.rs b/api/crates/bootstrap/src/telemetry.rs
new file mode 100644
index 00000000..67c1262b
--- /dev/null
+++ b/api/crates/bootstrap/src/telemetry.rs
@@ -0,0 +1,18 @@
+pub fn init_tracing() {
+    tracing_subscriber::fmt()
+        .with_env_filter(
+            std::env::var("RUST_LOG")
+                .unwrap_or_else(|_| "api=debug,warp=info,axum=info,tower_http=info".into()),
+        )
+        .init();
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn init_tracing_idempotent() {
+        // Should not panic on multiple init calls in tests.
+        let _ = std::panic::catch_unwind(super::init_tracing);
+        let _ = std::panic::catch_unwind(super::init_tracing);
+    }
+}
diff --git a/api/crates/cli/Cargo.toml b/api/crates/cli/Cargo.toml
new file mode 100644
index 00000000..b32aadf6
--- /dev/null
+++ b/api/crates/cli/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "cli"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+anyhow = "1"
+async-trait = "0.1"
+chrono = { version = "0.4", features = ["serde", "clock"] }
+clap = { version = "4.5", features = ["derive"] }
+dotenvy = "0.15"
+serde_json = "1.0"
+sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "uuid", "chrono", "macros", "json"] }
+tokio = { version = "1.46", features = ["rt-multi-thread", "macros"] }
+uuid = { version = "1", features = ["v4", "serde"] }
+utoipa = "4"
+
+bootstrap = { path = "../bootstrap" }
diff --git a/api/crates/cli/src/cli.rs b/api/crates/cli/src/cli.rs
new file mode 100644
index 00000000..7974d5a9
--- /dev/null
+++ b/api/crates/cli/src/cli.rs
@@ -0,0 +1,302 @@
+use clap::{Parser, Subcommand, ValueEnum};
+use uuid::Uuid;
+
+use bootstrap::application::core::ports::storage::storage_ingest_queue::StorageIngestKind;
+
+#[derive(Parser)]
+#[command(name = "refmd", about = "Admin CLI for managing a refmd node", version)]
+pub(crate) struct Cli {
+    /// Override the database URL (defaults to DATABASE_URL env / config)
+    #[arg(long)]
+    pub(crate) database_url: Option<String>,
+
+    #[command(subcommand)]
+    pub(crate) command: Command,
+}
+
+#[derive(Subcommand)]
+pub(crate) enum Command {
+    /// User lifecycle and session management
+    Users {
+        #[command(subcommand)]
+        command: UserCommand,
+    },
+    /// Queue-level maintenance and enqueue helpers
+    Jobs {
+        #[command(subcommand)]
+        command: JobsCommand,
+    },
+    /// Workspace lifecycle and membership helpers
+    Workspaces {
+        #[command(subcommand)]
+        command: WorkspaceCommand,
+    },
+    /// Git workspace helpers
+    Git {
+        #[command(subcommand)]
+        command: GitCommand,
+    },
+    /// Plugin asset utilities
+    Plugins {
+        #[command(subcommand)]
+        command: PluginCommand,
+    },
+    /// API token management
+    Tokens {
+        #[command(subcommand)]
+        command: TokenCommand,
+    },
+    /// Share management
+    Shares {
+        #[command(subcommand)]
+        command: ShareCommand,
+    },
+    /// OpenAPI utilities
+    Openapi {
+        #[command(subcommand)]
+        command: OpenapiCommand,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum OpenapiCommand {
+    /// Print OpenAPI JSON to stdout
+    Export,
+}
+
+#[derive(Subcommand)]
+pub(crate) enum UserCommand {
+    /// List all users with their default workspace IDs
+    List,
+    /// Create a new user and provision their personal workspace
+    Create {
+        #[arg(long)]
+        email: String,
+        #[arg(long)]
+        name: String,
+        #[arg(long)]
+        password: String,
+        /// Optional explicit user ID (defaults to a new UUID v4)
+        #[arg(long)]
+        user_id: Option<Uuid>,
+    },
+    /// Update a user's password hash (optionally revoking active sessions)
+    SetPassword {
+        #[arg(long)]
+        user_id: Uuid,
+        #[arg(long)]
+        password: String,
+        #[arg(long, default_value_t = false)]
+        revoke_sessions: bool,
+    },
+    /// Delete a user (runs full account deletion path)
+    Delete {
+        #[arg(long)]
+        user_id: Uuid,
+    },
+    /// List sessions for a user
+    Sessions {
+        #[arg(long)]
+        user_id: Uuid,
+    },
+    /// Revoke all active sessions for a user
+    RevokeSessions {
+        #[arg(long)]
+        user_id: Uuid,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum JobsCommand {
+    /// Storage ingest queue operations
+    Ingest {
+        #[command(subcommand)]
+        command: IngestCommand,
+    },
+    /// Storage projection job operations
+    Projection {
+        #[command(subcommand)]
+        command: ProjectionCommand,
+    },
+    /// Storage reconcile job operations
+    Reconcile {
+        #[command(subcommand)]
+        command: ReconcileCommand,
+    },
+    /// Git rebuild job operations
+    GitRebuild {
+        #[command(subcommand)]
+        command: GitRebuildCommand,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum IngestCommand {
+    /// Print queue depth and age metrics
+    Stats,
+    /// Enqueue an ingest event for a workspace path
+    Enqueue {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        user_id: Uuid,
+        #[arg(long)]
+        repo_path: String,
+        #[arg(long, default_value = "fs")]
+        backend: String,
+        #[arg(long, value_enum)]
+        kind: IngestKindArg,
+        #[arg(long)]
+        content_hash: Option<String>,
+        /// Optional actor ID to attribute enqueueing
+        #[arg(long)]
+        actor_id: Option<Uuid>,
+    },
+}
+
+#[derive(Clone, Copy, ValueEnum, Debug)]
+pub(crate) enum IngestKindArg {
+    Upsert,
+    Delete,
+}
+
+impl From<IngestKindArg> for StorageIngestKind {
+    fn from(value: IngestKindArg) -> StorageIngestKind {
+        match value {
+            IngestKindArg::Upsert => StorageIngestKind::Upsert,
+            IngestKindArg::Delete => StorageIngestKind::Delete,
+        }
+    }
+}
+
+#[derive(Subcommand)]
+pub(crate) enum ProjectionCommand {
+    /// Print projection queue metrics
+    Stats,
+}
+
+#[derive(Subcommand)]
+pub(crate) enum ReconcileCommand {
+    /// Print reconcile queue metrics
+    Stats,
+    /// Enqueue a reconcile job for a workspace and scope (e.g. "full")
+    Enqueue {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        scope: String,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum GitRebuildCommand {
+    /// Print git rebuild queue metrics
+    Stats,
+    /// Enqueue a git rebuild job for a workspace
+    Enqueue {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        actor_id: Option<Uuid>,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum WorkspaceCommand {
+    /// List all workspaces
+    List,
+    /// Show members for a workspace
+    Members {
+        #[arg(long)]
+        workspace_id: Uuid,
+    },
+    /// Delete a workspace (cascades documents/files/shares)
+    Delete {
+        #[arg(long)]
+        workspace_id: Uuid,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum TokenCommand {
+    /// List API tokens for a workspace
+    List {
+        #[arg(long)]
+        workspace_id: Uuid,
+    },
+    /// Create a new API token (prints plaintext once)
+    Create {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        owner_id: Uuid,
+        #[arg(long)]
+        name: Option<String>,
+    },
+    /// Revoke an API token
+    Revoke {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        token_id: Uuid,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum ShareCommand {
+    /// List shares for a document
+    List {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        document_id: Uuid,
+    },
+    /// Revoke a share token
+    Revoke {
+        #[arg(long)]
+        workspace_id: Uuid,
+        #[arg(long)]
+        token: String,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum GitCommand {
+    /// Show git workspace status summary
+    Status {
+        #[arg(long)]
+        workspace_id: Uuid,
+    },
+    /// List dirty changes tracked for a workspace
+    Changes {
+        #[arg(long)]
+        workspace_id: Uuid,
+    },
+    /// Remove git workspace data (DB + storage)
+    Remove {
+        #[arg(long)]
+        workspace_id: Uuid,
+    },
+}
+
+#[derive(Subcommand)]
+pub(crate) enum PluginCommand {
+    /// List latest global plugin manifests
+    ListGlobal,
+    /// Load a user-scoped plugin manifest
+    UserManifest {
+        #[arg(long)]
+        user_id: Uuid,
+        #[arg(long)]
+        plugin_id: String,
+        #[arg(long)]
+        version: String,
+    },
+    /// Remove a user's plugin directory for a plugin
+    RemoveUserDir {
+        #[arg(long)]
+        user_id: Uuid,
+        #[arg(long)]
+        plugin_id: String,
+    },
+}
diff --git a/api/crates/cli/src/commands/git.rs b/api/crates/cli/src/commands/git.rs
new file mode 100644
index 00000000..99c94a90
--- /dev/null
+++ b/api/crates/cli/src/commands/git.rs
@@ -0,0 +1,36 @@
+use anyhow::Result;
+
+use application::git::ports::git_workspace::GitWorkspacePort;
+use bootstrap::application;
+
+use crate::cli::GitCommand;
+use crate::deps::Deps;
+
+pub(crate) async fn handle(deps: &Deps, cmd: GitCommand) -> Result<()> {
+    match cmd {
+        GitCommand::Status { workspace_id } => {
+            let status = deps.git_workspace.status(workspace_id).await?;
+            println!(
+                "initialized={} branch={:?} uncommitted_changes={} untracked_files={}",
+                status.repository_initialized,
+                status.current_branch,
+                status.uncommitted_changes,
+                status.untracked_files
+            );
+            Ok(())
+        }
+        GitCommand::Changes { workspace_id } => {
+            let changes = deps.git_workspace.list_changes(workspace_id).await?;
+            println!("{} change(s)", changes.len());
+            for c in changes {
+                println!("{} {}", c.status, c.path);
+            }
+            Ok(())
+        }
+        GitCommand::Remove { workspace_id } => {
+            deps.git_workspace.remove_repository(workspace_id).await?;
+            println!("removed git workspace {}", workspace_id);
+            Ok(())
+        }
+    }
+}
diff --git a/api/crates/cli/src/commands/jobs.rs b/api/crates/cli/src/commands/jobs.rs
new file mode 100644
index 00000000..57b667d4
--- /dev/null
+++ b/api/crates/cli/src/commands/jobs.rs
@@ -0,0 +1,243 @@
+use anyhow::Result;
+use chrono::{DateTime, Utc};
+use sqlx::Row;
+use uuid::Uuid;
+
+use bootstrap::{application, domain, infrastructure};
+
+use domain::storage::ingest_backend::StorageIngestBackend;
+use domain::workspaces::permissions::PermissionSet;
+use infrastructure::core::db::PgPool;
+
+use application::core::ports::storage::storage_ingest_queue::StorageIngestQueue;
+
+use crate::cli::{
+    GitRebuildCommand, IngestCommand, IngestKindArg, JobsCommand, ProjectionCommand,
+    ReconcileCommand,
+};
+use crate::deps::Deps;
+
+pub(crate) async fn handle(deps: &Deps, cmd: JobsCommand) -> Result<()> {
+    match cmd {
+        JobsCommand::Ingest { command } => match command {
+            IngestCommand::Stats => print_ingest_stats(deps.ingest_queue.as_ref()).await,
+            IngestCommand::Enqueue {
+                workspace_id,
+                user_id,
+                repo_path,
+                backend,
+                kind,
+                content_hash,
+                actor_id,
+            } => {
+                enqueue_ingest(
+                    deps.ingest_queue.as_ref(),
+                    workspace_id,
+                    user_id,
+                    actor_id,
+                    repo_path,
+                    backend,
+                    kind,
+                    content_hash,
+                )
+                .await
+            }
+        },
+        JobsCommand::Projection { command } => match command {
+            ProjectionCommand::Stats => print_projection_stats(&deps.pool).await,
+        },
+        JobsCommand::Reconcile { command } => match command {
+            ReconcileCommand::Stats => print_reconcile_stats(&deps.pool).await,
+            ReconcileCommand::Enqueue {
+                workspace_id,
+                scope,
+            } => {
+                deps.reconcile_jobs
+                    .enqueue(workspace_id, scope.trim())
+                    .await?;
+                println!(
+                    "enqueued reconcile job workspace={workspace_id} scope={}",
+                    scope.trim()
+                );
+                Ok(())
+            }
+        },
+        JobsCommand::GitRebuild { command } => match command {
+            GitRebuildCommand::Stats => print_git_rebuild_stats(&deps.pool).await,
+            GitRebuildCommand::Enqueue {
+                workspace_id,
+                actor_id,
+            } => {
+                let permissions = PermissionSet::all().to_vec();
+                deps.git_rebuild_jobs
+                    .enqueue(workspace_id, actor_id, &permissions)
+                    .await?;
+                println!(
+                    "enqueued git rebuild workspace={} actor_id={:?}",
+                    workspace_id, actor_id
+                );
+                Ok(())
+            }
+        },
+    }
+}
+async fn print_ingest_stats(queue: &dyn StorageIngestQueue) -> Result<()> {
+    let stats = queue.stats().await?;
+    println!("storage_ingest.pending={}", stats.pending);
+    println!("storage_ingest.locked={}", stats.locked);
+    println!("storage_ingest.distinct_users={}", stats.distinct_users);
+    match stats.oldest_created_at {
+        Some(ts) => println!(
+            "storage_ingest.oldest_pending_age_secs={}",
+            (Utc::now() - ts).num_seconds()
+        ),
+        None => println!("storage_ingest.oldest_pending_age_secs=-"),
+    }
+    Ok(())
+}
+
+#[allow(clippy::too_many_arguments)]
+async fn enqueue_ingest(
+    queue: &dyn StorageIngestQueue,
+    workspace_id: Uuid,
+    user_id: Uuid,
+    actor_id: Option<Uuid>,
+    repo_path: String,
+    backend: String,
+    kind: IngestKindArg,
+    content_hash: Option<String>,
+) -> Result<()> {
+    let permissions = PermissionSet::all().to_vec();
+    queue
+        .enqueue_event(
+            workspace_id,
+            user_id,
+            actor_id.or(Some(user_id)),
+            repo_path.trim(),
+            StorageIngestBackend::parse(backend.trim()),
+            kind.into(),
+            content_hash.as_deref(),
+            None,
+            &permissions,
+        )
+        .await?;
+
+    println!(
+        "enqueued ingest workspace={} user={} repo_path={} backend={} kind={:?}",
+        workspace_id,
+        user_id,
+        repo_path.trim(),
+        backend.trim(),
+        kind
+    );
+    Ok(())
+}
+
+async fn print_projection_stats(pool: &PgPool) -> Result<()> {
+    let row = sqlx::query(
+        r#"SELECT
+            COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending,
+            COUNT(*) FILTER (WHERE locked_at IS NOT NULL) AS locked,
+            COUNT(*) FILTER (WHERE pending_retry) AS retrying,
+            COUNT(*) AS total,
+            MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_created_at
+        FROM storage_projection_jobs"#,
+    )
+    .fetch_one(pool)
+    .await?;
+
+    let pending: i64 = row.try_get("pending").unwrap_or(0);
+    let locked: i64 = row.try_get("locked").unwrap_or(0);
+    let retrying: i64 = row.try_get("retrying").unwrap_or(0);
+    let total: i64 = row.try_get("total").unwrap_or(0);
+    let oldest_created_at: Option<DateTime<Utc>> = row.try_get("oldest_created_at").ok();
+
+    println!("storage_projection.total={total}");
+    println!("storage_projection.pending={pending}");
+    println!("storage_projection.locked={locked}");
+    println!("storage_projection.retrying={retrying}");
+    match oldest_created_at {
+        Some(ts) => println!(
+            "storage_projection.oldest_pending_age_secs={}",
+            (Utc::now() - ts).num_seconds()
+        ),
+        None => println!("storage_projection.oldest_pending_age_secs=-"),
+    }
+    Ok(())
+}
+
+async fn print_reconcile_stats(pool: &PgPool) -> Result<()> {
+    let row = sqlx::query(
+        r#"SELECT
+            COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending,
+            COUNT(*) FILTER (WHERE locked_at IS NOT NULL) AS locked,
+            COUNT(*) FILTER (WHERE pending_retry) AS retrying,
+            COUNT(*) AS total,
+            MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_created_at
+        FROM storage_reconcile_jobs"#,
+    )
+    .fetch_one(pool)
+    .await?;
+
+    let pending: i64 = row.try_get("pending").unwrap_or(0);
+    let locked: i64 = row.try_get("locked").unwrap_or(0);
+    let retrying: i64 = row.try_get("retrying").unwrap_or(0);
+    let total: i64 = row.try_get("total").unwrap_or(0);
+    let oldest_created_at: Option<DateTime<Utc>> = row.try_get("oldest_created_at").ok();
+
+    println!("storage_reconcile.total={total}");
+    println!("storage_reconcile.pending={pending}");
+    println!("storage_reconcile.locked={locked}");
+    println!("storage_reconcile.retrying={retrying}");
+    match oldest_created_at {
+        Some(ts) => println!(
+            "storage_reconcile.oldest_pending_age_secs={}",
+            (Utc::now() - ts).num_seconds()
+        ),
+        None => println!("storage_reconcile.oldest_pending_age_secs=-"),
+    }
+    Ok(())
+}
+
+async fn print_git_rebuild_stats(pool: &PgPool) -> Result<()> {
+    let row = sqlx::query(
+        r#"SELECT
+            COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending,
+            COUNT(*) FILTER (WHERE locked_at IS NOT NULL) AS locked,
+            COUNT(*) FILTER (WHERE pending_retry) AS retrying,
+            COUNT(*) AS total,
+            MIN(updated_at) FILTER (WHERE locked_at IS NOT NULL) AS oldest_locked_at,
+            MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_pending_created
+        FROM git_rebuild_jobs"#,
+    )
+    .fetch_one(pool)
+    .await?;
+
+    let pending: i64 = row.try_get("pending").unwrap_or(0);
+    let locked: i64 = row.try_get("locked").unwrap_or(0);
+    let retrying: i64 = row.try_get("retrying").unwrap_or(0);
+    let total: i64 = row.try_get("total").unwrap_or(0);
+    let oldest_locked_at: Option<DateTime<Utc>> = row.try_get("oldest_locked_at").ok();
+    let oldest_pending: Option<DateTime<Utc>> = row.try_get("oldest_pending_created").ok();
+
+    println!("git_rebuild.total={total}");
+    println!("git_rebuild.pending={pending}");
+    println!("git_rebuild.locked={locked}");
+    println!("git_rebuild.retrying={retrying}");
+    match oldest_pending {
+        Some(ts) => println!(
+            "git_rebuild.oldest_pending_age_secs={}",
+            (Utc::now() - ts).num_seconds()
+        ),
+        None => println!("git_rebuild.oldest_pending_age_secs=-"),
+    }
+    match oldest_locked_at {
+        Some(ts) => println!(
+            "git_rebuild.oldest_locked_age_secs={}",
+            (Utc::now() - ts).num_seconds()
+        ),
+        None => println!("git_rebuild.oldest_locked_age_secs=-"),
+    }
+    Ok(())
+}
diff --git a/api/crates/cli/src/commands/mod.rs b/api/crates/cli/src/commands/mod.rs
new file mode 100644
index 00000000..899dbf04
--- /dev/null
+++ b/api/crates/cli/src/commands/mod.rs
@@ -0,0 +1,30 @@
+use anyhow::Result;
+
+use super::cli::Command;
+use super::deps::Deps;
+
+mod git;
+mod jobs;
+mod openapi;
+mod plugins;
+mod shares;
+mod tokens;
+mod users;
+mod workspaces;
+
+pub(crate) fn run_openapi(command: super::cli::OpenapiCommand) -> Result<()> {
+    openapi::handle(command)
+}
+
+pub(crate) async fn run(deps: &Deps, command: Command) -> Result<()> {
+    match command {
+        Command::Users { command } => users::handle(deps, command).await,
+        Command::Jobs { command } => jobs::handle(deps, command).await,
+        Command::Workspaces { command } => workspaces::handle(deps, command).await,
+        Command::Git { command } => git::handle(deps, command).await,
+        Command::Plugins { command } => plugins::handle(deps, command).await,
+        Command::Tokens { command } => tokens::handle(deps, command).await,
+        Command::Shares { command } => shares::handle(deps, command).await,
+        Command::Openapi { command } => openapi::handle(command),
+    }
+}
diff --git a/api/crates/cli/src/commands/openapi.rs b/api/crates/cli/src/commands/openapi.rs
new file mode 100644
index 00000000..82ef9a09
--- /dev/null
+++ b/api/crates/cli/src/commands/openapi.rs
@@ -0,0 +1,24 @@
+use anyhow::Result;
+use std::io::Write;
+
+use crate::cli::OpenapiCommand;
+use bootstrap::presentation;
+use utoipa::OpenApi;
+
+pub(crate) fn handle(command: OpenapiCommand) -> Result<()> {
+    match command {
+        OpenapiCommand::Export => {
+            let json = presentation::openapi::ApiDoc::openapi()
+                .to_json()
+                .expect("serialize OpenAPI JSON");
+            let mut stdout = std::io::stdout().lock();
+            match stdout.write_all(json.as_bytes()) {
+                Ok(()) => {}
+                Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => return Ok(()),
+                Err(e) => return Err(e.into()),
+            }
+            let _ = stdout.write_all(b"\n");
+            Ok(())
+        }
+    }
+}
diff --git a/api/crates/cli/src/commands/plugins.rs b/api/crates/cli/src/commands/plugins.rs
new file mode 100644
index 00000000..8fdff67a
--- /dev/null
+++ b/api/crates/cli/src/commands/plugins.rs
@@ -0,0 +1,57 @@
+use anyhow::Result;
+
+use crate::cli::PluginCommand;
+use crate::deps::Deps;
+
+pub(crate) async fn handle(deps: &Deps, cmd: PluginCommand) -> Result<()> {
+    match cmd {
+        PluginCommand::ListGlobal => {
+            let manifests = deps.plugin_assets.list_latest_global_manifests().await?;
+            println!("{} global plugin(s)", manifests.len());
+            for item in manifests {
+                println!(
+                    "{}@{} manifest={}",
+                    item.plugin_id,
+                    item.version,
+                    serde_json::to_string(&item.manifest)?
+                );
+            }
+            Ok(())
+        }
+        PluginCommand::UserManifest {
+            user_id,
+            plugin_id,
+            version,
+        } => {
+            match deps
+                .plugin_assets
+                .load_user_manifest(&user_id, &plugin_id, &version)
+                .await?
+            {
+                Some(manifest) => {
+                    println!(
+                        "manifest for {} user {}:\n{}",
+                        plugin_id,
+                        user_id,
+                        serde_json::to_string_pretty(&manifest)?
+                    );
+                }
+                None => println!(
+                    "manifest not found for plugin={} user={} version={}",
+                    plugin_id, user_id, version
+                ),
+            }
+            Ok(())
+        }
+        PluginCommand::RemoveUserDir { user_id, plugin_id } => {
+            deps.plugin_assets
+                .remove_user_plugin_dir(&user_id, &plugin_id)
+                .await?;
+            println!(
+                "removed plugin data for user {} plugin {}",
+                user_id, plugin_id
+            );
+            Ok(())
+        }
+    }
+}
diff --git a/api/crates/cli/src/commands/shares.rs b/api/crates/cli/src/commands/shares.rs
new file mode 100644
index 00000000..e111fe5d
--- /dev/null
+++ b/api/crates/cli/src/commands/shares.rs
@@ -0,0 +1,60 @@
+use anyhow::Result;
+use uuid::Uuid;
+
+use bootstrap::{application, infrastructure};
+
+use application::documents::ports::sharing::shares_repository::SharesRepository;
+use infrastructure::documents::db::repositories::shares_repository_sqlx::SqlxSharesRepository;
+
+use crate::cli::ShareCommand;
+use crate::deps::Deps;
+
+pub(crate) async fn handle(deps: &Deps, cmd: ShareCommand) -> Result<()> {
+    match cmd {
+        ShareCommand::List {
+            workspace_id,
+            document_id,
+        } => list_shares(&deps.shares_repo, workspace_id, document_id).await,
+        ShareCommand::Revoke {
+            workspace_id,
+            token,
+        } => {
+            let removed = deps
+                .shares_repo
+                .delete_share(workspace_id, token.trim())
+                .await?;
+            if removed {
+                println!("revoked share token {}", token.trim());
+            } else {
+                println!("share token {} not found", token.trim());
+            }
+            Ok(())
+        }
+    }
+}
+
+async fn list_shares(
+    repo: &SqlxSharesRepository,
+    workspace_id: Uuid,
+    document_id: Uuid,
+) -> Result<()> {
+    let shares = repo.list_document_shares(workspace_id, document_id).await?;
+    println!(
+        "{} share(s) for document {} in workspace {}",
+        shares.len(),
+        document_id,
+        workspace_id
+    );
+    for s in shares {
+        println!(
+            "{} | token={} | perm={} | expires_at={:?} | parent_share_id={:?} | created_at={}",
+            s.id,
+            s.token,
+            s.permission,
+            s.expires_at.map(|d| d.to_rfc3339()),
+            s.parent_share_id,
+            s.created_at.to_rfc3339()
+        );
+    }
+    Ok(())
+}
diff --git a/api/crates/cli/src/commands/tokens.rs b/api/crates/cli/src/commands/tokens.rs
new file mode 100644
index 00000000..43b03869
--- /dev/null
+++ b/api/crates/cli/src/commands/tokens.rs
@@ -0,0 +1,75 @@
+use anyhow::Result;
+use uuid::Uuid;
+
+use bootstrap::{application, infrastructure};
+
+use application::identity::ports::api_token_repository::ApiTokenRepository;
+use application::identity::services::api_tokens::generate_api_token;
+use infrastructure::identity::crypto::Argon2SecretHasher;
+use infrastructure::identity::db::repositories::api_token_repository_sqlx::SqlxApiTokenRepository;
+
+use crate::cli::TokenCommand;
+use crate::deps::Deps;
+
+pub(crate) async fn handle(deps: &Deps, cmd: TokenCommand) -> Result<()> {
+    match cmd {
+        TokenCommand::List { workspace_id } => list_tokens(&deps.api_tokens, workspace_id).await,
+        TokenCommand::Create {
+            workspace_id,
+            owner_id,
+            name,
+        } => create_token(&deps.api_tokens, workspace_id, owner_id, name.as_deref()).await,
+        TokenCommand::Revoke {
+            workspace_id,
+            token_id,
+        } => {
+            let revoked = deps.api_tokens.revoke(workspace_id, token_id).await?;
+            if revoked {
+                println!("revoked token {}", token_id);
+            } else {
+                println!("token {} not found or already revoked", token_id);
+            }
+            Ok(())
+        }
+    }
+}
+
+async fn list_tokens(repo: &SqlxApiTokenRepository, workspace_id: Uuid) -> Result<()> {
+    let tokens = repo.list_active(workspace_id).await?;
+    println!("{} token(s) in workspace {}", tokens.len(), workspace_id);
+    for t in tokens {
+        println!(
+            "{} | name={} | owner={} | created_at={} | last_used={:?} | revoked={:?}",
+            t.id,
+            t.name,
+            t.owner_id,
+            t.created_at.to_rfc3339(),
+            t.last_used_at.map(|d| d.to_rfc3339()),
+            t.revoked_at.map(|d| d.to_rfc3339())
+        );
+    }
+    Ok(())
+}
+
+async fn create_token(
+    repo: &SqlxApiTokenRepository,
+    workspace_id: Uuid,
+    owner_id: Uuid,
+    name: Option<&str>,
+) -> Result<()> {
+    let hasher = Argon2SecretHasher;
+    let generated = generate_api_token(&hasher)?;
+    let stored = repo
+        .create(
+            workspace_id,
+            owner_id,
+            name.unwrap_or("cli-token"),
+            &generated.token_hash,
+            &generated.token_digest,
+        )
+        .await?;
+    println!("created token {} name={}", stored.id, stored.name);
+    println!("plaintext={}", generated.plaintext);
+    println!("digest={}", generated.token_digest);
+    Ok(())
+}
println!("revoked sessions for user {user_id}"); + Ok(()) + } + } +} + +async fn list_users(pool: &PgPool) -> Result<()> { + let rows = sqlx::query( + r#"SELECT id, email, name, default_workspace_id, created_at + FROM users + ORDER BY created_at"#, + ) + .fetch_all(pool) + .await?; + + println!("{} user(s)", rows.len()); + for row in rows { + let id: Uuid = row.get("id"); + let email: String = row.get("email"); + let name: String = row.get("name"); + let workspace_id: Uuid = row.get("default_workspace_id"); + let created_at: DateTime = row.get("created_at"); + println!( + "{id} | {email} | {name} | default_ws={workspace_id} | created_at={}", + created_at.to_rfc3339() + ); + } + Ok(()) +} + +async fn list_sessions(repo: &SqlxUserSessionRepository, user_id: Uuid) -> Result<()> { + let sessions = repo.list_for_user(user_id).await?; + println!("{} session(s) for user {}", sessions.len(), user_id); + for s in sessions { + println!( + "{} | workspace={} | remember={} | last_seen={} | created_at={} | revoked_at={}", + s.id, + s.workspace_id, + s.remember_me, + s.last_seen_at.to_rfc3339(), + s.created_at.to_rfc3339(), + s.revoked_at + .map(|t| t.to_rfc3339()) + .unwrap_or_else(|| "-".to_string()) + ); + } + Ok(()) +} + +async fn create_user( + user_repo: &SqlxUserRepository, + workspace_service: &dyn WorkspaceServiceFacade, + hasher: &dyn SecretHasher, + email: String, + name: String, + password: String, + explicit_user_id: Option, +) -> Result<()> { + let normalized_email = email.trim(); + ensure!(!normalized_email.is_empty(), "email must not be empty"); + ensure!(!password.trim().is_empty(), "password must not be empty"); + + let user_id = explicit_user_id.unwrap_or_else(Uuid::new_v4); + workspace_service + .create_personal_workspace_shell(user_id, name.trim()) + .await?; + + let register = Register { + repo: user_repo, + hasher, + }; + let req = RegisterRequest { + id: user_id, + email: normalized_email.to_string(), + name: name.trim().to_string(), + password, + default_workspace_id: user_id, + }; + + let user = match register.execute(&req).await { + Ok(user) => user, + Err(err) => { + let _ = workspace_service.delete_workspace(user_id).await; + return Err(err.context("failed to create user")); + } + }; + + workspace_service + .ensure_owner_membership(user_id, user_id) + .await?; + + println!( + "created user id={} email={} default_workspace={}", + user.id, user.email, user_id + ); + Ok(()) +} + +async fn delete_user(deps: &Deps, user_id: Uuid) -> Result<()> { + let uc = DeleteAccount { + user_repo: &deps.user_repo, + document_repo: &deps.document_repo, + plugin_installations: &deps.plugin_installations, + plugin_repo: &deps.plugin_repo, + plugin_assets: deps.plugin_assets.clone(), + git_repo: &deps.git_repo, + git_workspace: deps.git_workspace.as_ref(), + storage_jobs: deps.storage_jobs.as_ref(), + files_repo: &deps.files_repo, + }; + uc.execute(user_id).await?; + let _ = deps.workspace_service.delete_workspace(user_id).await?; + println!("deleted user {}", user_id); + Ok(()) +} + +async fn set_password( + pool: &PgPool, + session_repo: &SqlxUserSessionRepository, + hasher: &dyn SecretHasher, + user_id: Uuid, + password: String, + revoke_sessions: bool, +) -> Result<()> { + ensure!(!password.trim().is_empty(), "password must not be empty"); + + let hash = hasher + .hash_secret(&password) + .map_err(|e| anyhow!(e.to_string()))?; + + let res = sqlx::query("UPDATE users SET password_hash = $2, updated_at = now() WHERE id = $1") + .bind(user_id) + .bind(hash) + .execute(pool) + .await?; + + if 
diff --git a/api/crates/cli/src/commands/workspaces.rs b/api/crates/cli/src/commands/workspaces.rs
new file mode 100644
index 00000000..5f740872
--- /dev/null
+++ b/api/crates/cli/src/commands/workspaces.rs
@@ -0,0 +1,93 @@
+use anyhow::Result;
+use chrono::{DateTime, Utc};
+use sqlx::Row;
+use uuid::Uuid;
+
+use bootstrap::infrastructure;
+use infrastructure::core::db::PgPool;
+
+use crate::cli::WorkspaceCommand;
+use crate::deps::Deps;
+
+pub(crate) async fn handle(deps: &Deps, cmd: WorkspaceCommand) -> Result<()> {
+    match cmd {
+        WorkspaceCommand::List => list_workspaces(&deps.pool).await,
+        WorkspaceCommand::Members { workspace_id } => {
+            list_workspace_members(&deps.pool, workspace_id).await
+        }
+        WorkspaceCommand::Delete { workspace_id } => {
+            match deps
+                .workspace_service
+                .delete_workspace(workspace_id)
+                .await?
+            {
+                true => println!("deleted workspace {}", workspace_id),
+                false => println!("workspace {} not found", workspace_id),
+            }
+            Ok(())
+        }
+    }
+}
+
+async fn list_workspaces(pool: &PgPool) -> Result<()> {
+    let rows = sqlx::query(
+        r#"SELECT id, name, slug, is_personal, created_at
+           FROM workspaces
+           ORDER BY created_at"#,
+    )
+    .fetch_all(pool)
+    .await?;
+    println!("{} workspace(s)", rows.len());
+    for row in rows {
+        let id: Uuid = row.get("id");
+        let name: String = row.get("name");
+        let slug: String = row.get("slug");
+        let is_personal: bool = row.get("is_personal");
+        let created_at: DateTime<Utc> = row.get("created_at");
+        println!(
+            "{} | {} | slug={} | personal={} | created_at={}",
+            id,
+            name,
+            slug,
+            is_personal,
+            created_at.to_rfc3339()
+        );
+    }
+    Ok(())
+}
+
+async fn list_workspace_members(pool: &PgPool, workspace_id: Uuid) -> Result<()> {
+    let rows = sqlx::query(
+        r#"SELECT m.user_id, u.email, u.name, m.role_kind, m.system_role, m.custom_role_id, m.is_default, m.joined_at
+           FROM workspace_members m
+           JOIN users u ON u.id = m.user_id
+           WHERE m.workspace_id = $1
+           ORDER BY m.joined_at"#,
+    )
+    .bind(workspace_id)
+    .fetch_all(pool)
+    .await?;
+    println!("{} member(s) for workspace {}", rows.len(), workspace_id);
+    for row in rows {
+        let user_id: Uuid = row.get("user_id");
+        let email: String = row.get("email");
+        let name: String = row.get("name");
+        let role_kind: String = row.get("role_kind");
+        let system_role: Option<String> = row.try_get("system_role").ok();
+        let custom_role_id: Option<Uuid> = row.try_get("custom_role_id").ok();
+        let is_default: bool = row.get("is_default");
+        let joined_at: DateTime<Utc> = row.get("joined_at");
+        println!(
+            "{} | {} | {} | role_kind={} system_role={:?} custom_role_id={:?} default={} joined_at={}",
+            user_id,
+            email,
+            name,
+            role_kind,
+            system_role,
+            custom_role_id,
+            is_default,
+            joined_at.to_rfc3339()
+        );
+    }
+    Ok(())
+}
diff --git a/api/crates/cli/src/deps.rs b/api/crates/cli/src/deps.rs
new file mode 100644
index 00000000..006a0977
--- /dev/null
+++ b/api/crates/cli/src/deps.rs
@@ -0,0 +1,108 @@
+use std::sync::Arc;
+
+use anyhow::Result;
+
+use bootstrap::app::AppBuilder;
+use bootstrap::application::core::ports::storage::storage_ingest_queue::StorageIngestQueue;
+use bootstrap::application::core::ports::storage::storage_projection_queue::StorageProjectionQueue;
+use
bootstrap::application::core::ports::storage::storage_reconcile_jobs::StorageReconcileJobs;
+use bootstrap::application::git::ports::git_rebuild_job_queue::GitRebuildJobQueue;
+use bootstrap::application::plugins::ports::plugin_asset_store::PluginAssetStore;
+use bootstrap::application::workspaces::services::WorkspaceServiceFacade;
+use bootstrap::config::Config;
+use bootstrap::git::git_storage_driver_config;
+use bootstrap::infrastructure::core::db::PgPool;
+use bootstrap::infrastructure::documents::db::repositories::document_repository_sqlx::SqlxDocumentRepository;
+use bootstrap::infrastructure::documents::db::repositories::files_repository_sqlx::SqlxFilesRepository;
+use bootstrap::infrastructure::documents::db::repositories::shares_repository_sqlx::SqlxSharesRepository;
+use bootstrap::infrastructure::git::storage::build_git_storage;
+use bootstrap::infrastructure::identity::db::repositories::api_token_repository_sqlx::SqlxApiTokenRepository;
+use bootstrap::infrastructure::identity::db::repositories::user_repository_sqlx::SqlxUserRepository;
+use bootstrap::infrastructure::identity::db::repositories::user_session_repository_sqlx::SqlxUserSessionRepository;
+use bootstrap::infrastructure::plugins::db::repositories::plugin_installation_repository_sqlx::SqlxPluginInstallationRepository;
+use bootstrap::infrastructure::plugins::db::repositories::plugin_repository_sqlx::SqlxPluginRepository;
+
+use super::git_workspace::CliGitWorkspace;
+
+pub(crate) struct Deps {
+    pub(crate) pool: PgPool,
+    pub(crate) user_repo: SqlxUserRepository,
+    pub(crate) workspace_service: Arc<dyn WorkspaceServiceFacade>,
+    pub(crate) ingest_queue: Arc<dyn StorageIngestQueue>,
+    pub(crate) reconcile_jobs: Arc<dyn StorageReconcileJobs>,
+    pub(crate) git_rebuild_jobs: Arc<dyn GitRebuildJobQueue>,
+    pub(crate) session_repo: SqlxUserSessionRepository,
+    pub(crate) document_repo: SqlxDocumentRepository,
+    pub(crate) files_repo: SqlxFilesRepository,
+    pub(crate) plugin_installations: SqlxPluginInstallationRepository,
+    pub(crate) plugin_repo: SqlxPluginRepository,
+    pub(crate) api_tokens: SqlxApiTokenRepository,
+    pub(crate) shares_repo: SqlxSharesRepository,
+    pub(crate) plugin_assets: Arc<dyn PluginAssetStore>,
+    pub(crate) git_repo:
+        bootstrap::infrastructure::git::db::repositories::git_repository_sqlx::SqlxGitRepository,
+    pub(crate) storage_jobs: Arc<dyn StorageProjectionQueue>,
+    pub(crate) git_workspace: Arc<CliGitWorkspace>,
+}
+
+pub(crate) async fn build(database_url: Option<String>) -> Result<Deps> {
+    let mut cfg = Config::from_env()?;
+    if let Some(db_url) = database_url {
+        cfg.database_url = db_url;
+    }
+
+    let runtime = AppBuilder::new(cfg.clone())
+        .with_background_tasks(false)
+        .build()
+        .await?;
+    let (
+        cfg,
+        pool,
+        ctx,
+        _hub,
+        _jobs,
+        storage_jobs,
+        reconcile_jobs,
+        git_rebuild_jobs,
+        plugin_assets,
+    ) = runtime.into_parts();
+
+    let user_repo = SqlxUserRepository::new(pool.clone());
+    let workspace_service = ctx.workspace_service();
+    let ingest_queue = ctx.storage_ingest_queue();
+    let session_repo = SqlxUserSessionRepository::new(pool.clone());
+    let document_repo = SqlxDocumentRepository::new(pool.clone());
+    let files_repo = SqlxFilesRepository::new(pool.clone());
+    let plugin_installations = SqlxPluginInstallationRepository::new(pool.clone());
+    let plugin_repo = SqlxPluginRepository::new(pool.clone());
+    let api_tokens = SqlxApiTokenRepository::new(pool.clone());
+    let shares_repo = SqlxSharesRepository::new(pool.clone());
+    let git_repo =
+        bootstrap::infrastructure::git::db::repositories::git_repository_sqlx::SqlxGitRepository::new(
+            pool.clone(),
+            cfg.encryption_key.clone(),
+        );
+    let git_storage_cfg = git_storage_driver_config(&cfg)?;
+    let
git_storage = build_git_storage(git_storage_cfg).await?;
+    let git_workspace = Arc::new(CliGitWorkspace::new(pool.clone(), git_storage.clone()));
+
+    Ok(Deps {
+        pool,
+        user_repo,
+        workspace_service,
+        ingest_queue,
+        storage_jobs,
+        reconcile_jobs,
+        git_rebuild_jobs,
+        session_repo,
+        document_repo,
+        files_repo,
+        plugin_installations,
+        plugin_repo,
+        api_tokens,
+        shares_repo,
+        plugin_assets,
+        git_repo,
+        git_workspace,
+    })
+}
diff --git a/api/crates/cli/src/git_workspace.rs b/api/crates/cli/src/git_workspace.rs
new file mode 100644
index 00000000..72e0a872
--- /dev/null
+++ b/api/crates/cli/src/git_workspace.rs
@@ -0,0 +1,365 @@
+use std::sync::Arc;
+
+use chrono::{DateTime, Utc};
+use sqlx::{Row, types::Json};
+use uuid::Uuid;
+
+use bootstrap::{application, infrastructure};
+
+use application::core::ports::errors::PortResult;
+use application::git::ports::git_storage::GitStorage;
+use application::git::ports::git_workspace::GitWorkspacePort;
+use infrastructure::core::db::PgPool;
+
+pub(crate) struct CliGitWorkspace {
+    pool: PgPool,
+    git_storage: Arc<dyn GitStorage>,
+}
+
+impl CliGitWorkspace {
+    pub(crate) fn new(pool: PgPool, git_storage: Arc<dyn GitStorage>) -> Self {
+        Self { pool, git_storage }
+    }
+
+    async fn load_repository_state(
+        &self,
+        workspace_id: Uuid,
+    ) -> anyhow::Result<Option<(bool, String)>> {
+        let row = sqlx::query(
+            "SELECT initialized, default_branch FROM git_repository_state WHERE workspace_id = $1",
+        )
+        .bind(workspace_id)
+        .fetch_optional(&self.pool)
+        .await?;
+        Ok(row.map(|r| (r.get("initialized"), r.get("default_branch"))))
+    }
+
+    async fn latest_commit_meta(
+        &self,
+        workspace_id: Uuid,
+    ) -> anyhow::Result<Option<application::git::ports::git_storage::CommitMeta>> {
+        let row = sqlx::query(
+            r#"SELECT commit_id, parent_commit_id, message, author_name, author_email,
+                      committed_at, pack_key, file_hash_index
+               FROM git_commits
+               WHERE workspace_id = $1
+               ORDER BY committed_at DESC
+               LIMIT 1"#,
+        )
+        .bind(workspace_id)
+        .fetch_optional(&self.pool)
+        .await?;
+
+        row.map(row_to_commit_meta).transpose()
+    }
+
+    async fn fetch_dirty(&self, workspace_id: Uuid) -> anyhow::Result<Vec<DirtyRow>> {
+        let rows = sqlx::query(
+            r#"SELECT path, is_text, op, content_hash
+               FROM git_dirty_files
+               WHERE workspace_id = $1
+               ORDER BY created_at ASC"#,
+        )
+        .bind(workspace_id)
+        .fetch_all(&self.pool)
+        .await?;
+
+        let mut out = Vec::new();
+        for r in rows {
+            let path: String = r.get("path");
+            let op: String = r.get("op");
+            let content_hash: Option<String> = r.try_get("content_hash").ok();
+            out.push(DirtyRow {
+                path,
+                op,
+                content_hash,
+            });
+        }
+        Ok(out)
+    }
+}
+
+struct DirtyRow {
+    path: String,
+    op: String,
+    content_hash: Option<String>,
+}
+
+#[async_trait::async_trait]
+impl GitWorkspacePort for CliGitWorkspace {
+    async fn ensure_repository(
+        &self,
+        _workspace_id: Uuid,
+        _default_branch: &str,
+    ) -> PortResult<()> {
+        Err(anyhow::anyhow!("ensure_repository not supported in refmd CLI").into())
+    }
+
+    async fn remove_repository(&self, workspace_id: Uuid) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            let mut tx = self.pool.begin().await?;
+            sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1")
+                .bind(workspace_id)
+                .execute(&mut *tx)
+                .await?;
+            sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1")
+                .bind(workspace_id)
+                .execute(&mut *tx)
+                .await?;
+            sqlx::query(
+                "UPDATE git_repository_state SET initialized = false, updated_at = now() WHERE workspace_id = $1",
+            )
+            .bind(workspace_id)
+            .execute(&mut *tx)
+            .await?;
+            tx.commit().await?;
+            self.git_storage.delete_all(workspace_id).await?;
+            Ok(())
+        }
+        .await;
out.map_err(Into::into)
+    }
+
+    async fn status(
+        &self,
+        workspace_id: Uuid,
+    ) -> PortResult<application::git::dtos::GitWorkspaceStatus> {
+        let out: anyhow::Result<application::git::dtos::GitWorkspaceStatus> = async {
+            let state = self.load_repository_state(workspace_id).await?;
+            let Some((initialized, branch)) = state else {
+                return Ok(application::git::dtos::GitWorkspaceStatus {
+                    repository_initialized: false,
+                    current_branch: None,
+                    uncommitted_changes: 0,
+                    untracked_files: 0,
+                });
+            };
+            if !initialized {
+                return Ok(application::git::dtos::GitWorkspaceStatus {
+                    repository_initialized: false,
+                    current_branch: Some(branch),
+                    uncommitted_changes: 0,
+                    untracked_files: 0,
+                });
+            }
+
+            let latest = self.latest_commit_meta(workspace_id).await?;
+            let previous_index: std::collections::HashMap<String, String> = latest
+                .as_ref()
+                .map(|c| c.file_hash_index.clone())
+                .unwrap_or_default();
+
+            let dirty = self.fetch_dirty(workspace_id).await?;
+            let mut added: u32 = 0;
+            let mut modified: u32 = 0;
+            let mut deleted: u32 = 0;
+
+            for d in dirty.iter() {
+                match d.op.as_str() {
+                    "upsert" => {
+                        if let Some(prev_hash) = previous_index.get(&d.path) {
+                            match d.content_hash.as_ref() {
+                                Some(h) if h == prev_hash => {}
+                                _ => modified += 1,
+                            }
+                        } else {
+                            added += 1;
+                        }
+                    }
+                    "delete" => {
+                        deleted += 1;
+                    }
+                    _ => {}
+                }
+            }
+
+            Ok(application::git::dtos::GitWorkspaceStatus {
+                repository_initialized: true,
+                current_branch: Some(branch),
+                uncommitted_changes: modified + deleted,
+                untracked_files: added,
+            })
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn list_changes(
+        &self,
+        workspace_id: Uuid,
+    ) -> PortResult<Vec<application::git::dtos::GitChangeItem>> {
+        let out: anyhow::Result<Vec<application::git::dtos::GitChangeItem>> = async {
+            if let Some((initialized, _)) = self.load_repository_state(workspace_id).await? {
+                if !initialized {
+                    return Ok(Vec::new());
+                }
+            } else {
+                return Ok(Vec::new());
+            }
+
+            let latest = self.latest_commit_meta(workspace_id).await?;
+            let previous_index: std::collections::HashMap<String, String> = latest
+                .as_ref()
+                .map(|c| c.file_hash_index.clone())
+                .unwrap_or_default();
+            let dirty = self.fetch_dirty(workspace_id).await?;
+
+            let mut out = Vec::new();
+            for d in dirty {
+                let status = match d.op.as_str() {
+                    "delete" => "deleted",
+                    "upsert" => {
+                        if previous_index.contains_key(&d.path) {
+                            "modified"
+                        } else {
+                            "added"
+                        }
+                    }
+                    _ => "unknown",
+                };
+                out.push(application::git::dtos::GitChangeItem {
+                    path: d.path,
+                    status: status.to_string(),
+                });
+            }
+            Ok(out)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn working_diff(
+        &self,
+        _workspace_id: Uuid,
+    ) -> PortResult> {
+        Err(anyhow::anyhow!("working_diff not supported in refmd CLI").into())
+    }
+
+    async fn commit_diff(
+        &self,
+        _workspace_id: Uuid,
+        _from: &str,
+        _to: &str,
+    ) -> PortResult> {
+        Err(anyhow::anyhow!("commit_diff not supported in refmd CLI").into())
+    }
+
+    async fn history(
+        &self,
+        _workspace_id: Uuid,
+    ) -> PortResult> {
+        Err(anyhow::anyhow!("history not supported in refmd CLI").into())
+    }
+
+    async fn sync(
+        &self,
+        _workspace_id: Uuid,
+        _req: &application::git::dtos::GitSyncRequestDto,
+        _cfg: Option<&application::git::ports::git_repository::UserGitCfg>,
+    ) -> PortResult {
+        Err(anyhow::anyhow!("sync not supported in refmd CLI").into())
+    }
+
+    async fn pull(
+        &self,
+        _workspace_id: Uuid,
+        _actor_id: Uuid,
+        _req: &application::git::dtos::GitPullRequestDto,
+        _cfg: &application::git::ports::git_repository::UserGitCfg,
+    ) -> PortResult {
+        Err(anyhow::anyhow!("pull not supported in refmd CLI").into())
+    }
+
+    async fn import_repository(
+        &self,
+        _workspace_id: Uuid,
+        _actor_id: Uuid,
+        _cfg:
&application::git::ports::git_repository::UserGitCfg,
+    ) -> PortResult {
+        Err(anyhow::anyhow!("import not supported in refmd CLI").into())
+    }
+
+    async fn head_commit(&self, workspace_id: Uuid) -> PortResult<Option<Vec<u8>>> {
+        let out: anyhow::Result<Option<Vec<u8>>> = async {
+            Ok(self
+                .latest_commit_meta(workspace_id)
+                .await?
+                .map(|m| m.commit_id))
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn remote_head(
+        &self,
+        _workspace_id: Uuid,
+        _cfg: &application::git::ports::git_repository::UserGitCfg,
+    ) -> PortResult<Option<Vec<u8>>> {
+        Ok(None)
+    }
+
+    async fn has_pending_changes(&self, workspace_id: Uuid) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            let dirty_rows = self.fetch_dirty(workspace_id).await?;
+            Ok(!dirty_rows.is_empty())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn drift_since_commit(&self, workspace_id: Uuid, base_commit: &[u8]) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            // CLI helper: fallback to dirty check when full state comparison is not available.
+            if self.has_pending_changes(workspace_id).await? {
+                return Ok(true);
+            }
+            // If the base commit is not the latest, consider it stale.
+            let latest = self.latest_commit_meta(workspace_id).await?;
+            if let Some(meta) = latest
+                && meta.commit_id.as_slice() != base_commit
+            {
+                return Ok(true);
+            }
+            Ok(false)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn check_remote(
+        &self,
+        _workspace_id: Uuid,
+        _cfg: &application::git::ports::git_repository::UserGitCfg,
+    ) -> PortResult<application::git::dtos::GitRemoteCheckDto> {
+        Ok(application::git::dtos::GitRemoteCheckDto {
+            ok: false,
+            message: "remote check not supported in CLI".to_string(),
+            reason: Some("unsupported".to_string()),
+        })
+    }
+}
+
+fn row_to_commit_meta(
+    row: sqlx::postgres::PgRow,
+) -> anyhow::Result<application::git::ports::git_storage::CommitMeta> {
+    let commit_id: Vec<u8> = row.get("commit_id");
+    let parent_commit_id: Option<Vec<u8>> = row.try_get("parent_commit_id").ok();
+    let message: Option<String> = row.try_get("message").ok();
+    let author_name: Option<String> = row.try_get("author_name").ok();
+    let author_email: Option<String> = row.try_get("author_email").ok();
+    let committed_at: DateTime<Utc> = row.get("committed_at");
+    let pack_key: String = row.get("pack_key");
+    let file_hash_index: Json<std::collections::HashMap<String, String>> =
+        row.get("file_hash_index");
+
+    Ok(application::git::ports::git_storage::CommitMeta {
+        commit_id,
+        parent_commit_id,
+        message,
+        author_name,
+        author_email,
+        committed_at,
+        pack_key,
+        file_hash_index: file_hash_index.0,
+    })
+}
diff --git a/api/crates/cli/src/lib.rs b/api/crates/cli/src/lib.rs
new file mode 100644
index 00000000..ae058049
--- /dev/null
+++ b/api/crates/cli/src/lib.rs
@@ -0,0 +1,23 @@
+mod cli;
+
+mod commands;
+mod deps;
+mod git_workspace;
+
+use anyhow::Result;
+use clap::Parser;
+
+pub async fn run() -> Result<()> {
+    dotenvy::dotenv().ok();
+    let cli::Cli {
+        database_url,
+        command,
+    } = cli::Cli::parse();
+
+    if let cli::Command::Openapi { command } = command {
+        return commands::run_openapi(command);
+    }
+
+    let deps = deps::build(database_url).await?;
+    commands::run(&deps, command).await
+}
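// Editor's note: a hedged sketch, not part of this patch. The refmd binary declared
// in the workspace manifest only needs to forward to cli::run(); a thin
// src/bin/cli.rs shim could look like this, assuming the workspace's tokio
// "macros" and "rt-multi-thread" features and the cli_app alias for the cli crate.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    cli_app::run().await
}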
diff --git a/api/crates/contracts/Cargo.toml b/api/crates/contracts/Cargo.toml
new file mode 100644
index 00000000..e62275d6
--- /dev/null
+++ b/api/crates/contracts/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "contracts"
+version = "0.1.0"
+edition = "2024"
+
+[features]
+openapi = ["dep:utoipa"]
+
+[dependencies]
+serde = { version = "1", features = ["derive"] }
+utoipa = { version = "4", features = ["axum_extras", "chrono", "uuid"], optional = true }
+
diff --git a/api/src/application/dto/diff.rs b/api/crates/contracts/src/core/dtos/diff.rs
similarity index 60%
rename from api/src/application/dto/diff.rs
rename to api/crates/contracts/src/core/dtos/diff.rs
index 14a25598..c61fb030 100644
--- a/api/src/application/dto/diff.rs
+++ b/api/crates/contracts/src/core/dtos/diff.rs
@@ -1,7 +1,9 @@
 use serde::{Deserialize, Serialize};
+#[cfg(feature = "openapi")]
 use utoipa::ToSchema;
 
-#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[cfg_attr(feature = "openapi", derive(ToSchema))]
 #[serde(rename_all = "lowercase")]
 pub enum TextDiffLineType {
     Added,
@@ -9,7 +11,8 @@ pub enum TextDiffLineType {
     Context,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[cfg_attr(feature = "openapi", derive(ToSchema))]
 pub struct TextDiffLine {
     pub line_type: TextDiffLineType,
     pub old_line_number: Option,
@@ -17,7 +20,8 @@ pub struct TextDiffLine {
     pub content: String,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[cfg_attr(feature = "openapi", derive(ToSchema))]
 pub struct TextDiffResult {
     pub file_path: String,
     pub diff_lines: Vec<TextDiffLine>,
diff --git a/api/crates/contracts/src/core/dtos/mod.rs b/api/crates/contracts/src/core/dtos/mod.rs
new file mode 100644
index 00000000..9258f80c
--- /dev/null
+++ b/api/crates/contracts/src/core/dtos/mod.rs
@@ -0,0 +1,3 @@
+mod diff;
+
+pub use diff::{TextDiffLine, TextDiffLineType, TextDiffResult};
diff --git a/api/crates/contracts/src/core/mod.rs b/api/crates/contracts/src/core/mod.rs
new file mode 100644
index 00000000..6556ec43
--- /dev/null
+++ b/api/crates/contracts/src/core/mod.rs
@@ -0,0 +1 @@
+pub mod dtos;
diff --git a/api/crates/contracts/src/lib.rs b/api/crates/contracts/src/lib.rs
new file mode 100644
index 00000000..5a7ca06a
--- /dev/null
+++ b/api/crates/contracts/src/lib.rs
@@ -0,0 +1 @@
+pub mod core;
diff --git a/api/crates/domain/Cargo.toml b/api/crates/domain/Cargo.toml
new file mode 100644
index 00000000..b02cfd32
--- /dev/null
+++ b/api/crates/domain/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "domain"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+chrono = { version = "0.4", features = ["serde", "clock"] }
+serde = { version = "1", features = ["derive"] }
+uuid = { version = "1", features = ["v4", "serde"] }
diff --git a/api/src/domain/workspaces/mod.rs b/api/crates/domain/src/access/mod.rs
similarity index 100%
rename from api/src/domain/workspaces/mod.rs
rename to api/crates/domain/src/access/mod.rs
diff --git a/api/src/domain/workspaces/permissions.rs b/api/crates/domain/src/access/permissions.rs
similarity index 89%
rename from api/src/domain/workspaces/permissions.rs
rename to api/crates/domain/src/access/permissions.rs
index fbdbd1c3..8e8918db 100644
--- a/api/src/domain/workspaces/permissions.rs
+++ b/api/crates/domain/src/access/permissions.rs
@@ -65,6 +65,21 @@ pub struct PermissionSet {
     allowed: BTreeSet<String>,
 }
 
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct PermissionOverride {
+    pub permission: String,
+    pub allowed: bool,
+}
+
+impl PermissionOverride {
+    pub fn new(permission: impl Into<String>, allowed: bool) -> Self {
+        Self {
+            permission: permission.into(),
+            allowed,
+        }
+    }
+}
+
 impl PermissionSet {
     pub fn is_empty(&self) -> bool {
         self.allowed.is_empty()
@@ -166,17 +181,15 @@ pub fn system_role_permissions(role: &str) -> PermissionSet {
     }
 }
 
-pub fn apply_custom_overrides<I, S>(mut base: PermissionSet, overrides: I) -> PermissionSet
+pub fn
apply_custom_overrides<I>(mut base: PermissionSet, overrides: I) -> PermissionSet
 where
-    I: IntoIterator<Item = (S, bool)>,
-    S: AsRef<str>,
+    I: IntoIterator<Item = PermissionOverride>,
 {
-    for (permission, allowed) in overrides {
-        let key = permission.as_ref();
-        if allowed {
-            base.insert(key.to_string());
+    for item in overrides {
+        if item.allowed {
+            base.insert(item.permission);
         } else {
-            base.remove(key);
+            base.remove(&item.permission);
         }
     }
     base
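// Editor's note: a hedged example, not part of the patch, showing how the reworked
// apply_custom_overrides consumes PermissionOverride values. The constant names and
// PermissionSet helpers are the ones this module already exports; the starting set
// is purely illustrative.
fn override_example() {
    use domain::access::permissions::{
        apply_custom_overrides, PermissionOverride, PermissionSet, PERM_DOC_CREATE,
        PERM_DOC_DELETE,
    };

    let base = PermissionSet::from_slice(&[PERM_DOC_CREATE]);
    let effective = apply_custom_overrides(
        base,
        [
            PermissionOverride::new(PERM_DOC_DELETE, true),  // explicit grant
            PermissionOverride::new(PERM_DOC_CREATE, false), // explicit revoke
        ],
    );
    assert!(effective.allows(PERM_DOC_DELETE));
    assert!(!effective.allows(PERM_DOC_CREATE));
}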
diff --git a/api/crates/domain/src/core/mod.rs b/api/crates/domain/src/core/mod.rs
new file mode 100644
index 00000000..74357c67
--- /dev/null
+++ b/api/crates/domain/src/core/mod.rs
@@ -0,0 +1 @@
+// Cross-cutting domain types live here.
diff --git a/api/crates/domain/src/documents/access_policy.rs b/api/crates/domain/src/documents/access_policy.rs
new file mode 100644
index 00000000..607e2a19
--- /dev/null
+++ b/api/crates/domain/src/documents/access_policy.rs
@@ -0,0 +1,193 @@
+use chrono::{DateTime, Utc};
+use uuid::Uuid;
+
+use crate::access::permissions::{PERM_DOC_EDIT, PERM_DOC_VIEW, PermissionSet};
+use crate::documents::doc_type::DocumentType;
+use crate::documents::share::{self, ShareContext, SharePermission};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Capability {
+    None,
+    View,
+    Edit,
+}
+
+pub fn capability_for_user_document(permissions: &PermissionSet, is_archived: bool) -> Capability {
+    if !permissions.allows(PERM_DOC_VIEW) {
+        return Capability::None;
+    }
+    if is_archived {
+        Capability::View
+    } else if permissions.allows(PERM_DOC_EDIT) {
+        Capability::Edit
+    } else {
+        Capability::View
+    }
+}
+
+pub fn capability_for_public_document(is_public: bool) -> Capability {
+    if is_public {
+        Capability::View
+    } else {
+        Capability::None
+    }
+}
+
+pub fn capability_for_share_token(
+    ctx: &ShareContext,
+    doc_id: Uuid,
+    now: DateTime<Utc>,
+    is_doc_archived: bool,
+    materialized_permission: Option<SharePermission>,
+) -> Capability {
+    if is_doc_archived {
+        return Capability::None;
+    }
+    if share::is_expired(ctx.expires_at.as_ref(), now) {
+        return Capability::None;
+    }
+
+    if ctx.shared_type == DocumentType::Folder {
+        match materialized_permission {
+            Some(p) => {
+                if p.allows_edit() {
+                    Capability::Edit
+                } else {
+                    Capability::View
+                }
+            }
+            None => Capability::None,
+        }
+    } else if ctx.shared_id == doc_id {
+        if ctx.permission.allows_edit() {
+            Capability::Edit
+        } else {
+            Capability::View
+        }
+    } else {
+        Capability::None
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::documents::share::SharePermission;
+
+    #[test]
+    fn user_document_requires_view_and_archived_is_view_only() {
+        let perms = PermissionSet::default();
+        assert_eq!(
+            capability_for_user_document(&perms, false),
+            Capability::None
+        );
+
+        let perms = PermissionSet::from_slice(&[PERM_DOC_VIEW]);
+        assert_eq!(
+            capability_for_user_document(&perms, false),
+            Capability::View
+        );
+
+        let perms = PermissionSet::from_slice(&[PERM_DOC_VIEW, PERM_DOC_EDIT]);
+        assert_eq!(
+            capability_for_user_document(&perms, false),
+            Capability::Edit
+        );
+        assert_eq!(capability_for_user_document(&perms, true), Capability::View);
+    }
+
+    #[test]
+    fn public_document_is_view_only_when_published() {
+        assert_eq!(capability_for_public_document(false), Capability::None);
+        assert_eq!(capability_for_public_document(true), Capability::View);
+    }
+
+    #[test]
+    fn share_token_denies_archived_and_expired() {
+        let now = Utc::now();
+        let ctx = ShareContext {
+            share_id: Uuid::new_v4(),
+            permission: SharePermission::View,
+            expires_at: Some(now),
+            shared_id: Uuid::new_v4(),
+            shared_type: DocumentType::Document,
+            workspace_id: Uuid::new_v4(),
+        };
+        assert_eq!(
+            capability_for_share_token(&ctx, ctx.shared_id, now, false, None),
+            Capability::None
+        );
+        assert_eq!(
+            capability_for_share_token(
+                &ShareContext {
+                    expires_at: None,
+                    ..ctx
+                },
+                ctx.shared_id,
+                now,
+                true,
+                None
+            ),
+            Capability::None
+        );
+    }
+
+    #[test]
+    fn share_token_document_grants_on_id_match() {
+        let now = Utc::now();
+        let doc_id = Uuid::new_v4();
+        let ctx = ShareContext {
+            share_id: Uuid::new_v4(),
+            permission: SharePermission::Edit,
+            expires_at: None,
+            shared_id: doc_id,
+            shared_type: DocumentType::Document,
+            workspace_id: Uuid::new_v4(),
+        };
+        assert_eq!(
+            capability_for_share_token(&ctx, doc_id, now, false, None),
+            Capability::Edit
+        );
+        assert_eq!(
+            capability_for_share_token(&ctx, Uuid::new_v4(), now, false, None),
+            Capability::None
+        );
+    }
+
+    #[test]
+    fn share_token_folder_uses_materialized_permission() {
+        let now = Utc::now();
+        let ctx = ShareContext {
+            share_id: Uuid::new_v4(),
+            permission: SharePermission::View,
+            expires_at: None,
+            shared_id: Uuid::new_v4(),
+            shared_type: DocumentType::Folder,
+            workspace_id: Uuid::new_v4(),
+        };
+        assert_eq!(
+            capability_for_share_token(&ctx, Uuid::new_v4(), now, false, None),
+            Capability::None
+        );
+        assert_eq!(
+            capability_for_share_token(
+                &ctx,
+                Uuid::new_v4(),
+                now,
+                false,
+                Some(SharePermission::View)
+            ),
+            Capability::View
+        );
+        assert_eq!(
+            capability_for_share_token(
+                &ctx,
+                Uuid::new_v4(),
+                now,
+                false,
+                Some(SharePermission::Edit)
+            ),
+            Capability::Edit
+        );
+    }
+}
diff --git a/api/crates/domain/src/documents/delete_plan.rs b/api/crates/domain/src/documents/delete_plan.rs
new file mode 100644
index 00000000..0d67eb30
--- /dev/null
+++ b/api/crates/domain/src/documents/delete_plan.rs
@@ -0,0 +1,165 @@
+use uuid::Uuid;
+
+use crate::documents::doc_type::DocumentType;
+use crate::documents::meta::DocMeta;
+
+#[derive(Debug, Clone)]
+pub struct DeleteNode {
+    pub id: Uuid,
+    pub doc_type: DocumentType,
+    pub meta: DocMeta,
+    pub attachments: Vec,
+}
+
+#[derive(Debug, Clone)]
+pub struct DeleteEntry {
+    pub doc_id: Uuid,
+    pub doc_type: DocumentType,
+    pub meta: DocMeta,
+    pub attachments: Vec,
+    pub reason: &'static str,
+}
+
+pub fn build_delete_plan(
+    root_id: Uuid,
+    root_meta: DocMeta,
+    nodes: Vec<DeleteNode>,
+) -> Vec<DeleteEntry> {
+    if root_meta.doc_type != DocumentType::Folder {
+        let attachments = nodes
+            .into_iter()
+            .find(|n| n.id == root_id)
+            .map(|n| n.attachments)
+            .unwrap_or_default();
+        return vec![DeleteEntry {
+            doc_id: root_id,
+            doc_type: root_meta.doc_type,
+            meta: root_meta,
+            attachments,
+            reason: "delete_document",
+        }];
+    }
+
+    let mut entries = Vec::new();
+    for node in nodes {
+        let meta = if node.id == root_id {
+            root_meta.clone()
+        } else {
+            node.meta
+        };
+        let reason = if node.id == root_id {
+            "delete_folder"
+        } else if node.doc_type == DocumentType::Folder {
+            "delete_folder_descendant"
+        } else {
+            "delete_document_descendant"
+        };
+        entries.push(DeleteEntry {
+            doc_id: node.id,
+            doc_type: node.doc_type,
+            meta,
+            attachments: node.attachments,
+            reason,
+        });
+    }
+    entries.sort_by(|a, b| {
+        let depth_a = path_depth(a.meta.desired_path.as_str());
+        let depth_b = path_depth(b.meta.desired_path.as_str());
+        depth_b
+            .cmp(&depth_a)
+            .then_with(|| is_folder(a.doc_type).cmp(&is_folder(b.doc_type)))
+    });
+    entries
+}
+
+fn path_depth(path: &str) -> usize {
+    path.split('/')
+        .filter(|segment| !segment.is_empty())
+        .count()
+}
+
+fn is_folder(doc_type: DocumentType) -> usize {
if doc_type.is_folder() { 1 } else { 0 }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::documents::doc_type::DocumentType;
+
+    #[test]
+    fn sorts_by_depth_desc_then_folder_last() {
+        let root_id = Uuid::new_v4();
+        let workspace_id = Uuid::new_v4();
+        let root_meta = DocMeta {
+            workspace_id,
+            doc_type: DocumentType::Folder,
+            path: Some(format!("{}/", workspace_id)),
+            slug: crate::documents::path::Slug::new("root".to_string()).unwrap(),
+            desired_path: crate::documents::path::DesiredPath::root(),
+            title: crate::documents::title::Title::new("root"),
+            archived_at: None,
+        };
+        let doc1 = Uuid::new_v4();
+        let folder = Uuid::new_v4();
+        let leaf = Uuid::new_v4();
+        let nodes = vec![
+            DeleteNode {
+                id: root_id,
+                doc_type: DocumentType::Folder,
+                meta: root_meta.clone(),
+                attachments: vec![],
+            },
+            DeleteNode {
+                id: doc1,
+                doc_type: DocumentType::Document,
+                meta: DocMeta {
+                    workspace_id,
+                    doc_type: DocumentType::Document,
+                    path: Some(format!("{}/doc1", workspace_id)),
+                    slug: crate::documents::path::Slug::new("doc1".to_string()).unwrap(),
+                    desired_path: crate::documents::path::DesiredPath::new("doc1").unwrap(),
+                    title: crate::documents::title::Title::new("doc1"),
+                    archived_at: None,
+                },
+                attachments: vec![],
+            },
+            DeleteNode {
+                id: folder,
+                doc_type: DocumentType::Folder,
+                meta: DocMeta {
+                    workspace_id,
+                    doc_type: DocumentType::Folder,
+                    path: Some(format!("{}/folder", workspace_id)),
+                    slug: crate::documents::path::Slug::new("folder".to_string()).unwrap(),
+                    desired_path: crate::documents::path::DesiredPath::new("folder").unwrap(),
+                    title: crate::documents::title::Title::new("folder"),
+                    archived_at: None,
+                },
+                attachments: vec![],
+            },
+            DeleteNode {
+                id: leaf,
+                doc_type: DocumentType::Document,
+                meta: DocMeta {
+                    workspace_id,
+                    doc_type: DocumentType::Document,
+                    path: Some(format!("{}/folder/leaf", workspace_id)),
+                    slug: crate::documents::path::Slug::new("leaf".to_string()).unwrap(),
+                    desired_path: crate::documents::path::DesiredPath::new("folder/leaf").unwrap(),
+                    title: crate::documents::title::Title::new("leaf"),
+                    archived_at: None,
+                },
+                attachments: vec![],
+            },
+        ];
+
+        let entries = build_delete_plan(root_id, root_meta, nodes);
+
+        // Expected order: deepest doc leaf, sibling doc, folder, root folder last
+        assert_eq!(entries[0].doc_id, leaf);
+        assert_eq!(entries[1].doc_id, doc1);
+        assert_eq!(entries[2].doc_id, folder);
+        assert_eq!(entries[3].doc_id, root_id);
+    }
+}
diff --git a/api/crates/domain/src/documents/doc_type.rs b/api/crates/domain/src/documents/doc_type.rs
new file mode 100644
index 00000000..dc15b82e
--- /dev/null
+++ b/api/crates/domain/src/documents/doc_type.rs
@@ -0,0 +1,87 @@
+use std::fmt;
+use std::str::FromStr;
+
+use serde::{Deserialize, Serialize};
+
+pub const DOC_TYPE_FOLDER: &str = "folder";
+pub const DOC_TYPE_DOCUMENT: &str = "document";
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum DocumentType {
+    Folder,
+    Document,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct InvalidDocumentType;
+
+impl fmt::Display for InvalidDocumentType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("invalid document type")
+    }
+}
+
+impl std::error::Error for InvalidDocumentType {}
+
+impl DocumentType {
+    pub fn parse(doc_type: &str) -> Option<Self> {
+        match doc_type.trim() {
+            DOC_TYPE_FOLDER => Some(Self::Folder),
+            DOC_TYPE_DOCUMENT => Some(Self::Document),
+            _ => None,
+        }
+    }
+
+    pub const fn
as_str(self) -> &'static str {
+        match self {
+            Self::Folder => DOC_TYPE_FOLDER,
+            Self::Document => DOC_TYPE_DOCUMENT,
+        }
+    }
+
+    pub const fn is_folder(self) -> bool {
+        matches!(self, Self::Folder)
+    }
+}
+
+impl TryFrom<&str> for DocumentType {
+    type Error = InvalidDocumentType;
+
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        value.parse()
+    }
+}
+
+impl FromStr for DocumentType {
+    type Err = InvalidDocumentType;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Self::parse(s).ok_or(InvalidDocumentType)
+    }
+}
+
+impl fmt::Display for DocumentType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn parses_and_formats() {
+        assert_eq!(DocumentType::parse("folder"), Some(DocumentType::Folder));
+        assert_eq!(
+            DocumentType::parse(" document "),
+            Some(DocumentType::Document)
+        );
+        assert_eq!(DocumentType::parse("nope"), None);
+        assert_eq!(DocumentType::Folder.as_str(), DOC_TYPE_FOLDER);
+        assert_eq!(DocumentType::Document.to_string(), DOC_TYPE_DOCUMENT);
+        assert!(DocumentType::Folder.is_folder());
+        assert!(!DocumentType::Document.is_folder());
+    }
+}
diff --git a/api/crates/domain/src/documents/document.rs b/api/crates/domain/src/documents/document.rs
new file mode 100644
index 00000000..6faea782
--- /dev/null
+++ b/api/crates/domain/src/documents/document.rs
@@ -0,0 +1,162 @@
+use uuid::Uuid;
+
+use crate::documents::doc_type::DocumentType;
+use crate::documents::path::{DesiredPath, Slug};
+use crate::documents::title::Title;
+
+#[derive(Debug, Clone)]
+pub struct Document {
+    id: Uuid,
+    owner_user_id: Option<Uuid>,
+    workspace_id: Uuid,
+    title: Title,
+    parent_id: Option<Uuid>,
+    doc_type: DocumentType,
+    created_at: chrono::DateTime<chrono::Utc>,
+    updated_at: chrono::DateTime<chrono::Utc>,
+    created_by_plugin: Option<String>,
+    slug: Slug,
+    desired_path: DesiredPath,
+    path: Option<String>,
+    created_by: Option<Uuid>,
+    archived_at: Option<chrono::DateTime<chrono::Utc>>,
+    archived_by: Option<Uuid>,
+    archived_parent_id: Option<Uuid>,
+}
+
+impl Document {
+    #[allow(clippy::too_many_arguments)]
+    pub fn rehydrate(
+        id: Uuid,
+        owner_user_id: Option<Uuid>,
+        workspace_id: Uuid,
+        title: Title,
+        parent_id: Option<Uuid>,
+        doc_type: DocumentType,
+        created_at: chrono::DateTime<chrono::Utc>,
+        updated_at: chrono::DateTime<chrono::Utc>,
+        created_by_plugin: Option<String>,
+        slug: Slug,
+        desired_path: DesiredPath,
+        path: Option<String>,
+        created_by: Option<Uuid>,
+        archived_at: Option<chrono::DateTime<chrono::Utc>>,
+        archived_by: Option<Uuid>,
+        archived_parent_id: Option<Uuid>,
+    ) -> Self {
+        Self {
+            id,
+            owner_user_id,
+            workspace_id,
+            title,
+            parent_id,
+            doc_type,
+            created_at,
+            updated_at,
+            created_by_plugin,
+            slug,
+            desired_path,
+            path,
+            created_by,
+            archived_at,
+            archived_by,
+            archived_parent_id,
+        }
+    }
+
+    pub fn id(&self) -> Uuid {
+        self.id
+    }
+
+    pub fn owner_user_id(&self) -> Option<Uuid> {
+        self.owner_user_id
+    }
+
+    pub fn workspace_id(&self) -> Uuid {
+        self.workspace_id
+    }
+
+    pub fn title(&self) -> &Title {
+        &self.title
+    }
+
+    pub fn parent_id(&self) -> Option<Uuid> {
+        self.parent_id
+    }
+
+    pub fn doc_type(&self) -> DocumentType {
+        self.doc_type
+    }
+
+    pub fn created_at(&self) -> chrono::DateTime<chrono::Utc> {
+        self.created_at
+    }
+
+    pub fn updated_at(&self) -> chrono::DateTime<chrono::Utc> {
+        self.updated_at
+    }
+
+    pub fn created_by_plugin(&self) -> Option<&str> {
+        self.created_by_plugin.as_deref()
+    }
+
+    pub fn slug(&self) -> &Slug {
+        &self.slug
+    }
+
+    pub fn desired_path(&self) -> &DesiredPath {
+        &self.desired_path
+    }
+
+    pub fn path(&self) -> Option<&str> {
+        self.path.as_deref()
+    }
+
+    pub fn created_by(&self) -> Option<Uuid> {
+        self.created_by
+    }
+
+    pub fn
archived_at(&self) -> Option<chrono::DateTime<chrono::Utc>> {
+        self.archived_at
+    }
+
+    pub fn archived_by(&self) -> Option<Uuid> {
+        self.archived_by
+    }
+
+    pub fn archived_parent_id(&self) -> Option<Uuid> {
+        self.archived_parent_id
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct SearchHit {
+    pub id: Uuid,
+    pub title: Title,
+    pub doc_type: DocumentType,
+    pub path: Option<String>,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+}
+
+#[derive(Debug, Clone)]
+pub struct BacklinkInfo {
+    pub document_id: Uuid,
+    pub title: Title,
+    pub document_type: DocumentType,
+    pub file_path: Option<String>,
+    pub link_type: String,
+    pub link_text: Option<String>,
+    pub link_count: i64,
+}
+
+#[derive(Debug, Clone)]
+pub struct OutgoingLink {
+    pub document_id: Uuid,
+    pub title: Title,
+    pub document_type: DocumentType,
+    pub file_path: Option<String>,
+    pub link_type: String,
+    pub link_text: Option<String>,
+    pub position_start: Option,
+    pub position_end: Option,
+}
diff --git a/api/crates/domain/src/documents/hierarchy.rs b/api/crates/domain/src/documents/hierarchy.rs
new file mode 100644
index 00000000..fed1aa00
--- /dev/null
+++ b/api/crates/domain/src/documents/hierarchy.rs
@@ -0,0 +1,55 @@
+use chrono::{DateTime, Utc};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ParentValidationError {
+    NotFound,
+    Archived,
+}
+
+#[derive(Debug, Clone)]
+pub struct ParentMeta {
+    pub archived_at: Option<DateTime<Utc>>,
+}
+
+pub fn ensure_active_parent(meta: Option<ParentMeta>) -> Result<(), ParentValidationError> {
+    match meta {
+        Some(pm) => {
+            if pm.archived_at.is_some() {
+                Err(ParentValidationError::Archived)
+            } else {
+                Ok(())
+            }
+        }
+        None => Err(ParentValidationError::NotFound),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn active_parent_ok() {
+        let pm = ParentMeta { archived_at: None };
+        assert_eq!(ensure_active_parent(Some(pm)), Ok(()));
+    }
+
+    #[test]
+    fn archived_parent_rejected() {
+        let pm = ParentMeta {
+            archived_at: Some(Utc::now()),
+        };
+        assert_eq!(
+            ensure_active_parent(Some(pm)),
+            Err(ParentValidationError::Archived)
+        );
+    }
+
+    #[test]
+    fn missing_parent_rejected() {
+        assert_eq!(
+            ensure_active_parent(None),
+            Err(ParentValidationError::NotFound)
+        );
+    }
+}
diff --git a/api/crates/domain/src/documents/meta.rs b/api/crates/domain/src/documents/meta.rs
new file mode 100644
index 00000000..47ec75ed
--- /dev/null
+++ b/api/crates/domain/src/documents/meta.rs
@@ -0,0 +1,17 @@
+use chrono::{DateTime, Utc};
+use uuid::Uuid;
+
+use crate::documents::doc_type::DocumentType;
+use crate::documents::path::{DesiredPath, Slug};
+use crate::documents::title::Title;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct DocMeta {
+    pub workspace_id: Uuid,
+    pub doc_type: DocumentType,
+    pub path: Option<String>,
+    pub slug: Slug,
+    pub desired_path: DesiredPath,
+    pub title: Title,
+    pub archived_at: Option<DateTime<Utc>>,
+}
diff --git a/api/crates/domain/src/documents/mod.rs b/api/crates/domain/src/documents/mod.rs
new file mode 100644
index 00000000..a2d007e2
--- /dev/null
+++ b/api/crates/domain/src/documents/mod.rs
@@ -0,0 +1,13 @@
+pub mod access_policy;
+pub mod delete_plan;
+pub mod doc_type;
+pub mod document;
+pub mod hierarchy;
+pub mod meta;
+pub mod path;
+pub mod permissions;
+pub mod policy;
+pub mod public_policy;
+pub mod share;
+pub mod sharing_policy;
+pub mod title;
diff --git a/api/crates/domain/src/documents/path.rs b/api/crates/domain/src/documents/path.rs
new file mode 100644
index 00000000..4e4f02da
--- /dev/null
+++ b/api/crates/domain/src/documents/path.rs
@@ -0,0 +1,545 @@
+use std::fmt;
+use std::path::{Component, Path, PathBuf};
+use uuid::Uuid;
+
+use
crate::documents::doc_type::DocumentType;
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct RepoPath(String);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct InvalidRepoPath;
+
+impl fmt::Display for InvalidRepoPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("invalid repo path")
+    }
+}
+
+impl std::error::Error for InvalidRepoPath {}
+
+impl RepoPath {
+    pub fn new(raw: impl Into<String>) -> Result<Self, InvalidRepoPath> {
+        let raw = raw.into();
+        let normalized = normalize_repo_path_impl(&raw).ok_or(InvalidRepoPath)?;
+        Ok(Self(normalized))
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+
+    pub fn into_string(self) -> String {
+        self.0
+    }
+}
+
+impl fmt::Display for RepoPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct Slug(String);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct InvalidSlug;
+
+impl fmt::Display for InvalidSlug {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("invalid slug")
+    }
+}
+
+impl std::error::Error for InvalidSlug {}
+
+impl Slug {
+    pub fn from_title(title: &str) -> Self {
+        Self(slugify_impl(title))
+    }
+
+    pub fn new(raw: impl Into<String>) -> Result<Self, InvalidSlug> {
+        let raw = raw.into();
+        let trimmed = raw.trim();
+        if trimmed.is_empty() {
+            return Err(InvalidSlug);
+        }
+        if matches!(trimmed, "." | "..") {
+            return Err(InvalidSlug);
+        }
+        if trimmed.contains('/') || trimmed.contains('\\') {
+            return Err(InvalidSlug);
+        }
+        Ok(Self(trimmed.to_string()))
+    }
+
+    pub fn with_suffix(&self, attempt: usize) -> Self {
+        Self(apply_slug_suffix(self.as_str(), attempt))
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+
+    pub fn into_string(self) -> String {
+        self.0
+    }
+}
+
+impl fmt::Display for Slug {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct DesiredPath(String);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct InvalidDesiredPath;
+
+impl fmt::Display for InvalidDesiredPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("invalid desired path")
+    }
+}
+
+impl std::error::Error for InvalidDesiredPath {}
+
+impl DesiredPath {
+    pub fn root() -> Self {
+        Self(String::new())
+    }
+
+    pub fn new(raw: impl Into<String>) -> Result<Self, InvalidDesiredPath> {
+        let raw = raw.into();
+        let normalized = normalize_repo_path_impl(&raw).ok_or(InvalidDesiredPath)?;
+        // Ensure the last segment is not empty (e.g. ".md") after stripping extension.
+        let last = normalized.rsplit('/').next().unwrap_or_default().trim();
+        let base = last.strip_suffix(".md").unwrap_or(last).trim();
+        if base.is_empty() || matches!(base, "."
| "..") {
+            return Err(InvalidDesiredPath);
+        }
+        Ok(Self(normalized))
+    }
+
+    pub fn from_parent_and_slug(
+        parent_desired_path: Option<&DesiredPath>,
+        slug: &Slug,
+        doc_type: DocumentType,
+    ) -> Self {
+        let prefix = parent_desired_path
+            .map(|p| p.as_str())
+            .filter(|p| !p.is_empty())
+            .map(|p| {
+                if p.ends_with('/') {
+                    p.to_string()
+                } else {
+                    format!("{p}/")
+                }
+            })
+            .unwrap_or_default();
+
+        let desired = if doc_type.is_folder() {
+            format!("{prefix}{}", slug.as_str())
+        } else {
+            format!("{prefix}{}.md", slug.as_str())
+        };
+        Self(desired)
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+
+    pub fn into_string(self) -> String {
+        self.0
+    }
+}
+
+impl fmt::Display for DesiredPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+/// Convert an absolute storage path (/workspace_id/... or workspace_id/...) to a repo-relative path.
+pub fn repo_relative_from_storage(workspace_id: Uuid, storage_path: &str) -> Option<RepoPath> {
+    let trimmed = storage_path.trim_start_matches('/');
+    let owner_prefix = workspace_id.to_string();
+    let remainder = trimmed
+        .strip_prefix(&owner_prefix)
+        .map(|rest| rest.trim_start_matches('/'))
+        .unwrap_or(trimmed);
+    if remainder.is_empty() {
+        None
+    } else {
+        RepoPath::new(remainder.to_string()).ok()
+    }
+}
+
+/// Normalize a repo-relative path string.
+///
+/// - Trims whitespace and leading slashes
+/// - Rejects traversal (`..`) and absolute paths
+/// - Collapses redundant separators and `.` segments
+/// - Standardizes path separators to `/`
+pub fn normalize_repo_path(repo_path: &str) -> Option<String> {
+    normalize_repo_path_impl(repo_path)
+}
+
+/// Convert a stored doc path (which may be absolute or prefixed with workspace id) to repo-relative.
+pub fn workspace_repo_relative(workspace_id: Uuid, stored_path: Option<&str>) -> Option<RepoPath> {
+    let stored = stored_path?.trim_start_matches('/');
+    if stored.is_empty() {
+        return None;
+    }
+    let owner_prefix = workspace_id.to_string();
+    let repo = if let Some(rest) = stored.strip_prefix(&owner_prefix) {
+        rest.trim_start_matches('/')
+    } else {
+        stored
+    };
+    if repo.is_empty() {
+        None
+    } else {
+        RepoPath::new(repo.to_string()).ok()
+    }
+}
+
+fn slugify_impl(title: &str) -> String {
+    let trimmed = title.trim();
+    if trimmed.is_empty() {
+        return "untitled".to_string();
+    }
+
+    let mut slug = String::with_capacity(trimmed.len());
+    let mut last_was_space = false;
+    for ch in trimmed.chars() {
+        if ch.is_control() {
+            continue;
+        }
+        if ch.is_whitespace() {
+            if !last_was_space {
+                slug.push(' ');
+                last_was_space = true;
+            }
+            continue;
+        }
+        last_was_space = false;
+        let safe = match ch {
+            '/' | '\\' | ':' | '*' | '?'
| '\"' | '<' | '>' | '|' => '-',
+            _ => ch,
+        };
+        slug.push(safe);
+    }
+
+    let mut slug = slug
+        .trim_matches(|c: char| matches!(c, ' ' | '-'))
+        .to_string();
+    if slug.is_empty() {
+        slug.push_str("untitled");
+    }
+    if slug.len() > 100 {
+        slug.truncate(100);
+    }
+    slug
+}
+
+pub fn apply_slug_suffix(base: &str, attempt: usize) -> String {
+    if attempt == 0 {
+        base.to_string()
+    } else {
+        format!("{base}-{}", attempt + 1)
+    }
+}
+
+pub fn desired_path_candidates<'a>(
+    base_slug: &'a Slug,
+    parent_desired_path: Option<&'a DesiredPath>,
+    doc_type: DocumentType,
+    max_attempts: usize,
+) -> impl Iterator<Item = (Slug, DesiredPath)> + 'a {
+    (0..max_attempts).map(move |attempt| {
+        let slug = base_slug.with_suffix(attempt);
+        let desired_path = DesiredPath::from_parent_and_slug(parent_desired_path, &slug, doc_type);
+        (slug, desired_path)
+    })
+}
+
+pub fn parent_desired_path(desired_path: &DesiredPath) -> Option<DesiredPath> {
+    let mut parts = desired_path.as_str().rsplitn(2, '/');
+    parts.next()?; // skip current file/folder
+    parts
+        .next()
+        .and_then(|p| DesiredPath::new(p.to_string()).ok())
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct InvalidSlugFromDesiredPath;
+
+impl fmt::Display for InvalidSlugFromDesiredPath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("invalid slug from desired path")
+    }
+}
+
+impl std::error::Error for InvalidSlugFromDesiredPath {}
+
+pub fn slug_from_desired_path(
+    desired_path: &DesiredPath,
+) -> Result<Slug, InvalidSlugFromDesiredPath> {
+    let segment = desired_path
+        .as_str()
+        .rsplit('/')
+        .next()
+        .ok_or(InvalidSlugFromDesiredPath)?;
+    let trimmed = segment.trim();
+    if trimmed.is_empty() {
+        return Err(InvalidSlugFromDesiredPath);
+    }
+    let slug = trimmed
+        .strip_suffix(".md")
+        .unwrap_or(trimmed)
+        .trim_matches('/');
+    if slug.is_empty() {
+        return Err(InvalidSlugFromDesiredPath);
+    }
+    Slug::new(slug.to_string()).map_err(|_| InvalidSlugFromDesiredPath)
+}
+
+fn normalize_repo_path_impl(repo_path: &str) -> Option<String> {
+    let trimmed = repo_path.trim().trim_start_matches('/');
+    if trimmed.is_empty() {
+        return None;
+    }
+    // Treat Windows separators as separators before component parsing.
+ let trimmed = trimmed.replace('\\', "/"); + let mut normalized = PathBuf::new(); + for component in Path::new(&trimmed).components() { + match component { + Component::Normal(part) => normalized.push(part), + Component::CurDir => continue, + _ => return None, + } + } + if normalized.as_os_str().is_empty() { + return None; + } + Some(normalized.to_string_lossy().replace('\\', "/")) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::documents::doc_type::DocumentType; + + #[test] + fn repo_path_new_normalizes_and_rejects_traversal() { + assert_eq!( + RepoPath::new("//docs//foo.md").unwrap().as_str(), + "docs/foo.md" + ); + assert!(RepoPath::new("../secret").is_err()); + assert!(RepoPath::new("foo/../bar").is_err()); + } + + #[test] + fn slug_new_rejects_empty() { + assert!(Slug::new("".to_string()).is_err()); + assert!(Slug::new(" ".to_string()).is_err()); + } + + #[test] + fn repo_relative_strips_workspace_prefix() { + let ws = Uuid::new_v4(); + let p = format!("{}/foo/bar.md", ws); + assert_eq!( + repo_relative_from_storage(ws, &p), + Some(RepoPath::new("foo/bar.md".to_string()).unwrap()) + ); + let with_slash = format!("/{}/foo/bar.md", ws); + assert_eq!( + repo_relative_from_storage(ws, &with_slash), + Some(RepoPath::new("foo/bar.md".to_string()).unwrap()) + ); + } + + #[test] + fn repo_relative_keeps_plain_paths() { + let ws = Uuid::new_v4(); + assert_eq!( + repo_relative_from_storage(ws, "foo/bar.md"), + Some(RepoPath::new("foo/bar.md".to_string()).unwrap()) + ); + } + + #[test] + fn repo_relative_empty_becomes_none() { + let ws = Uuid::new_v4(); + assert_eq!(repo_relative_from_storage(ws, "/"), None); + let p = format!("{}/", ws); + assert_eq!(repo_relative_from_storage(ws, &p), None); + } + + #[test] + fn workspace_repo_relative_strips_workspace_prefix_and_slash() { + let ws = Uuid::new_v4(); + let p = format!("{}/docs/readme.md", ws); + assert_eq!( + workspace_repo_relative(ws, Some(&p)), + Some(RepoPath::new("docs/readme.md".to_string()).unwrap()) + ); + let with_slash = format!("/{}/docs/readme.md", ws); + assert_eq!( + workspace_repo_relative(ws, Some(&with_slash)), + Some(RepoPath::new("docs/readme.md".to_string()).unwrap()) + ); + } + + #[test] + fn workspace_repo_relative_passes_through_repo_paths() { + let ws = Uuid::new_v4(); + assert_eq!( + workspace_repo_relative(ws, Some("docs/readme.md")), + Some(RepoPath::new("docs/readme.md".to_string()).unwrap()) + ); + } + + #[test] + fn workspace_repo_relative_none_or_empty() { + let ws = Uuid::new_v4(); + assert_eq!(workspace_repo_relative(ws, None), None); + assert_eq!(workspace_repo_relative(ws, Some("/")), None); + } + + #[test] + fn desired_path_candidates_yields_slug_and_desired_path() { + let base = Slug::new("foo".to_string()).unwrap(); + let parent = DesiredPath::new("bar").unwrap(); + let candidates: Vec<_> = + desired_path_candidates(&base, Some(&parent), DocumentType::Document, 2).collect(); + assert_eq!( + candidates, + vec![ + ( + Slug::new("foo".to_string()).unwrap(), + DesiredPath::new("bar/foo.md").unwrap() + ), + ( + Slug::new("foo-2".to_string()).unwrap(), + DesiredPath::new("bar/foo-2.md").unwrap() + ), + ] + ); + } + + #[test] + fn desired_path_candidates_for_folder_omits_md_extension() { + let base = Slug::new("foo".to_string()).unwrap(); + let parent = DesiredPath::new("bar/").unwrap(); + let candidates: Vec<_> = + desired_path_candidates(&base, Some(&parent), DocumentType::Folder, 2).collect(); + assert_eq!( + candidates, + vec![ + ( + Slug::new("foo".to_string()).unwrap(), + 
DesiredPath::new("bar/foo").unwrap()
+                ),
+                (
+                    Slug::new("foo-2".to_string()).unwrap(),
+                    DesiredPath::new("bar/foo-2").unwrap()
+                ),
+            ]
+        );
+    }
+
+    #[test]
+    fn desired_path_candidates_with_zero_attempts_is_empty() {
+        let base = Slug::new("foo".to_string()).unwrap();
+        let candidates: Vec<_> =
+            desired_path_candidates(&base, None, DocumentType::Document, 0).collect();
+        assert!(candidates.is_empty());
+    }
+
+    #[test]
+    fn from_parent_and_slug_handles_root_parent() {
+        let slug = Slug::new("foo".to_string()).unwrap();
+        let root = DesiredPath::root();
+        assert_eq!(
+            DesiredPath::from_parent_and_slug(Some(&root), &slug, DocumentType::Document),
+            DesiredPath::new("foo.md").unwrap()
+        );
+    }
+
+    #[test]
+    fn parent_desired_path_extracts_parent_segment() {
+        assert_eq!(
+            parent_desired_path(&DesiredPath::new("a/b.md").unwrap()),
+            Some(DesiredPath::new("a").unwrap())
+        );
+        assert_eq!(
+            parent_desired_path(&DesiredPath::new("b.md").unwrap()),
+            None
+        );
+    }
+
+    #[test]
+    fn slug_from_desired_path_strips_md_extension_only_when_present() {
+        assert_eq!(
+            slug_from_desired_path(&DesiredPath::new("a/b.md").unwrap())
+                .unwrap()
+                .as_str(),
+            "b"
+        );
+        assert_eq!(
+            slug_from_desired_path(&DesiredPath::new("a/b").unwrap())
+                .unwrap()
+                .as_str(),
+            "b"
+        );
+        assert_eq!(
+            slug_from_desired_path(&DesiredPath::new("a/b.md.backup").unwrap())
+                .unwrap()
+                .as_str(),
+            "b.md.backup"
+        );
+    }
+
+    #[test]
+    fn slug_from_desired_path_rejects_trailing_slash_and_empty_slug() {
+        let p = DesiredPath::new("a/b/").unwrap();
+        assert_eq!(p.as_str(), "a/b");
+        assert_eq!(slug_from_desired_path(&p).unwrap().as_str(), "b");
+        assert!(DesiredPath::new(".md").is_err());
+    }
+
+    #[test]
+    fn normalize_repo_path_trims_and_standardizes() {
+        assert_eq!(
+            normalize_repo_path("//docs//foo.md"),
+            Some("docs/foo.md".to_string())
+        );
+        assert_eq!(
+            normalize_repo_path("notes/./bar.md"),
+            Some("notes/bar.md".to_string())
+        );
+        let user = Uuid::new_v4();
+        let path = format!(r"{}\notes\foo.md", user);
+        assert_eq!(
+            normalize_repo_path(&path),
+            Some(format!("{}/notes/foo.md", user))
+        );
+    }
+
+    #[test]
+    fn normalize_repo_path_rejects_traversal_and_empty() {
+        assert!(normalize_repo_path("../secret").is_none());
+        assert!(normalize_repo_path("foo/../bar").is_none());
+        assert!(normalize_repo_path(r"foo\..\bar").is_none());
+        assert!(normalize_repo_path("").is_none());
+        assert!(normalize_repo_path("/").is_none());
+    }
+}
diff --git a/api/crates/domain/src/documents/permissions.rs b/api/crates/domain/src/documents/permissions.rs
new file mode 100644
index 00000000..01b3f3a7
--- /dev/null
+++ b/api/crates/domain/src/documents/permissions.rs
@@ -0,0 +1,104 @@
+use crate::access::permissions::{
+    PERM_DOC_ARCHIVE, PERM_DOC_CREATE, PERM_DOC_DELETE, PERM_DOC_EDIT, PERM_DOC_MOVE,
+    PERM_FOLDER_CREATE, PERM_FOLDER_DELETE, PermissionSet,
+};
+use crate::documents::doc_type::DocumentType;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum DocumentPermissionError {
+    Forbidden,
+}
+
+pub type Result<T> = std::result::Result<T, DocumentPermissionError>;
+
+pub fn ensure_can_create(permissions: &PermissionSet, doc_type: DocumentType) -> Result<()> {
+    ensure_folder_sensitive_permission(
+        permissions,
+        doc_type,
+        PERM_DOC_CREATE,
+        Some(PERM_FOLDER_CREATE),
+    )
+}
+
+pub fn ensure_can_delete(permissions: &PermissionSet, doc_type: DocumentType) -> Result<()> {
+    ensure_folder_sensitive_permission(
+        permissions,
+        doc_type,
+        PERM_DOC_DELETE,
+        Some(PERM_FOLDER_DELETE),
+    )
+}
+
+pub fn ensure_can_edit(permissions: &PermissionSet,
doc_type: DocumentType) -> Result<()> {
+    ensure_folder_sensitive_permission(permissions, doc_type, PERM_DOC_EDIT, None)
+}
+
+pub fn ensure_can_move(permissions: &PermissionSet, doc_type: DocumentType) -> Result<()> {
+    ensure_folder_sensitive_permission(permissions, doc_type, PERM_DOC_MOVE, None)
+}
+
+pub fn ensure_can_archive(permissions: &PermissionSet, doc_type: DocumentType) -> Result<()> {
+    ensure_folder_sensitive_permission(permissions, doc_type, PERM_DOC_ARCHIVE, None)
+}
+
+fn ensure_folder_sensitive_permission(
+    permissions: &PermissionSet,
+    doc_type: DocumentType,
+    doc_permission: &'static str,
+    folder_permission: Option<&'static str>,
+) -> Result<()> {
+    let required = if doc_type.is_folder() {
+        folder_permission.unwrap_or(doc_permission)
+    } else {
+        doc_permission
+    };
+    if permissions.allows(required) {
+        Ok(())
+    } else {
+        Err(DocumentPermissionError::Forbidden)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::documents::doc_type::DocumentType;
+
+    #[test]
+    fn folder_creation_requires_folder_permission() {
+        let perms = PermissionSet::from_slice(&[PERM_DOC_CREATE]);
+        assert_eq!(
+            ensure_can_create(&perms, DocumentType::Folder),
+            Err(DocumentPermissionError::Forbidden)
+        );
+
+        let perms = PermissionSet::from_slice(&[PERM_FOLDER_CREATE]);
+        assert_eq!(ensure_can_create(&perms, DocumentType::Folder), Ok(()));
+    }
+
+    #[test]
+    fn document_creation_requires_doc_permission() {
+        let perms = PermissionSet::from_slice(&[PERM_DOC_CREATE]);
+        assert_eq!(ensure_can_create(&perms, DocumentType::Document), Ok(()));
+    }
+
+    #[test]
+    fn folder_delete_uses_folder_permission_when_available() {
+        let perms = PermissionSet::from_slice(&[PERM_DOC_DELETE]);
+        assert_eq!(
+            ensure_can_delete(&perms, DocumentType::Folder),
+            Err(DocumentPermissionError::Forbidden)
+        );
+
+        let perms = PermissionSet::from_slice(&[PERM_FOLDER_DELETE]);
+        assert_eq!(ensure_can_delete(&perms, DocumentType::Folder), Ok(()));
+    }
+
+    #[test]
+    fn edit_move_archive_use_doc_permissions_for_all_types() {
+        let perms = PermissionSet::from_slice(&[PERM_DOC_EDIT, PERM_DOC_MOVE, PERM_DOC_ARCHIVE]);
+        assert_eq!(ensure_can_edit(&perms, DocumentType::Folder), Ok(()));
+        assert_eq!(ensure_can_move(&perms, DocumentType::Folder), Ok(()));
+        assert_eq!(ensure_can_archive(&perms, DocumentType::Folder), Ok(()));
+    }
+}
diff --git a/api/crates/domain/src/documents/policy.rs b/api/crates/domain/src/documents/policy.rs
new file mode 100644
index 00000000..3fdafce8
--- /dev/null
+++ b/api/crates/domain/src/documents/policy.rs
@@ -0,0 +1,176 @@
+use chrono::{DateTime, Utc};
+
+use crate::access::permissions::PermissionSet;
+use crate::documents::doc_type::DocumentType;
+use crate::documents::permissions;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum DocumentPolicyError {
+    Forbidden,
+    Archived,
+    NotArchived,
+    FolderNotSupported,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct DocumentState {
+    pub doc_type: DocumentType,
+    pub archived: bool,
+}
+
+impl DocumentState {
+    pub fn new(doc_type: DocumentType, archived_at: Option<DateTime<Utc>>) -> Self {
+        Self {
+            doc_type,
+            archived: archived_at.is_some(),
+        }
+    }
+}
+
+pub fn ensure_duplicate_allowed(state: DocumentState) -> Result<(), DocumentPolicyError> {
+    if state.doc_type.is_folder() {
+        return Err(DocumentPolicyError::FolderNotSupported);
+    }
+    Ok(())
+}
+
+pub fn ensure_editable(
+    state: DocumentState,
+    permissions: &PermissionSet,
+) -> Result<(), DocumentPolicyError> {
+    ensure_active(state)?;
+    permissions::ensure_can_edit(permissions,
state.doc_type) + .map_err(|_| DocumentPolicyError::Forbidden) +} + +pub fn ensure_movable( + state: DocumentState, + permissions: &PermissionSet, +) -> Result<(), DocumentPolicyError> { + ensure_active(state)?; + permissions::ensure_can_move(permissions, state.doc_type) + .map_err(|_| DocumentPolicyError::Forbidden) +} + +pub fn ensure_archivable( + state: DocumentState, + permissions: &PermissionSet, +) -> Result<(), DocumentPolicyError> { + if state.archived { + return Err(DocumentPolicyError::Archived); + } + permissions::ensure_can_archive(permissions, state.doc_type) + .map_err(|_| DocumentPolicyError::Forbidden) +} + +pub fn ensure_unarchivable( + state: DocumentState, + permissions: &PermissionSet, +) -> Result<(), DocumentPolicyError> { + if !state.archived { + return Err(DocumentPolicyError::NotArchived); + } + permissions::ensure_can_archive(permissions, state.doc_type) + .map_err(|_| DocumentPolicyError::Forbidden) +} + +pub fn ensure_active(state: DocumentState) -> Result<(), DocumentPolicyError> { + if state.archived { + Err(DocumentPolicyError::Archived) + } else { + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::access::permissions::{PERM_DOC_ARCHIVE, PERM_DOC_EDIT, PERM_DOC_MOVE}; + use crate::documents::doc_type::DocumentType; + + #[test] + fn duplicate_folder_is_not_allowed() { + let state = DocumentState::new(DocumentType::Folder, None); + assert_eq!( + ensure_duplicate_allowed(state), + Err(DocumentPolicyError::FolderNotSupported) + ); + let state = DocumentState::new(DocumentType::Document, None); + assert_eq!(ensure_duplicate_allowed(state), Ok(())); + } + + #[test] + fn editable_requires_active_and_perm() { + let perms = PermissionSet::from_slice(&[PERM_DOC_EDIT]); + let active = DocumentState::new(DocumentType::Document, None); + assert_eq!(ensure_editable(active, &perms), Ok(())); + + let archived = DocumentState::new(DocumentType::Document, Some(Utc::now())); + assert_eq!( + ensure_editable(archived, &perms), + Err(DocumentPolicyError::Archived) + ); + + let missing_perm = PermissionSet::default(); + assert_eq!( + ensure_editable(active, &missing_perm), + Err(DocumentPolicyError::Forbidden) + ); + } + + #[test] + fn movable_requires_active_and_perm() { + let perms = PermissionSet::from_slice(&[PERM_DOC_MOVE]); + let active = DocumentState::new(DocumentType::Document, None); + assert_eq!(ensure_movable(active, &perms), Ok(())); + + let archived = DocumentState::new(DocumentType::Document, Some(Utc::now())); + assert_eq!( + ensure_movable(archived, &perms), + Err(DocumentPolicyError::Archived) + ); + + let missing_perm = PermissionSet::default(); + assert_eq!( + ensure_movable(active, &missing_perm), + Err(DocumentPolicyError::Forbidden) + ); + } + + #[test] + fn archivable_and_unarchivable_are_guarded_by_state_and_perm() { + let perms = PermissionSet::from_slice(&[PERM_DOC_ARCHIVE]); + let active = DocumentState::new(DocumentType::Document, None); + let archived = DocumentState::new(DocumentType::Document, Some(Utc::now())); + + assert_eq!(ensure_archivable(active, &perms), Ok(())); + assert_eq!( + ensure_archivable(archived, &perms), + Err(DocumentPolicyError::Archived) + ); + + assert_eq!( + ensure_unarchivable(active, &perms), + Err(DocumentPolicyError::NotArchived) + ); + assert_eq!(ensure_unarchivable(archived, &perms), Ok(())); + + let missing_perm = PermissionSet::default(); + assert_eq!( + ensure_archivable(active, &missing_perm), + Err(DocumentPolicyError::Forbidden) + ); + assert_eq!( + ensure_unarchivable(archived, 
&missing_perm), + Err(DocumentPolicyError::Forbidden) + ); + } + + #[test] + fn ensure_active_rejects_archived() { + let active = DocumentState::new(DocumentType::Document, None); + assert_eq!(ensure_active(active), Ok(())); + let archived = DocumentState::new(DocumentType::Document, Some(Utc::now())); + assert_eq!(ensure_active(archived), Err(DocumentPolicyError::Archived)); + } +} diff --git a/api/crates/domain/src/documents/public_policy.rs b/api/crates/domain/src/documents/public_policy.rs new file mode 100644 index 00000000..249e2b18 --- /dev/null +++ b/api/crates/domain/src/documents/public_policy.rs @@ -0,0 +1,52 @@ +use crate::access::permissions::{PERM_PUBLIC_PUBLISH, PERM_PUBLIC_UNPUBLISH, PermissionSet}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PublicPolicyError { + Forbidden, +} + +pub fn ensure_public_publish_allowed(permissions: &PermissionSet) -> Result<(), PublicPolicyError> { + if permissions.allows(PERM_PUBLIC_PUBLISH) { + Ok(()) + } else { + Err(PublicPolicyError::Forbidden) + } +} + +pub fn ensure_public_unpublish_allowed( + permissions: &PermissionSet, +) -> Result<(), PublicPolicyError> { + if permissions.allows(PERM_PUBLIC_UNPUBLISH) { + Ok(()) + } else { + Err(PublicPolicyError::Forbidden) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::access::permissions::{PERM_PUBLIC_PUBLISH, PERM_PUBLIC_UNPUBLISH}; + + #[test] + fn publish_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_public_publish_allowed(&perms), + Err(PublicPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_PUBLIC_PUBLISH]); + assert_eq!(ensure_public_publish_allowed(&perms), Ok(())); + } + + #[test] + fn unpublish_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_public_unpublish_allowed(&perms), + Err(PublicPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_PUBLIC_UNPUBLISH]); + assert_eq!(ensure_public_unpublish_allowed(&perms), Ok(())); + } +} diff --git a/api/crates/domain/src/documents/share.rs b/api/crates/domain/src/documents/share.rs new file mode 100644 index 00000000..acea5493 --- /dev/null +++ b/api/crates/domain/src/documents/share.rs @@ -0,0 +1,147 @@ +use chrono::{DateTime, Utc}; +use std::fmt; +use std::str::FromStr; +use uuid::Uuid; + +use crate::documents::doc_type::DocumentType; + +pub const SHARE_PERMISSION_VIEW: &str = "view"; +pub const SHARE_PERMISSION_EDIT: &str = "edit"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SharePermission { + View, + Edit, +} + +impl SharePermission { + pub fn parse(permission: &str) -> Option<Self> { + match normalize_permission(permission)?
{ + SHARE_PERMISSION_VIEW => Some(SharePermission::View), + SHARE_PERMISSION_EDIT => Some(SharePermission::Edit), + _ => None, + } + } + + pub fn as_str(self) -> &'static str { + match self { + SharePermission::View => SHARE_PERMISSION_VIEW, + SharePermission::Edit => SHARE_PERMISSION_EDIT, + } + } + + pub fn allows_edit(self) -> bool { + matches!(self, SharePermission::Edit) + } +} + +impl fmt::Display for SharePermission { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidSharePermission; + +impl fmt::Display for InvalidSharePermission { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid share permission") + } +} + +impl std::error::Error for InvalidSharePermission {} + +impl FromStr for SharePermission { + type Err = InvalidSharePermission; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Self::parse(s).ok_or(InvalidSharePermission) + } +} + +#[derive(Debug, Clone)] +pub struct ShareContext { + pub share_id: Uuid, + pub permission: SharePermission, + pub expires_at: Option<DateTime<Utc>>, + pub shared_id: Uuid, + pub shared_type: DocumentType, + pub workspace_id: Uuid, +} + +pub fn normalize_permission(permission: &str) -> Option<&'static str> { + match permission.trim() { + SHARE_PERMISSION_VIEW => Some(SHARE_PERMISSION_VIEW), + SHARE_PERMISSION_EDIT => Some(SHARE_PERMISSION_EDIT), + _ => None, + } +} + +pub fn is_edit_permission(permission: &str) -> bool { + permission.trim() == SHARE_PERMISSION_EDIT +} + +pub fn is_expired(expires_at: Option<&DateTime<Utc>>, now: DateTime<Utc>) -> bool { + matches!(expires_at, Some(exp) if *exp <= now) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn normalize_permission_accepts_view_and_edit() { + assert_eq!(normalize_permission("view"), Some(SHARE_PERMISSION_VIEW)); + assert_eq!(normalize_permission("edit"), Some(SHARE_PERMISSION_EDIT)); + assert_eq!(normalize_permission(" view "), Some(SHARE_PERMISSION_VIEW)); + } + + #[test] + fn normalize_permission_rejects_unknown() { + assert_eq!(normalize_permission(""), None); + assert_eq!(normalize_permission("owner"), None); + } + + #[test] + fn share_permission_parses_and_formats() { + assert_eq!(SharePermission::parse("view"), Some(SharePermission::View)); + assert_eq!(SharePermission::parse("edit"), Some(SharePermission::Edit)); + assert_eq!(SharePermission::parse("nope"), None); + assert_eq!(SharePermission::Edit.as_str(), SHARE_PERMISSION_EDIT); + assert!(SharePermission::Edit.allows_edit()); + assert!(!SharePermission::View.allows_edit()); + } + + #[test] + fn none_is_not_expired() { + assert!(!is_expired(None, Utc::now())); + } + + #[test] + fn past_is_expired() { + let now = Utc::now(); + let past = now - chrono::Duration::seconds(1); + assert!(is_expired(Some(&past), now)); + } + + #[test] + fn future_is_not_expired() { + let now = Utc::now(); + let future = now + chrono::Duration::seconds(1); + assert!(!is_expired(Some(&future), now)); + } + + #[test] + fn exactly_now_is_expired() { + let now = Utc::now(); + assert!(is_expired(Some(&now), now)); + } + + #[test] + fn is_edit_permission_trims_and_is_strict() { + assert!(is_edit_permission(" edit ")); + assert!(!is_edit_permission("view")); + assert!(!is_edit_permission("EDIT")); + } +} diff --git a/api/crates/domain/src/documents/sharing_policy.rs b/api/crates/domain/src/documents/sharing_policy.rs new file mode 100644 index 00000000..d41bc5b2 --- /dev/null +++ b/api/crates/domain/src/documents/sharing_policy.rs @@ -0,0
+1,71 @@ +use crate::access::permissions::{ + PERM_DOC_VIEW, PERM_SHARE_CREATE, PERM_SHARE_DELETE, PermissionSet, +}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SharingPolicyError { + Forbidden, +} + +pub fn ensure_share_create_allowed(permissions: &PermissionSet) -> Result<(), SharingPolicyError> { + if permissions.allows(PERM_SHARE_CREATE) { + Ok(()) + } else { + Err(SharingPolicyError::Forbidden) + } +} + +pub fn ensure_share_delete_allowed(permissions: &PermissionSet) -> Result<(), SharingPolicyError> { + if permissions.allows(PERM_SHARE_DELETE) { + Ok(()) + } else { + Err(SharingPolicyError::Forbidden) + } +} + +pub fn ensure_document_view_allowed(permissions: &PermissionSet) -> Result<(), SharingPolicyError> { + if permissions.allows(PERM_DOC_VIEW) { + Ok(()) + } else { + Err(SharingPolicyError::Forbidden) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::access::permissions::{PERM_DOC_VIEW, PERM_SHARE_CREATE, PERM_SHARE_DELETE}; + + #[test] + fn share_create_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_share_create_allowed(&perms), + Err(SharingPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_SHARE_CREATE]); + assert_eq!(ensure_share_create_allowed(&perms), Ok(())); + } + + #[test] + fn share_delete_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_share_delete_allowed(&perms), + Err(SharingPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_SHARE_DELETE]); + assert_eq!(ensure_share_delete_allowed(&perms), Ok(())); + } + + #[test] + fn document_view_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_document_view_allowed(&perms), + Err(SharingPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_DOC_VIEW]); + assert_eq!(ensure_document_view_allowed(&perms), Ok(())); + } +} diff --git a/api/crates/domain/src/documents/title.rs b/api/crates/domain/src/documents/title.rs new file mode 100644 index 00000000..57880527 --- /dev/null +++ b/api/crates/domain/src/documents/title.rs @@ -0,0 +1,88 @@ +use std::fmt; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Title(String); + +impl Title { + pub fn new(raw: impl Into<String>) -> Self { + Self(raw.into()) + } + + pub fn from_user_input(raw: &str) -> Self { + let trimmed = raw.trim(); + if trimmed.is_empty() { + Self("Untitled".to_string()) + } else { + Self(trimmed.to_string()) + } + } + + pub fn as_str(&self) -> &str { + &self.0 + } + + pub fn into_string(self) -> String { + self.0 + } +} + +impl fmt::Display for Title { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +pub fn duplicate_title(source_title: &Title, override_title: Option<String>) -> Title { + if let Some(custom) = override_title { + let trimmed = custom.trim(); + if !trimmed.is_empty() { + return Title::new(trimmed.to_string()); + } + } + let base = source_title.as_str().trim(); + let fallback = if base.is_empty() { "Untitled" } else { base }; + Title::new(format!("{fallback} (Copy)")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn from_user_input_trims_and_falls_back() { + assert_eq!(Title::from_user_input(" Hello ").as_str(), "Hello"); + assert_eq!(Title::from_user_input(" ").as_str(), "Untitled"); + } + + #[test] + fn uses_override_when_present() { + assert_eq!( + duplicate_title(&Title::new("src"), Some("Custom".into())).as_str(), + "Custom" + ); + } + + #[test] + fn
falls_back_to_source_with_suffix() { + assert_eq!( + duplicate_title(&Title::new("Doc"), None).as_str(), + "Doc (Copy)" + ); + } + + #[test] + fn empty_source_uses_untitled() { + assert_eq!( + duplicate_title(&Title::new(""), None).as_str(), + "Untitled (Copy)" + ); + } + + #[test] + fn empty_override_ignored() { + assert_eq!( + duplicate_title(&Title::new("Doc"), Some(" ".into())).as_str(), + "Doc (Copy)" + ); + } +} diff --git a/api/crates/domain/src/git/auth.rs b/api/crates/domain/src/git/auth.rs new file mode 100644 index 00000000..f15ec27d --- /dev/null +++ b/api/crates/domain/src/git/auth.rs @@ -0,0 +1,82 @@ +use std::fmt; +use std::str::FromStr; + +pub const GIT_AUTH_TYPE_TOKEN: &str = "token"; +pub const GIT_AUTH_TYPE_SSH: &str = "ssh"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum GitAuthType { + Token, + Ssh, +} + +impl GitAuthType { + pub fn parse(value: &str) -> Option { + match value.trim() { + GIT_AUTH_TYPE_TOKEN => Some(Self::Token), + GIT_AUTH_TYPE_SSH => Some(Self::Ssh), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Token => GIT_AUTH_TYPE_TOKEN, + Self::Ssh => GIT_AUTH_TYPE_SSH, + } + } + + pub fn validate_repository_url(self, repository_url: &str) -> bool { + match self { + Self::Token => repository_url.starts_with("https://"), + Self::Ssh => true, + } + } +} + +impl fmt::Display for GitAuthType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidGitAuthType; + +impl fmt::Display for InvalidGitAuthType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid git auth type") + } +} + +impl std::error::Error for InvalidGitAuthType {} + +impl FromStr for GitAuthType { + type Err = InvalidGitAuthType; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidGitAuthType) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_and_formats() { + assert_eq!(GitAuthType::parse(" token "), Some(GitAuthType::Token)); + assert_eq!(GitAuthType::parse("ssh"), Some(GitAuthType::Ssh)); + assert_eq!(GitAuthType::parse("nope"), None); + assert_eq!(GitAuthType::Token.as_str(), GIT_AUTH_TYPE_TOKEN); + assert_eq!(GitAuthType::Ssh.to_string(), GIT_AUTH_TYPE_SSH); + } + + #[test] + fn token_requires_https_url() { + assert!(GitAuthType::Token.validate_repository_url("https://example.com/repo.git")); + assert!(!GitAuthType::Token.validate_repository_url("http://example.com/repo.git")); + assert!(GitAuthType::Ssh.validate_repository_url("ssh://example.com/repo.git")); + assert!(GitAuthType::Ssh.validate_repository_url("git@example.com:repo.git")); + } +} diff --git a/api/crates/domain/src/git/mod.rs b/api/crates/domain/src/git/mod.rs new file mode 100644 index 00000000..f1c614ea --- /dev/null +++ b/api/crates/domain/src/git/mod.rs @@ -0,0 +1,6 @@ +// Git sync/pull/ignore/rebuild domain lives here. 
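+ // A minimal call-site sketch (an illustrative assumption, not part of this
+ // change): token auth only accepts HTTPS remotes, so a settings handler can
+ // reject a mismatched repository URL before any clone is attempted. The
+ // function name and error strings below are hypothetical.
+ fn _sketch_validate_remote(auth_type: &str, repo_url: &str) -> Result<(), String> {
+     use crate::git::auth::GitAuthType;
+     // `GitAuthType` implements `FromStr`, so `parse()` works here.
+     let auth: GitAuthType = auth_type
+         .parse()
+         .map_err(|_| "unknown git auth type".to_string())?;
+     if !auth.validate_repository_url(repo_url) {
+         // `Display` renders the canonical string form ("token" / "ssh").
+         return Err(format!("{auth} auth cannot use repository URL {repo_url}"));
+     }
+     Ok(())
+ }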
+ +pub mod auth; +pub mod policy; +pub mod pull_session; +pub mod sync_log; diff --git a/api/crates/domain/src/git/policy.rs b/api/crates/domain/src/git/policy.rs new file mode 100644 index 00000000..80700d5a --- /dev/null +++ b/api/crates/domain/src/git/policy.rs @@ -0,0 +1,31 @@ +use crate::access::permissions::{PERM_GIT_SYNC, PermissionSet}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum GitPolicyError { + Forbidden, +} + +pub fn ensure_git_sync_allowed(permissions: &PermissionSet) -> Result<(), GitPolicyError> { + if permissions.allows(PERM_GIT_SYNC) { + Ok(()) + } else { + Err(GitPolicyError::Forbidden) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::access::permissions::PERM_GIT_SYNC; + + #[test] + fn git_sync_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_git_sync_allowed(&perms), + Err(GitPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_GIT_SYNC]); + assert_eq!(ensure_git_sync_allowed(&perms), Ok(())); + } +} diff --git a/api/crates/domain/src/git/pull_session.rs b/api/crates/domain/src/git/pull_session.rs new file mode 100644 index 00000000..f06e3a0d --- /dev/null +++ b/api/crates/domain/src/git/pull_session.rs @@ -0,0 +1,110 @@ +use std::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +pub const GIT_PULL_STATUS_PENDING: &str = "pending"; +pub const GIT_PULL_STATUS_RESOLVING: &str = "resolving"; +pub const GIT_PULL_STATUS_MERGED: &str = "merged"; +pub const GIT_PULL_STATUS_STALE: &str = "stale"; +pub const GIT_PULL_STATUS_ERROR: &str = "error"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum GitPullSessionStatus { + Pending, + Resolving, + Merged, + Stale, + Error, +} + +impl GitPullSessionStatus { + pub fn parse(value: &str) -> Option { + match value.trim() { + GIT_PULL_STATUS_PENDING => Some(Self::Pending), + GIT_PULL_STATUS_RESOLVING => Some(Self::Resolving), + GIT_PULL_STATUS_MERGED => Some(Self::Merged), + GIT_PULL_STATUS_STALE => Some(Self::Stale), + GIT_PULL_STATUS_ERROR => Some(Self::Error), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Pending => GIT_PULL_STATUS_PENDING, + Self::Resolving => GIT_PULL_STATUS_RESOLVING, + Self::Merged => GIT_PULL_STATUS_MERGED, + Self::Stale => GIT_PULL_STATUS_STALE, + Self::Error => GIT_PULL_STATUS_ERROR, + } + } + + pub const fn is_in_progress(self) -> bool { + matches!(self, Self::Pending | Self::Resolving) + } +} + +impl fmt::Display for GitPullSessionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidGitPullSessionStatus; + +impl fmt::Display for InvalidGitPullSessionStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid git pull session status") + } +} + +impl std::error::Error for InvalidGitPullSessionStatus {} + +impl FromStr for GitPullSessionStatus { + type Err = InvalidGitPullSessionStatus; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidGitPullSessionStatus) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_formats_and_in_progress() { + assert_eq!( + GitPullSessionStatus::parse(" pending "), + Some(GitPullSessionStatus::Pending) + ); + assert_eq!( + GitPullSessionStatus::parse("resolving"), + Some(GitPullSessionStatus::Resolving) + ); + assert_eq!( + GitPullSessionStatus::parse("merged"), + 
Some(GitPullSessionStatus::Merged) + ); + assert_eq!( + GitPullSessionStatus::parse("stale"), + Some(GitPullSessionStatus::Stale) + ); + assert_eq!( + GitPullSessionStatus::parse("error"), + Some(GitPullSessionStatus::Error) + ); + assert_eq!(GitPullSessionStatus::parse("nope"), None); + + assert!(GitPullSessionStatus::Pending.is_in_progress()); + assert!(GitPullSessionStatus::Resolving.is_in_progress()); + assert!(!GitPullSessionStatus::Merged.is_in_progress()); + assert_eq!( + GitPullSessionStatus::Merged.as_str(), + GIT_PULL_STATUS_MERGED + ); + } +} diff --git a/api/crates/domain/src/git/sync_log.rs b/api/crates/domain/src/git/sync_log.rs new file mode 100644 index 00000000..53cd82a2 --- /dev/null +++ b/api/crates/domain/src/git/sync_log.rs @@ -0,0 +1,145 @@ +use std::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +pub const GIT_SYNC_OPERATION_PUSH: &str = "push"; +pub const GIT_SYNC_OPERATION_PULL: &str = "pull"; +pub const GIT_SYNC_OPERATION_COMMIT: &str = "commit"; +pub const GIT_SYNC_OPERATION_INIT: &str = "init"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum GitSyncOperation { + Push, + Pull, + Commit, + Init, +} + +impl GitSyncOperation { + pub fn parse(value: &str) -> Option { + match value.trim() { + GIT_SYNC_OPERATION_PUSH => Some(Self::Push), + GIT_SYNC_OPERATION_PULL => Some(Self::Pull), + GIT_SYNC_OPERATION_COMMIT => Some(Self::Commit), + GIT_SYNC_OPERATION_INIT => Some(Self::Init), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Push => GIT_SYNC_OPERATION_PUSH, + Self::Pull => GIT_SYNC_OPERATION_PULL, + Self::Commit => GIT_SYNC_OPERATION_COMMIT, + Self::Init => GIT_SYNC_OPERATION_INIT, + } + } +} + +impl fmt::Display for GitSyncOperation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidGitSyncOperation; + +impl fmt::Display for InvalidGitSyncOperation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid git sync operation") + } +} + +impl std::error::Error for InvalidGitSyncOperation {} + +impl FromStr for GitSyncOperation { + type Err = InvalidGitSyncOperation; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidGitSyncOperation) + } +} + +pub const GIT_SYNC_STATUS_SUCCESS: &str = "success"; +pub const GIT_SYNC_STATUS_ERROR: &str = "error"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum GitSyncStatus { + Success, + Error, +} + +impl GitSyncStatus { + pub fn parse(value: &str) -> Option { + match value.trim() { + GIT_SYNC_STATUS_SUCCESS => Some(Self::Success), + GIT_SYNC_STATUS_ERROR => Some(Self::Error), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Success => GIT_SYNC_STATUS_SUCCESS, + Self::Error => GIT_SYNC_STATUS_ERROR, + } + } +} + +impl fmt::Display for GitSyncStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidGitSyncStatus; + +impl fmt::Display for InvalidGitSyncStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid git sync status") + } +} + +impl std::error::Error for InvalidGitSyncStatus {} + +impl FromStr for GitSyncStatus { + type Err = InvalidGitSyncStatus; + + fn from_str(s: &str) -> 
Result { + Self::parse(s).ok_or(InvalidGitSyncStatus) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_and_formats() { + assert_eq!( + GitSyncOperation::parse(" push "), + Some(GitSyncOperation::Push) + ); + assert_eq!( + GitSyncOperation::parse("commit"), + Some(GitSyncOperation::Commit) + ); + assert_eq!(GitSyncOperation::parse("nope"), None); + assert_eq!(GitSyncOperation::Init.as_str(), "init"); + assert_eq!(GitSyncOperation::Pull.to_string(), "pull"); + + assert_eq!( + GitSyncStatus::parse(" success "), + Some(GitSyncStatus::Success) + ); + assert_eq!(GitSyncStatus::parse("error"), Some(GitSyncStatus::Error)); + assert_eq!(GitSyncStatus::parse("nope"), None); + assert_eq!(GitSyncStatus::Success.as_str(), "success"); + assert_eq!(GitSyncStatus::Error.to_string(), "error"); + } +} diff --git a/api/crates/domain/src/identity/api_token.rs b/api/crates/domain/src/identity/api_token.rs new file mode 100644 index 00000000..45e9ecb3 --- /dev/null +++ b/api/crates/domain/src/identity/api_token.rs @@ -0,0 +1,7 @@ +use uuid::Uuid; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ApiTokenSubject { + pub owner_id: Uuid, + pub workspace_id: Uuid, +} diff --git a/api/crates/domain/src/identity/mod.rs b/api/crates/domain/src/identity/mod.rs new file mode 100644 index 00000000..a178a951 --- /dev/null +++ b/api/crates/domain/src/identity/mod.rs @@ -0,0 +1,4 @@ +// Identity (auth/sessions/api_tokens) domain lives here. + +pub mod api_token; +pub mod policy; diff --git a/api/crates/domain/src/identity/policy.rs b/api/crates/domain/src/identity/policy.rs new file mode 100644 index 00000000..d19bd52e --- /dev/null +++ b/api/crates/domain/src/identity/policy.rs @@ -0,0 +1,54 @@ +use crate::access::permissions::{PERM_API_TOKEN_MANAGE, PERM_SHORTCUT_UPDATE, PermissionSet}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum IdentityPolicyError { + Forbidden, +} + +pub fn ensure_api_token_manage_allowed( + permissions: &PermissionSet, +) -> Result<(), IdentityPolicyError> { + if permissions.allows(PERM_API_TOKEN_MANAGE) { + Ok(()) + } else { + Err(IdentityPolicyError::Forbidden) + } +} + +pub fn ensure_shortcut_update_allowed( + permissions: &PermissionSet, +) -> Result<(), IdentityPolicyError> { + if permissions.allows(PERM_SHORTCUT_UPDATE) { + Ok(()) + } else { + Err(IdentityPolicyError::Forbidden) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::access::permissions::{PERM_API_TOKEN_MANAGE, PERM_SHORTCUT_UPDATE}; + + #[test] + fn api_token_manage_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_api_token_manage_allowed(&perms), + Err(IdentityPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_API_TOKEN_MANAGE]); + assert_eq!(ensure_api_token_manage_allowed(&perms), Ok(())); + } + + #[test] + fn shortcut_update_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_shortcut_update_allowed(&perms), + Err(IdentityPolicyError::Forbidden) + ); + let perms = PermissionSet::from_slice(&[PERM_SHORTCUT_UPDATE]); + assert_eq!(ensure_shortcut_update_allowed(&perms), Ok(())); + } +} diff --git a/api/crates/domain/src/lib.rs b/api/crates/domain/src/lib.rs new file mode 100644 index 00000000..ea6d3c94 --- /dev/null +++ b/api/crates/domain/src/lib.rs @@ -0,0 +1,8 @@ +pub mod access; +pub mod core; +pub mod documents; +pub mod git; +pub mod identity; +pub mod plugins; +pub mod storage; +pub mod workspaces; diff --git a/api/crates/domain/src/plugins/events.rs 
b/api/crates/domain/src/plugins/events.rs new file mode 100644 index 00000000..b4dadf65 --- /dev/null +++ b/api/crates/domain/src/plugins/events.rs @@ -0,0 +1,92 @@ +use std::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +pub const PLUGIN_EVENT_INSTALLED: &str = "installed"; +pub const PLUGIN_EVENT_UNINSTALLED: &str = "uninstalled"; +pub const PLUGIN_EVENT_UPDATED: &str = "updated"; +pub const PLUGIN_EVENT_PUBLISH: &str = "publish"; +pub const PLUGIN_EVENT_UNPUBLISH: &str = "unpublish"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum PluginEventKind { + Installed, + Uninstalled, + Updated, + Publish, + Unpublish, +} + +impl PluginEventKind { + pub fn parse(value: &str) -> Option { + match value.trim() { + PLUGIN_EVENT_INSTALLED => Some(Self::Installed), + PLUGIN_EVENT_UNINSTALLED => Some(Self::Uninstalled), + PLUGIN_EVENT_UPDATED => Some(Self::Updated), + PLUGIN_EVENT_PUBLISH => Some(Self::Publish), + PLUGIN_EVENT_UNPUBLISH => Some(Self::Unpublish), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Installed => PLUGIN_EVENT_INSTALLED, + Self::Uninstalled => PLUGIN_EVENT_UNINSTALLED, + Self::Updated => PLUGIN_EVENT_UPDATED, + Self::Publish => PLUGIN_EVENT_PUBLISH, + Self::Unpublish => PLUGIN_EVENT_UNPUBLISH, + } + } + + pub const fn affects_manifests(self) -> bool { + true + } +} + +impl fmt::Display for PluginEventKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidPluginEventKind; + +impl fmt::Display for InvalidPluginEventKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid plugin event kind") + } +} + +impl std::error::Error for InvalidPluginEventKind {} + +impl FromStr for PluginEventKind { + type Err = InvalidPluginEventKind; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidPluginEventKind) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_and_formats() { + assert_eq!( + PluginEventKind::parse(" installed "), + Some(PluginEventKind::Installed) + ); + assert_eq!( + PluginEventKind::parse("uninstalled"), + Some(PluginEventKind::Uninstalled) + ); + assert_eq!(PluginEventKind::parse("nope"), None); + assert_eq!(PluginEventKind::Publish.as_str(), "publish"); + assert_eq!(PluginEventKind::Unpublish.to_string(), "unpublish"); + } +} diff --git a/api/crates/domain/src/plugins/mod.rs b/api/crates/domain/src/plugins/mod.rs new file mode 100644 index 00000000..34e4b855 --- /dev/null +++ b/api/crates/domain/src/plugins/mod.rs @@ -0,0 +1,5 @@ +// Plugin execution/management/records domain lives here. 
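+ // Usage sketch (a hypothetical helper, shown only to illustrate the API in
+ // events.rs): event kinds cross the plugin boundary as strings and are
+ // re-parsed with `PluginEventKind::parse`, which trims surrounding
+ // whitespace before matching.
+ fn _sketch_normalize_event(raw: &str) -> Option<&'static str> {
+     use crate::plugins::events::PluginEventKind;
+     PluginEventKind::parse(raw).map(|kind| kind.as_str())
+ }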
+ +pub mod events; +pub mod policy; +pub mod scope; diff --git a/api/crates/domain/src/plugins/policy.rs b/api/crates/domain/src/plugins/policy.rs new file mode 100644 index 00000000..9f48c59a --- /dev/null +++ b/api/crates/domain/src/plugins/policy.rs @@ -0,0 +1,149 @@ +use std::collections::HashSet; + +use uuid::Uuid; + +use crate::access::permissions::{PERM_DOC_CREATE, PERM_DOC_EDIT, PermissionSet}; + +pub const PLUGIN_PERMISSION_DOC_WRITE: &str = "doc.write"; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PluginPolicyError { + PermissionDenied { permission: String }, +} + +pub fn ensure_plugin_permission( + permissions: &HashSet<String>, + required: &str, +) -> Result<(), PluginPolicyError> { + if permissions.iter().any(|p| p == required) { + Ok(()) + } else { + Err(PluginPolicyError::PermissionDenied { + permission: required.to_string(), + }) + } +} + +pub fn ensure_workspace_can_create_documents( + permissions: &PermissionSet, +) -> Result<(), PluginPolicyError> { + if permissions.allows(PERM_DOC_CREATE) { + Ok(()) + } else { + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_CREATE.to_string(), + }) + } +} + +pub fn ensure_workspace_can_edit_documents( + permissions: &PermissionSet, +) -> Result<(), PluginPolicyError> { + if permissions.allows(PERM_DOC_EDIT) { + Ok(()) + } else { + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_EDIT.to_string(), + }) + } +} + +pub fn ensure_doc_id_within_allowed_scope( + doc_id: Uuid, + allowed_doc_id: Uuid, +) -> Result<(), PluginPolicyError> { + if doc_id == allowed_doc_id { + Ok(()) + } else { + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_EDIT.to_string(), + }) + } +} + +pub fn ensure_record_owned_by_plugin( + record_plugin: &str, + plugin: &str, +) -> Result<(), PluginPolicyError> { + if record_plugin == plugin { + Ok(()) + } else { + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_EDIT.to_string(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::access::permissions::{PERM_DOC_CREATE, PERM_DOC_EDIT}; + + #[test] + fn plugin_permission_requires_membership() { + let perms: HashSet<String> = HashSet::new(); + assert_eq!( + ensure_plugin_permission(&perms, PLUGIN_PERMISSION_DOC_WRITE), + Err(PluginPolicyError::PermissionDenied { + permission: PLUGIN_PERMISSION_DOC_WRITE.to_string() + }) + ); + let perms: HashSet<String> = [PLUGIN_PERMISSION_DOC_WRITE.to_string()] + .into_iter() + .collect(); + assert_eq!( + ensure_plugin_permission(&perms, PLUGIN_PERMISSION_DOC_WRITE), + Ok(()) + ); + } + + #[test] + fn workspace_create_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_workspace_can_create_documents(&perms), + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_CREATE.to_string() + }) + ); + let perms = PermissionSet::from_slice(&[PERM_DOC_CREATE]); + assert_eq!(ensure_workspace_can_create_documents(&perms), Ok(())); + } + + #[test] + fn workspace_edit_requires_permission() { + let perms = PermissionSet::default(); + assert_eq!( + ensure_workspace_can_edit_documents(&perms), + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_EDIT.to_string() + }) + ); + let perms = PermissionSet::from_slice(&[PERM_DOC_EDIT]); + assert_eq!(ensure_workspace_can_edit_documents(&perms), Ok(())); + } + + #[test] + fn doc_scope_must_match_allowed_doc_id() { + let allowed = Uuid::new_v4(); + let other = Uuid::new_v4(); + assert_eq!( + ensure_doc_id_within_allowed_scope(other, allowed), + Err(PluginPolicyError::PermissionDenied {
permission: PERM_DOC_EDIT.to_string() + }) + ); + assert_eq!(ensure_doc_id_within_allowed_scope(allowed, allowed), Ok(())); + } + + #[test] + fn record_owner_must_match_plugin() { + assert_eq!(ensure_record_owned_by_plugin("a", "a"), Ok(())); + assert_eq!( + ensure_record_owned_by_plugin("a", "b"), + Err(PluginPolicyError::PermissionDenied { + permission: PERM_DOC_EDIT.to_string() + }) + ); + } +} diff --git a/api/crates/domain/src/plugins/scope.rs b/api/crates/domain/src/plugins/scope.rs new file mode 100644 index 00000000..ff5cf001 --- /dev/null +++ b/api/crates/domain/src/plugins/scope.rs @@ -0,0 +1,197 @@ +use std::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +pub const PLUGIN_SCOPE_GLOBAL: &str = "global"; +pub const PLUGIN_SCOPE_USER: &str = "user"; +pub const PLUGIN_SCOPE_DOC: &str = "doc"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum PluginScope { + Global, + User, + Doc, +} + +impl PluginScope { + pub fn parse(value: &str) -> Option { + match value.trim() { + PLUGIN_SCOPE_GLOBAL => Some(Self::Global), + PLUGIN_SCOPE_USER => Some(Self::User), + PLUGIN_SCOPE_DOC => Some(Self::Doc), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Global => PLUGIN_SCOPE_GLOBAL, + Self::User => PLUGIN_SCOPE_USER, + Self::Doc => PLUGIN_SCOPE_DOC, + } + } +} + +impl fmt::Display for PluginScope { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidPluginScope; + +impl fmt::Display for InvalidPluginScope { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid plugin scope") + } +} + +impl std::error::Error for InvalidPluginScope {} + +impl FromStr for PluginScope { + type Err = InvalidPluginScope; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidPluginScope) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum PluginRecordScope { + User, + Doc, +} + +impl PluginRecordScope { + pub fn parse(value: &str) -> Option { + match value.trim() { + PLUGIN_SCOPE_USER => Some(Self::User), + PLUGIN_SCOPE_DOC => Some(Self::Doc), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::User => PLUGIN_SCOPE_USER, + Self::Doc => PLUGIN_SCOPE_DOC, + } + } +} + +impl fmt::Display for PluginRecordScope { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidPluginRecordScope; + +impl fmt::Display for InvalidPluginRecordScope { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid plugin record scope") + } +} + +impl std::error::Error for InvalidPluginRecordScope {} + +impl FromStr for PluginRecordScope { + type Err = InvalidPluginRecordScope; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidPluginRecordScope) + } +} + +pub const PLUGIN_INSTALLATION_STATUS_ENABLED: &str = "enabled"; +pub const PLUGIN_INSTALLATION_STATUS_DISABLED: &str = "disabled"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum PluginInstallationStatus { + Enabled, + Disabled, +} + +impl PluginInstallationStatus { + pub fn parse(value: &str) -> Option { + match value.trim() { + PLUGIN_INSTALLATION_STATUS_ENABLED => 
Some(Self::Enabled), + PLUGIN_INSTALLATION_STATUS_DISABLED => Some(Self::Disabled), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Enabled => PLUGIN_INSTALLATION_STATUS_ENABLED, + Self::Disabled => PLUGIN_INSTALLATION_STATUS_DISABLED, + } + } +} + +impl fmt::Display for PluginInstallationStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidPluginInstallationStatus; + +impl fmt::Display for InvalidPluginInstallationStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid plugin installation status") + } +} + +impl std::error::Error for InvalidPluginInstallationStatus {} + +impl FromStr for PluginInstallationStatus { + type Err = InvalidPluginInstallationStatus; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidPluginInstallationStatus) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_scopes() { + assert_eq!(PluginScope::parse("global"), Some(PluginScope::Global)); + assert_eq!(PluginScope::parse(" user "), Some(PluginScope::User)); + assert_eq!(PluginScope::parse("doc"), Some(PluginScope::Doc)); + assert_eq!(PluginScope::parse("nope"), None); + + assert_eq!( + PluginRecordScope::parse("user"), + Some(PluginRecordScope::User) + ); + assert_eq!( + PluginRecordScope::parse("doc"), + Some(PluginRecordScope::Doc) + ); + assert_eq!(PluginRecordScope::parse("global"), None); + } + + #[test] + fn parses_installation_status() { + assert_eq!( + PluginInstallationStatus::parse("enabled"), + Some(PluginInstallationStatus::Enabled) + ); + assert_eq!( + PluginInstallationStatus::parse(" disabled "), + Some(PluginInstallationStatus::Disabled) + ); + assert_eq!(PluginInstallationStatus::parse("nope"), None); + } +} diff --git a/api/crates/domain/src/storage/ingest_backend.rs b/api/crates/domain/src/storage/ingest_backend.rs new file mode 100644 index 00000000..d62df071 --- /dev/null +++ b/api/crates/domain/src/storage/ingest_backend.rs @@ -0,0 +1,69 @@ +use std::fmt; + +pub const STORAGE_INGEST_BACKEND_FS_WATCHER: &str = "fs_watcher"; +pub const STORAGE_INGEST_BACKEND_RECONCILE: &str = "reconcile"; +pub const STORAGE_INGEST_BACKEND_CONSISTENCY: &str = "consistency"; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageIngestBackend { + FsWatcher, + Reconcile, + Consistency, + Other(String), +} + +impl StorageIngestBackend { + pub fn parse(raw: &str) -> Self { + let trimmed = raw.trim(); + match trimmed { + STORAGE_INGEST_BACKEND_FS_WATCHER => Self::FsWatcher, + STORAGE_INGEST_BACKEND_RECONCILE => Self::Reconcile, + STORAGE_INGEST_BACKEND_CONSISTENCY => Self::Consistency, + other => Self::Other(other.to_string()), + } + } + + pub fn as_str(&self) -> &str { + match self { + Self::FsWatcher => STORAGE_INGEST_BACKEND_FS_WATCHER, + Self::Reconcile => STORAGE_INGEST_BACKEND_RECONCILE, + Self::Consistency => STORAGE_INGEST_BACKEND_CONSISTENCY, + Self::Other(value) => value.as_str(), + } + } + + pub fn is_fs_watcher(&self) -> bool { + matches!(self, Self::FsWatcher) + } +} + +impl fmt::Display for StorageIngestBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_known_and_preserves_unknown() { + assert_eq!( + StorageIngestBackend::parse("fs_watcher"), + StorageIngestBackend::FsWatcher + ); + assert_eq!( + StorageIngestBackend::parse(" reconcile 
"), + StorageIngestBackend::Reconcile + ); + assert_eq!( + StorageIngestBackend::parse("consistency"), + StorageIngestBackend::Consistency + ); + assert_eq!( + StorageIngestBackend::parse(" custom "), + StorageIngestBackend::Other("custom".to_string()) + ); + } +} diff --git a/api/crates/domain/src/storage/mod.rs b/api/crates/domain/src/storage/mod.rs new file mode 100644 index 00000000..578f0508 --- /dev/null +++ b/api/crates/domain/src/storage/mod.rs @@ -0,0 +1,3 @@ +// Storage ingest/reconcile domain types live here. + +pub mod ingest_backend; diff --git a/api/crates/domain/src/workspaces/mod.rs b/api/crates/domain/src/workspaces/mod.rs new file mode 100644 index 00000000..e6f32be6 --- /dev/null +++ b/api/crates/domain/src/workspaces/mod.rs @@ -0,0 +1,2 @@ +pub mod permissions; +pub mod roles; diff --git a/api/crates/domain/src/workspaces/permissions.rs b/api/crates/domain/src/workspaces/permissions.rs new file mode 100644 index 00000000..5f080864 --- /dev/null +++ b/api/crates/domain/src/workspaces/permissions.rs @@ -0,0 +1 @@ +pub use crate::access::permissions::*; diff --git a/api/crates/domain/src/workspaces/roles.rs b/api/crates/domain/src/workspaces/roles.rs new file mode 100644 index 00000000..98348a2a --- /dev/null +++ b/api/crates/domain/src/workspaces/roles.rs @@ -0,0 +1,210 @@ +use std::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +pub const WORKSPACE_ROLE_KIND_SYSTEM: &str = "system"; +pub const WORKSPACE_ROLE_KIND_CUSTOM: &str = "custom"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum WorkspaceRoleKind { + System, + Custom, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidWorkspaceRoleKind; + +impl fmt::Display for InvalidWorkspaceRoleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid workspace role kind") + } +} + +impl std::error::Error for InvalidWorkspaceRoleKind {} + +impl WorkspaceRoleKind { + pub fn parse(value: &str) -> Option { + match value.trim() { + WORKSPACE_ROLE_KIND_SYSTEM => Some(Self::System), + WORKSPACE_ROLE_KIND_CUSTOM => Some(Self::Custom), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::System => WORKSPACE_ROLE_KIND_SYSTEM, + Self::Custom => WORKSPACE_ROLE_KIND_CUSTOM, + } + } +} + +impl fmt::Display for WorkspaceRoleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl FromStr for WorkspaceRoleKind { + type Err = InvalidWorkspaceRoleKind; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidWorkspaceRoleKind) + } +} + +pub const WORKSPACE_SYSTEM_ROLE_OWNER: &str = "owner"; +pub const WORKSPACE_SYSTEM_ROLE_ADMIN: &str = "admin"; +pub const WORKSPACE_SYSTEM_ROLE_EDITOR: &str = "editor"; +pub const WORKSPACE_SYSTEM_ROLE_VIEWER: &str = "viewer"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum WorkspaceSystemRole { + Owner, + Admin, + Editor, + Viewer, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidWorkspaceSystemRole; + +impl fmt::Display for InvalidWorkspaceSystemRole { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid workspace system role") + } +} + +impl std::error::Error for InvalidWorkspaceSystemRole {} + +impl WorkspaceSystemRole { + pub fn parse(value: &str) -> Option { + match value.trim() { + WORKSPACE_SYSTEM_ROLE_OWNER => Some(Self::Owner), + 
WORKSPACE_SYSTEM_ROLE_ADMIN => Some(Self::Admin), + WORKSPACE_SYSTEM_ROLE_EDITOR => Some(Self::Editor), + WORKSPACE_SYSTEM_ROLE_VIEWER => Some(Self::Viewer), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Owner => WORKSPACE_SYSTEM_ROLE_OWNER, + Self::Admin => WORKSPACE_SYSTEM_ROLE_ADMIN, + Self::Editor => WORKSPACE_SYSTEM_ROLE_EDITOR, + Self::Viewer => WORKSPACE_SYSTEM_ROLE_VIEWER, + } + } +} + +impl fmt::Display for WorkspaceSystemRole { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl FromStr for WorkspaceSystemRole { + type Err = InvalidWorkspaceSystemRole; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidWorkspaceSystemRole) + } +} + +pub const WORKSPACE_BASE_ROLE_ADMIN: &str = "admin"; +pub const WORKSPACE_BASE_ROLE_EDITOR: &str = "editor"; +pub const WORKSPACE_BASE_ROLE_VIEWER: &str = "viewer"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum WorkspaceBaseRole { + Admin, + Editor, + Viewer, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InvalidWorkspaceBaseRole; + +impl fmt::Display for InvalidWorkspaceBaseRole { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("invalid workspace base role") + } +} + +impl std::error::Error for InvalidWorkspaceBaseRole {} + +impl WorkspaceBaseRole { + pub fn parse(value: &str) -> Option { + match value.trim() { + WORKSPACE_BASE_ROLE_ADMIN => Some(Self::Admin), + WORKSPACE_BASE_ROLE_EDITOR => Some(Self::Editor), + WORKSPACE_BASE_ROLE_VIEWER => Some(Self::Viewer), + _ => None, + } + } + + pub const fn as_str(self) -> &'static str { + match self { + Self::Admin => WORKSPACE_BASE_ROLE_ADMIN, + Self::Editor => WORKSPACE_BASE_ROLE_EDITOR, + Self::Viewer => WORKSPACE_BASE_ROLE_VIEWER, + } + } +} + +impl fmt::Display for WorkspaceBaseRole { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +impl FromStr for WorkspaceBaseRole { + type Err = InvalidWorkspaceBaseRole; + + fn from_str(s: &str) -> Result { + Self::parse(s).ok_or(InvalidWorkspaceBaseRole) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_roles() { + assert_eq!( + WorkspaceRoleKind::parse("system"), + Some(WorkspaceRoleKind::System) + ); + assert_eq!( + WorkspaceRoleKind::parse(" custom "), + Some(WorkspaceRoleKind::Custom) + ); + assert_eq!(WorkspaceRoleKind::parse("nope"), None); + + assert_eq!( + WorkspaceSystemRole::parse("owner"), + Some(WorkspaceSystemRole::Owner) + ); + assert_eq!( + WorkspaceSystemRole::parse(" viewer "), + Some(WorkspaceSystemRole::Viewer) + ); + assert_eq!(WorkspaceSystemRole::parse("nope"), None); + + assert_eq!( + WorkspaceBaseRole::parse("admin"), + Some(WorkspaceBaseRole::Admin) + ); + assert_eq!( + WorkspaceBaseRole::parse(" editor "), + Some(WorkspaceBaseRole::Editor) + ); + assert_eq!(WorkspaceBaseRole::parse("owner"), None); + } +} diff --git a/api/crates/infrastructure/Cargo.toml b/api/crates/infrastructure/Cargo.toml new file mode 100644 index 00000000..25f964fe --- /dev/null +++ b/api/crates/infrastructure/Cargo.toml @@ -0,0 +1,63 @@ +[package] +name = "infrastructure" +version = "0.1.0" +edition = "2024" + +[dependencies] +application = { path = "../application" } +domain = { path = "../domain" } + +anyhow = "1" +async-trait = "0.1" +chrono = { version = "0.4", features = ["serde", "clock"] } +dotenvy = "0.15" +futures-core = "0.3" +futures-util = { version = 
"0.3", features = ["sink"] } +git2 = { version = "0.18", default-features = true, features = ["vendored-libgit2"] } +htmlescape = "0.3" +hex = "0.4" +hmac = "0.12" +http = "1" +jsonwebtoken = { version = "9", default-features = false, features = ["use_pem"] } +mime_guess = "2" +notify = "6" +once_cell = "1" +rand = "0.8" +redis = { version = "0.27", features = ["tokio-comp", "aio", "streams", "script", "connection-manager"] } +regex = "1" +reqwest = { version = "0.11", features = ["json", "stream", "rustls-tls"] } +semver = "1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +sha2 = "0.10" +ammonia = { version = "3" } +comrak = { version = "0.22" } +syntect = { version = "5", default-features = true } +syntect-assets = "0.23" +sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "uuid", "chrono", "macros"] } +tempfile = "3" +thiserror = "1" +tokio = { version = "1.46", features = ["rt-multi-thread", "macros", "process"] } +tokio-stream = { version = "0.1", features = ["sync"] } +tracing = "0.1" +urlencoding = "2" +uuid = { version = "1", features = ["v4", "serde"] } +walkdir = "2.5" +zip = { version = "0.6" } +yrs = { version = "0.24", features = ["sync"] } +yrs-warp = "0.9" + +argon2 = "0.5" +password-hash = "0.5" + +aes-gcm = "0.10" +aead = "0.5" + +aws-config = { version = "1", features = ["behavior-version-latest"] } +aws-sdk-s3 = "1" + +extism = { version = "1" } + +pandoc = "0.8" + +base64 = "0.21" diff --git a/api/src/infrastructure/crypto/mod.rs b/api/crates/infrastructure/src/core/crypto/mod.rs similarity index 98% rename from api/src/infrastructure/crypto/mod.rs rename to api/crates/infrastructure/src/core/crypto/mod.rs index e0cf70fe..989b68a4 100644 --- a/api/src/infrastructure/crypto/mod.rs +++ b/api/crates/infrastructure/src/core/crypto/mod.rs @@ -10,7 +10,7 @@ fn derive_key(secret: &str) -> Key { let out = hasher.finalize(); let mut k = [0u8; 32]; k.copy_from_slice(&out); - Key::::from_slice(&k).clone() + *Key::::from_slice(&k) } pub fn encrypt_string(secret: &str, plaintext: &str) -> anyhow::Result { diff --git a/api/src/infrastructure/db/advisory_lock.rs b/api/crates/infrastructure/src/core/db/advisory_lock.rs similarity index 100% rename from api/src/infrastructure/db/advisory_lock.rs rename to api/crates/infrastructure/src/core/db/advisory_lock.rs diff --git a/api/src/infrastructure/db/mod.rs b/api/crates/infrastructure/src/core/db/mod.rs similarity index 73% rename from api/src/infrastructure/db/mod.rs rename to api/crates/infrastructure/src/core/db/mod.rs index f6c4a9d5..1ec7f9ab 100644 --- a/api/src/infrastructure/db/mod.rs +++ b/api/crates/infrastructure/src/core/db/mod.rs @@ -11,10 +11,9 @@ pub async fn connect_pool(database_url: &str) -> anyhow::Result { } pub async fn migrate(pool: &PgPool) -> anyhow::Result<()> { - // Uses compile-time embedded migrations under ./migrations - sqlx::migrate!("./migrations").run(pool).await?; + // Uses compile-time embedded migrations from the api package. 
+ sqlx::migrate!("../../migrations").run(pool).await?; Ok(()) } pub mod advisory_lock; -pub mod repositories; diff --git a/api/src/infrastructure/health/db_probe.rs b/api/crates/infrastructure/src/core/health/db_probe.rs similarity index 74% rename from api/src/infrastructure/health/db_probe.rs rename to api/crates/infrastructure/src/core/health/db_probe.rs index 18a0698d..51b11a6e 100644 --- a/api/src/infrastructure/health/db_probe.rs +++ b/api/crates/infrastructure/src/core/health/db_probe.rs @@ -3,8 +3,9 @@ use std::sync::Arc; use async_trait::async_trait; use sqlx::Row; -use crate::application::ports::health_probe::{HealthProbe, HealthStatus}; -use crate::infrastructure::db::PgPool; +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::core::ports::health_probe::{HealthProbe, HealthStatus}; pub struct DatabaseHealthProbe { pool: PgPool, @@ -18,7 +19,7 @@ impl DatabaseHealthProbe { #[async_trait] impl HealthProbe for DatabaseHealthProbe { - async fn probe(&self) -> anyhow::Result { + async fn probe(&self) -> PortResult { let ok = sqlx::query("SELECT 1") .map(|row: sqlx::postgres::PgRow| row.get::(0)) .fetch_one(&self.pool) diff --git a/api/src/infrastructure/health/mod.rs b/api/crates/infrastructure/src/core/health/mod.rs similarity index 100% rename from api/src/infrastructure/health/mod.rs rename to api/crates/infrastructure/src/core/health/mod.rs diff --git a/api/src/application/services/markdown/mod.rs b/api/crates/infrastructure/src/core/markdown/mod.rs similarity index 94% rename from api/src/application/services/markdown/mod.rs rename to api/crates/infrastructure/src/core/markdown/mod.rs index 87f2a3e7..41543fd9 100644 --- a/api/src/application/services/markdown/mod.rs +++ b/api/crates/infrastructure/src/core/markdown/mod.rs @@ -1,42 +1,35 @@ use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::sync::Mutex; -use crate::application::utils::hash::sha256_hex; +use application::core::dtos::markdown::{PlaceholderItem, RenderOptions, RenderResponse}; +use application::core::ports::errors::PortResult; +use application::core::ports::markdown_renderer::MarkdownRenderer; +use application::core::services::utils::hash::sha256_hex; -#[derive(Debug, Deserialize, Serialize, Default, Clone)] -#[serde(default)] -pub struct RenderOptions { - pub flavor: Option, - pub theme: Option, - pub features: Option>, - pub sanitize: Option, - /// If true, convert soft line breaks (single newlines) into
tags - pub hardbreaks: Option, - /// If provided, rewrite attachment-relative links/images to absolute under /uploads/{doc_id} - pub doc_id: Option, - /// If provided, prefix absolute URLs with this origin (e.g., https://api.example.com) - pub base_origin: Option, - /// If true, rewrite attachment URLs (./attachments/, attachments/, /uploads/) - pub absolute_attachments: Option, - /// Optional share token to append as query (?token=...) - pub token: Option, +pub struct ComrakMarkdownRenderer; + +impl ComrakMarkdownRenderer { + pub fn new() -> Self { + Self + } } -#[derive(Debug, Serialize, Clone)] -pub struct PlaceholderItem { - pub kind: String, - pub id: String, - pub code: String, +impl Default for ComrakMarkdownRenderer { + fn default() -> Self { + Self::new() + } } -#[derive(Debug, Serialize, Clone)] -pub struct RenderResponse { - pub html: String, - #[serde(skip_serializing_if = "Vec::is_empty")] - pub placeholders: Vec, - pub hash: String, +impl MarkdownRenderer for ComrakMarkdownRenderer { + fn render( + &self, + text: String, + opts: RenderOptions, + placeholder_kinds: Option<&HashSet>, + ) -> PortResult { + render(text, opts, placeholder_kinds).map_err(Into::into) + } } fn wants_feature(opts: &RenderOptions, name: &str) -> bool { @@ -202,11 +195,7 @@ pub fn render( } else { // Hashtags and directives starting with '#' let start = j; // position of '#' - let prev_char = if start == 0 { - None - } else { - s[..start].chars().rev().next() - }; + let prev_char = s[..start].chars().next_back(); if let Some(prev) = prev_char { if prev.is_alphanumeric() || matches!( @@ -469,7 +458,7 @@ pub fn render( if !txt.is_empty() { txt } else { - url.split('/').last().unwrap_or(&url).to_string() + url.rsplit('/').next().unwrap_or(&url).to_string() } }; let new_url = rewrite_attachment_url(&url, opts).unwrap_or(url.clone()); diff --git a/api/crates/infrastructure/src/core/mod.rs b/api/crates/infrastructure/src/core/mod.rs new file mode 100644 index 00000000..d329d833 --- /dev/null +++ b/api/crates/infrastructure/src/core/mod.rs @@ -0,0 +1,5 @@ +pub mod crypto; +pub mod db; +pub mod health; +pub mod markdown; +pub mod storage; diff --git a/api/src/infrastructure/storage/dirty.rs b/api/crates/infrastructure/src/core/storage/dirty.rs similarity index 100% rename from api/src/infrastructure/storage/dirty.rs rename to api/crates/infrastructure/src/core/storage/dirty.rs diff --git a/api/src/infrastructure/storage/fs_ingest_watcher.rs b/api/crates/infrastructure/src/core/storage/fs_ingest_watcher.rs similarity index 94% rename from api/src/infrastructure/storage/fs_ingest_watcher.rs rename to api/crates/infrastructure/src/core/storage/fs_ingest_watcher.rs index d560a38e..c2e5583c 100644 --- a/api/src/infrastructure/storage/fs_ingest_watcher.rs +++ b/api/crates/infrastructure/src/core/storage/fs_ingest_watcher.rs @@ -9,27 +9,30 @@ use tokio::sync::mpsc::{self, UnboundedSender}; use tracing::{debug, error, warn}; use uuid::Uuid; -use crate::application::ports::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue}; -use crate::application::services::storage_ingest::normalize_repo_path; -use crate::application::utils::hash::sha256_hex; -use crate::domain::workspaces::permissions::PermissionSet; +use application::core::ports::storage::storage_ingest_queue::{ + StorageIngestKind, StorageIngestQueue, +}; +use application::core::services::storage::ingest::normalize_repo_path; +use application::core::services::utils::hash::sha256_hex; +use domain::access::permissions::PermissionSet; +use 
domain::storage::ingest_backend::StorageIngestBackend; pub struct FsIngestWatcher { uploads_root: PathBuf, queue: Arc, - backend_name: String, + backend: StorageIngestBackend, } impl FsIngestWatcher { pub fn new( uploads_root: PathBuf, queue: Arc, - backend_name: &str, + backend: StorageIngestBackend, ) -> Self { Self { uploads_root, queue, - backend_name: backend_name.to_string(), + backend, } } @@ -115,7 +118,7 @@ impl FsIngestWatcher { user_id, None, &clean_repo, - &self.backend_name, + self.backend.clone(), kind, content_hash.as_deref(), payload, @@ -158,7 +161,7 @@ impl FsIngestWatcher { to_user, None, &clean_to, - &self.backend_name, + self.backend.clone(), StorageIngestKind::Upsert, content_hash.as_deref(), payload, diff --git a/api/crates/infrastructure/src/core/storage/gitignore_port_impl.rs b/api/crates/infrastructure/src/core/storage/gitignore_port_impl.rs new file mode 100644 index 00000000..9d143be0 --- /dev/null +++ b/api/crates/infrastructure/src/core/storage/gitignore_port_impl.rs @@ -0,0 +1,117 @@ +use application::core::ports::errors::PortResult; +use application::git::ports::gitignore_port::GitignorePort; + +pub struct FsGitignorePort; + +#[async_trait::async_trait] +impl GitignorePort for FsGitignorePort { + async fn ensure_gitignore(&self, dir: &str) -> PortResult { + let out: anyhow::Result = async { + use tokio::io::AsyncWriteExt; + let path = std::path::Path::new(dir).join(".gitignore"); + if let Some(parent) = path.parent() { + tokio::fs::create_dir_all(parent).await?; + } + let defaults = vec![ + "# RefMD auto-generated .gitignore", + "*.md.tmp", + ".DS_Store", + "Thumbs.db", + ".env", + ".env.local", + ]; + let mut created_or_updated = false; + if tokio::fs::try_exists(&path).await.unwrap_or(false) { + let existing = tokio::fs::read_to_string(&path).await.unwrap_or_default(); + let mut lines: std::collections::BTreeSet = + existing.lines().map(|s| s.to_string()).collect(); + let mut changed = false; + for d in &defaults { + if !lines.contains(*d) { + lines.insert((*d).to_string()); + changed = true; + } + } + if changed { + let mut buf = String::new(); + for l in lines { + buf.push_str(&l); + buf.push('\n'); + } + let mut f = tokio::fs::File::create(&path).await?; + f.write_all(buf.as_bytes()).await?; + created_or_updated = true; + } + } else { + let mut f = tokio::fs::File::create(&path).await?; + for d in &defaults { + f.write_all(d.as_bytes()).await?; + f.write_all(b"\n").await?; + } + created_or_updated = true; + } + Ok(created_or_updated) + } + .await; + out.map_err(Into::into) + } + + async fn upsert_gitignore_patterns(&self, dir: &str, patterns: &[String]) -> PortResult { + let out: anyhow::Result = async { + use tokio::io::AsyncWriteExt; + let path = std::path::Path::new(dir).join(".gitignore"); + if let Some(parent) = path.parent() { + tokio::fs::create_dir_all(parent).await?; + } + let mut set: std::collections::BTreeSet = + if tokio::fs::try_exists(&path).await.unwrap_or(false) { + tokio::fs::read_to_string(&path) + .await + .unwrap_or_default() + .lines() + .map(|s| s.to_string()) + .collect() + } else { + Default::default() + }; + let before = set.len(); + for p in patterns { + if !p.trim().is_empty() { + set.insert(p.trim().to_string()); + } + } + if set.len() != before { + let mut buf = String::new(); + for l in &set { + buf.push_str(l); + buf.push('\n'); + } + let mut f = tokio::fs::File::create(&path).await?; + f.write_all(buf.as_bytes()).await?; + return Ok(set.len() - before); + } + Ok(0) + } + .await; + out.map_err(Into::into) + } + + async fn 
read_gitignore_patterns(&self, dir: &str) -> PortResult> { + let out: anyhow::Result> = async { + let path = std::path::Path::new(dir).join(".gitignore"); + let content = if tokio::fs::try_exists(&path).await.unwrap_or(false) { + tokio::fs::read_to_string(&path).await.unwrap_or_default() + } else { + String::new() + }; + let patterns: Vec = content + .lines() + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty() && !s.starts_with('#')) + .collect(); + Ok(patterns) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/src/infrastructure/storage/ingest_queue.rs b/api/crates/infrastructure/src/core/storage/ingest_queue.rs similarity index 60% rename from api/src/infrastructure/storage/ingest_queue.rs rename to api/crates/infrastructure/src/core/storage/ingest_queue.rs index f8d943bc..bf3dbf64 100644 --- a/api/src/infrastructure/storage/ingest_queue.rs +++ b/api/crates/infrastructure/src/core/storage/ingest_queue.rs @@ -4,10 +4,12 @@ use serde_json::{Value, json}; use sqlx::Row; use uuid::Uuid; -use crate::application::ports::storage_ingest_queue::{ +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_ingest_queue::{ StorageIngestEvent, StorageIngestKind, StorageIngestQueue, StorageIngestQueueStats, }; -use crate::infrastructure::db::PgPool; +use domain::storage::ingest_backend::StorageIngestBackend; pub struct PgStorageIngestQueue { pool: PgPool, @@ -52,15 +54,16 @@ impl StorageIngestQueue for PgStorageIngestQueue { user_id: Uuid, actor_id: Option, repo_path: &str, - backend: &str, + backend: StorageIngestBackend, kind: StorageIngestKind, content_hash: Option<&str>, payload: Option, permission_snapshot: &[String], - ) -> anyhow::Result<()> { - let kind_str = Self::kind_to_str(kind); - sqlx::query( - r#" + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let kind_str = Self::kind_to_str(kind); + sqlx::query( + r#" INSERT INTO storage_ingest_queue (workspace_id, user_id, actor_id, repo_path, backend, event_kind, content_hash, payload, permission_snapshot, attempts, locked_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 0, NULL) ON CONFLICT ON CONSTRAINT storage_ingest_queue_workspace_repo_backend_unique @@ -101,24 +104,28 @@ impl StorageIngestQueue for PgStorageIngestQueue { ELSE storage_ingest_queue.created_at END "#, - ) - .bind(workspace_id) - .bind(user_id) - .bind(actor_id) - .bind(repo_path) - .bind(backend) - .bind(kind_str) - .bind(content_hash) - .bind(payload) - .bind(json!(permission_snapshot)) - .execute(&self.pool) - .await?; - Ok(()) + ) + .bind(workspace_id) + .bind(user_id) + .bind(actor_id) + .bind(repo_path) + .bind(backend.as_str()) + .bind(kind_str) + .bind(content_hash) + .bind(payload) + .bind(json!(permission_snapshot)) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) } - async fn fetch_next_event(&self) -> anyhow::Result> { - let row = sqlx::query( - r#" + async fn fetch_next_event(&self) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#" WITH next_event AS ( SELECT id FROM storage_ingest_queue WHERE locked_at IS NULL @@ -133,62 +140,69 @@ impl StorageIngestQueue for PgStorageIngestQueue { WHERE q.id IN (SELECT id FROM next_event) RETURNING q.* "#, - ) - .fetch_optional(&self.pool) - .await?; - - let Some(row) = row else { - return Ok(None); - }; - - let kind: String = row.get("event_kind"); - let kind = Self::str_to_kind(&kind)?; - - let locked_at: DateTime = row.get("locked_at"); - - let 
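// Hydration below turns the stringly-typed `backend` column back into the
// domain enum via StorageIngestBackend::parse, mirroring the
// `.bind(backend.as_str())` on the enqueue side above -- the queue table
// itself appears untouched by this refactor.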
snapshot_value: Option = row.try_get("permission_snapshot").ok(); - Ok(Some(StorageIngestEvent { - id: row.get("id"), - workspace_id: row.get("workspace_id"), - user_id: row.get("user_id"), - actor_id: row.try_get("actor_id").ok(), - repo_path: row.get("repo_path"), - backend: row.get("backend"), - kind, - content_hash: row.try_get("content_hash").ok(), - payload: row.try_get::, _>("payload").unwrap_or(None), - attempts: row.try_get("attempts").unwrap_or_default(), - locked_at, - permission_snapshot: parse_permission_snapshot(snapshot_value), - })) + ) + .fetch_optional(&self.pool) + .await?; + + let Some(row) = row else { + return Ok(None); + }; + + let kind: String = row.get("event_kind"); + let kind = Self::str_to_kind(&kind)?; + + let locked_at: DateTime = row.get("locked_at"); + + let snapshot_value: Option = row.try_get("permission_snapshot").ok(); + Ok(Some(StorageIngestEvent { + id: row.get("id"), + workspace_id: row.get("workspace_id"), + user_id: row.get("user_id"), + actor_id: row.try_get("actor_id").ok(), + repo_path: row.get("repo_path"), + backend: StorageIngestBackend::parse(&row.get::("backend")), + kind, + content_hash: row.try_get("content_hash").ok(), + payload: row.try_get::, _>("payload").unwrap_or(None), + attempts: row.try_get("attempts").unwrap_or_default(), + locked_at, + permission_snapshot: parse_permission_snapshot(snapshot_value), + })) + } + .await; + out.map_err(Into::into) } - async fn complete_event(&self, event_id: i64, locked_at: DateTime) -> anyhow::Result<()> { - let mut tx = self.pool.begin().await?; - let updated = sqlx::query( - r#" + async fn complete_event(&self, event_id: i64, locked_at: DateTime) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let mut tx = self.pool.begin().await?; + let updated = sqlx::query( + r#" UPDATE storage_ingest_queue SET locked_at = NULL, attempts = 0, pending_retry = false WHERE id = $1 AND locked_at = $2 AND pending_retry = true "#, - ) - .bind(event_id) - .bind(locked_at) - .execute(&mut *tx) - .await?; - if updated.rows_affected() == 0 { - sqlx::query( - "DELETE FROM storage_ingest_queue WHERE id = $1 AND locked_at = $2 AND pending_retry = false", ) .bind(event_id) .bind(locked_at) .execute(&mut *tx) .await?; + if updated.rows_affected() == 0 { + sqlx::query( + "DELETE FROM storage_ingest_queue WHERE id = $1 AND locked_at = $2 AND pending_retry = false", + ) + .bind(event_id) + .bind(locked_at) + .execute(&mut *tx) + .await?; + } + tx.commit().await?; + Ok(()) } - tx.commit().await?; - Ok(()) + .await; + out.map_err(Into::into) } async fn fail_event( @@ -196,9 +210,10 @@ impl StorageIngestQueue for PgStorageIngestQueue { event_id: i64, locked_at: DateTime, error: &str, - ) -> anyhow::Result<()> { - sqlx::query( - r#" + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" UPDATE storage_ingest_queue SET locked_at = NULL, attempts = attempts, @@ -211,18 +226,22 @@ impl StorageIngestQueue for PgStorageIngestQueue { pending_retry = false WHERE id = $1 AND locked_at = $3 "#, - ) - .bind(event_id) - .bind(error) - .bind(locked_at) - .execute(&self.pool) - .await?; - Ok(()) + ) + .bind(event_id) + .bind(error) + .bind(locked_at) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) } - async fn stats(&self) -> anyhow::Result { - let row = sqlx::query( - r#" + async fn stats(&self) -> PortResult { + let out: anyhow::Result = async { + let row = sqlx::query( + r#" SELECT COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending, COUNT(*) FILTER (WHERE 
locked_at IS NOT NULL) AS locked, @@ -230,15 +249,18 @@ impl StorageIngestQueue for PgStorageIngestQueue { MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_created_at FROM storage_ingest_queue "#, - ) - .fetch_one(&self.pool) - .await?; - - Ok(StorageIngestQueueStats { - pending: row.try_get("pending").unwrap_or(0), - locked: row.try_get("locked").unwrap_or(0), - distinct_users: row.try_get("distinct_users").unwrap_or(0), - oldest_created_at: row.try_get("oldest_created_at").ok(), - }) + ) + .fetch_one(&self.pool) + .await?; + + Ok(StorageIngestQueueStats { + pending: row.try_get("pending").unwrap_or(0), + locked: row.try_get("locked").unwrap_or(0), + distinct_users: row.try_get("distinct_users").unwrap_or(0), + oldest_created_at: row.try_get("oldest_created_at").ok(), + }) + } + .await; + out.map_err(Into::into) } } diff --git a/api/src/infrastructure/storage/ingest_worker.rs b/api/crates/infrastructure/src/core/storage/ingest_worker.rs similarity index 87% rename from api/src/infrastructure/storage/ingest_worker.rs rename to api/crates/infrastructure/src/core/storage/ingest_worker.rs index 59a75b62..f84d29e7 100644 --- a/api/src/infrastructure/storage/ingest_worker.rs +++ b/api/crates/infrastructure/src/core/storage/ingest_worker.rs @@ -4,9 +4,11 @@ use std::time::Duration; use async_trait::async_trait; use tracing::{error, info, warn}; -use crate::application::ports::storage_ingest_queue::{StorageIngestEvent, StorageIngestQueue}; -use crate::application::services::metrics::MetricsRegistry; -use crate::application::services::storage_ingest::StorageIngestHandler; +use application::core::ports::storage::storage_ingest_queue::{ + StorageIngestEvent, StorageIngestQueue, +}; +use application::core::services::metrics::MetricsRegistry; +use application::core::services::storage::ingest::StorageIngestHandler; pub struct LoggingStorageIngestHandler; @@ -17,7 +19,7 @@ impl StorageIngestHandler for LoggingStorageIngestHandler { workspace_id = %event.workspace_id, actor_id = ?event.actor_id, repo_path = event.repo_path, - backend = event.backend, + backend = event.backend.as_str(), kind = ?event.kind, "storage_ingest_event_received" ); @@ -94,8 +96,10 @@ impl StorageIngestWorker { #[cfg(test)] mod tests { use super::*; - use crate::application::ports::storage_ingest_queue::StorageIngestKind; + use application::core::ports::errors::PortResult; + use application::core::ports::storage::storage_ingest_queue::StorageIngestKind; use chrono::Utc; + use domain::storage::ingest_backend::StorageIngestBackend; use std::sync::Mutex; use uuid::Uuid; @@ -123,16 +127,16 @@ mod tests { _user_id: Uuid, _actor_id: Option, _repo_path: &str, - _backend: &str, + _backend: StorageIngestBackend, _kind: StorageIngestKind, _content_hash: Option<&str>, _payload: Option, _permission_snapshot: &[String], - ) -> anyhow::Result<()> { + ) -> PortResult<()> { unimplemented!() } - async fn fetch_next_event(&self) -> anyhow::Result> { + async fn fetch_next_event(&self) -> PortResult> { Ok(None) } @@ -140,7 +144,7 @@ mod tests { &self, event_id: i64, _locked_at: chrono::DateTime, - ) -> anyhow::Result<()> { + ) -> PortResult<()> { self.completed.lock().unwrap().push(event_id); Ok(()) } @@ -150,15 +154,16 @@ mod tests { event_id: i64, _locked_at: chrono::DateTime, _error: &str, - ) -> anyhow::Result<()> { + ) -> PortResult<()> { self.failed.lock().unwrap().push(event_id); Ok(()) } async fn stats( &self, - ) -> anyhow::Result - { + ) -> PortResult< + 
application::core::ports::storage::storage_ingest_queue::StorageIngestQueueStats, + > { unimplemented!() } } @@ -185,7 +190,7 @@ mod tests { user_id: Uuid::new_v4(), actor_id: None, repo_path: "docs/foo.md".into(), - backend: "fs".into(), + backend: StorageIngestBackend::parse("fs"), kind: StorageIngestKind::Upsert, content_hash: None, payload: None, diff --git a/api/crates/infrastructure/src/core/storage/job_queue.rs b/api/crates/infrastructure/src/core/storage/job_queue.rs new file mode 100644 index 00000000..63922cc4 --- /dev/null +++ b/api/crates/infrastructure/src/core/storage/job_queue.rs @@ -0,0 +1,252 @@ +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_projection_queue::{ + StorageProjectionJob, StorageProjectionJobKind, StorageProjectionQueue, +}; + +pub struct PgStorageProjectionQueue { + pool: PgPool, +} + +impl PgStorageProjectionQueue { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + fn kind_to_str(kind: StorageProjectionJobKind) -> &'static str { + match kind { + StorageProjectionJobKind::DocSync => "doc_sync", + StorageProjectionJobKind::FolderSync => "folder_sync", + StorageProjectionJobKind::DeleteDoc => "delete_doc", + StorageProjectionJobKind::DeleteFolder => "delete_folder", + } + } + + fn str_to_kind(raw: &str) -> anyhow::Result { + match raw { + "doc_sync" => Ok(StorageProjectionJobKind::DocSync), + "folder_sync" => Ok(StorageProjectionJobKind::FolderSync), + "delete_doc" => Ok(StorageProjectionJobKind::DeleteDoc), + "delete_folder" => Ok(StorageProjectionJobKind::DeleteFolder), + _ => anyhow::bail!("unsupported_storage_projection_job_type {raw}"), + } + } +} + +#[async_trait] +impl StorageProjectionQueue for PgStorageProjectionQueue { + async fn enqueue_doc_job( + &self, + workspace_id: Uuid, + doc_id: Uuid, + kind: StorageProjectionJobKind, + reason: Option<&str>, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + match kind { + StorageProjectionJobKind::DocSync | StorageProjectionJobKind::DeleteDoc => {} + other => anyhow::bail!("job_kind {other:?} requires a folder_id"), + } + + let job_type = Self::kind_to_str(kind); + sqlx::query( + r#" + INSERT INTO storage_projection_jobs (workspace_id, job_type, doc_id, reason, attempts, locked_at, last_error) + VALUES ($1, $2, $3, $4, 0, NULL, NULL) + ON CONFLICT (job_type, doc_id) WHERE doc_id IS NOT NULL + DO UPDATE SET reason = EXCLUDED.reason, + locked_at = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.locked_at + END, + attempts = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN 0 + ELSE storage_projection_jobs.attempts + END, + last_error = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.last_error + END, + workspace_id = EXCLUDED.workspace_id, + pending_retry = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN false + ELSE true + END, + updated_at = now() + "#, + ) + .bind(workspace_id) + .bind(job_type) + .bind(doc_id) + .bind(reason) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn enqueue_folder_job( + &self, + workspace_id: Uuid, + folder_id: Uuid, + kind: StorageProjectionJobKind, + reason: Option<&str>, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + match kind { + StorageProjectionJobKind::FolderSync | 
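// As in enqueue_doc_job above, wrong-scope kinds are rejected up front. The
// `ON CONFLICT (job_type, folder_id) WHERE folder_id IS NOT NULL` target in
// the SQL below presumably matches a partial unique index created by a
// migration outside this diff.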
StorageProjectionJobKind::DeleteFolder => {} + other => anyhow::bail!("job_kind {other:?} requires a doc_id"), + } + + let job_type = Self::kind_to_str(kind); + sqlx::query( + r#" + INSERT INTO storage_projection_jobs (workspace_id, job_type, folder_id, reason, attempts, locked_at, last_error) + VALUES ($1, $2, $3, $4, 0, NULL, NULL) + ON CONFLICT (job_type, folder_id) WHERE folder_id IS NOT NULL + DO UPDATE SET reason = EXCLUDED.reason, + locked_at = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.locked_at + END, + attempts = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN 0 + ELSE storage_projection_jobs.attempts + END, + last_error = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.last_error + END, + workspace_id = EXCLUDED.workspace_id, + pending_retry = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN false + ELSE true + END, + updated_at = now() + "#, + ) + .bind(workspace_id) + .bind(job_type) + .bind(folder_id) + .bind(reason) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn fetch_next_job( + &self, + lock_timeout_secs: i64, + ) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#" + WITH next_job AS ( + SELECT id FROM storage_projection_jobs + WHERE locked_at IS NULL + OR locked_at < now() - ($1 * interval '1 second') + ORDER BY created_at + LIMIT 1 + FOR UPDATE SKIP LOCKED + ) + UPDATE storage_projection_jobs j + SET locked_at = now(), + attempts = attempts + 1, + updated_at = now() + WHERE j.id IN (SELECT id FROM next_job) + RETURNING j.id, j.workspace_id, j.job_type, j.doc_id, j.folder_id, j.reason, j.attempts, j.locked_at + "#, + ) + .bind(lock_timeout_secs.max(1)) + .fetch_optional(&self.pool) + .await?; + + let Some(row) = row else { + return Ok(None); + }; + + let job_type: String = row.get("job_type"); + let kind = Self::str_to_kind(&job_type)?; + + Ok(Some(StorageProjectionJob { + id: row.get("id"), + workspace_id: row.get("workspace_id"), + job_type: kind, + doc_id: row.try_get::, _>("doc_id").unwrap_or(None), + folder_id: row.try_get::, _>("folder_id").unwrap_or(None), + reason: row.try_get::, _>("reason").unwrap_or(None), + attempts: row.try_get("attempts").unwrap_or_default(), + locked_at: row.get::, _>("locked_at"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn complete_job(&self, job_id: i64, locked_at: DateTime) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let mut tx = self.pool.begin().await?; + let updated = sqlx::query( + r#" + UPDATE storage_projection_jobs + SET locked_at = NULL, + attempts = 0, + last_error = NULL, + pending_retry = false, + updated_at = now() + WHERE id = $1 AND locked_at = $2 AND pending_retry = true + "#, + ) + .bind(job_id) + .bind(locked_at) + .execute(&mut *tx) + .await?; + if updated.rows_affected() == 0 { + sqlx::query( + "DELETE FROM storage_projection_jobs WHERE id = $1 AND locked_at = $2 AND pending_retry = false", + ) + .bind(job_id) + .bind(locked_at) + .execute(&mut *tx) + .await?; + } + tx.commit().await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn fail_job(&self, job_id: i64, locked_at: DateTime, error: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" + UPDATE storage_projection_jobs + SET last_error = $2, + locked_at = NULL, + pending_retry = false, + updated_at = now() + WHERE id = $1 AND locked_at = $3 + "#, + ) + .bind(job_id) + 
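// complete_job above has two outcomes: if pending_retry flipped to true
// while the job was locked (a fresh enqueue raced with processing), the row
// is reset for another pass; otherwise the guarded DELETE removes it.
// fail_job here only clears the lock and records last_error.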
.bind(error) + .bind(locked_at) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/src/infrastructure/storage/mod.rs b/api/crates/infrastructure/src/core/storage/mod.rs similarity index 96% rename from api/src/infrastructure/storage/mod.rs rename to api/crates/infrastructure/src/core/storage/mod.rs index dec9af49..75d528b2 100644 --- a/api/src/infrastructure/storage/mod.rs +++ b/api/crates/infrastructure/src/core/storage/mod.rs @@ -1,4 +1,3 @@ -mod core; mod dirty; mod fs_ingest_watcher; mod gitignore_port_impl; @@ -6,18 +5,19 @@ mod ingest_queue; mod ingest_worker; mod job_queue; mod monitor; +mod paths; mod reconcile_backend; mod reconcile_jobs; mod s3_port_impl; mod storage_port_impl; mod worker; -pub use core::*; pub use dirty::*; pub use fs_ingest_watcher::FsIngestWatcher; pub use ingest_queue::PgStorageIngestQueue; pub use ingest_worker::{LoggingStorageIngestHandler, StorageIngestWorker}; pub use job_queue::PgStorageProjectionQueue; pub use monitor::StorageConsistencyMonitor; +pub use paths::*; pub use reconcile_backend::{FsReconcileBackend, S3ReconcileBackend}; pub use reconcile_jobs::PgStorageReconcileJobs; pub use worker::StorageProjectionWorker; diff --git a/api/src/infrastructure/storage/monitor.rs b/api/crates/infrastructure/src/core/storage/monitor.rs similarity index 94% rename from api/src/infrastructure/storage/monitor.rs rename to api/crates/infrastructure/src/core/storage/monitor.rs index c6ff9d52..358cd755 100644 --- a/api/src/infrastructure/storage/monitor.rs +++ b/api/crates/infrastructure/src/core/storage/monitor.rs @@ -7,15 +7,16 @@ use tokio::{self, sync::Mutex, time::sleep}; use tracing::{error, info, warn}; use uuid::Uuid; -use crate::{ - application::ports::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue}, - application::ports::storage_port::StorageResolverPort, - application::ports::storage_projection_queue::{ - StorageProjectionJobKind, StorageProjectionQueue, - }, - domain::workspaces::permissions::PermissionSet, - infrastructure::db::PgPool, +use crate::core::db::PgPool; +use application::core::ports::storage::storage_ingest_queue::{ + StorageIngestKind, StorageIngestQueue, }; +use application::core::ports::storage::storage_port::StorageResolverPort; +use application::core::ports::storage::storage_projection_queue::{ + StorageProjectionJobKind, StorageProjectionQueue, +}; +use domain::access::permissions::PermissionSet; +use domain::storage::ingest_backend::StorageIngestBackend; /// Periodically verifies that metadata entries in `documents` / `files` /// still have a corresponding object in the configured storage backend. 
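The hunk below swaps the `"consistency"` string literal for `StorageIngestBackend::Consistency`. The enum itself lives in the new `domain` crate and is not part of this diff; the following is a hypothetical sketch grounded only in the `parse`, `as_str`, and `Clone` call sites and the `"fs"` / `"consistency"` values visible in these hunks -- orientation for reading the diff, not the actual definition.

// Hypothetical reconstruction of domain::storage::ingest_backend.
// `parse` must be infallible (call sites use it without `?`), which
// suggests a catch-all variant for unknown column values.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum StorageIngestBackend {
    Fs,
    Consistency,
    Other(String),
}

impl StorageIngestBackend {
    pub fn parse(raw: &str) -> Self {
        match raw {
            "fs" => Self::Fs,
            "consistency" => Self::Consistency,
            other => Self::Other(other.to_string()),
        }
    }

    pub fn as_str(&self) -> &str {
        match self {
            Self::Fs => "fs",
            Self::Consistency => "consistency",
            Self::Other(s) => s,
        }
    }
}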
@@ -213,7 +214,7 @@ impl StorageConsistencyMonitor { workspace_id, None, &repo_path, - "consistency", + StorageIngestBackend::Consistency, StorageIngestKind::Delete, None, Some(json!({ diff --git a/api/src/infrastructure/storage/core.rs b/api/crates/infrastructure/src/core/storage/paths.rs similarity index 96% rename from api/src/infrastructure/storage/core.rs rename to api/crates/infrastructure/src/core/storage/paths.rs index 6b53d990..f809445d 100644 --- a/api/src/infrastructure/storage/core.rs +++ b/api/crates/infrastructure/src/core/storage/paths.rs @@ -2,7 +2,8 @@ use sqlx::Row; use std::path::{Path, PathBuf}; use uuid::Uuid; -use crate::infrastructure::db::PgPool; +use crate::core::db::PgPool; +use domain::documents::doc_type::DOC_TYPE_FOLDER; fn pathbuf_to_string(path: &Path) -> String { path.to_string_lossy().replace('\\', "/") @@ -88,7 +89,7 @@ pub async fn build_doc_dir( .flatten() .is_some(); - let rel = if dtype == "folder" { + let rel = if dtype == DOC_TYPE_FOLDER { owner_relative_buf(owner_id, &desired_path, archived) } else { owner_relative_parent_buf(owner_id, &desired_path, archived) @@ -109,7 +110,7 @@ pub async fn build_doc_file_path( .fetch_one(pool) .await?; let dtype: String = row.get("type"); - if dtype == "folder" { + if dtype == DOC_TYPE_FOLDER { anyhow::bail!("folder_has_no_markdown_path"); } let owner_id: Uuid = row.get("owner_id"); @@ -193,7 +194,7 @@ pub async fn mark_dirty_upsert_abs_path( is_text: bool, content_hash: Option<&str>, ) -> anyhow::Result<()> { - if crate::infrastructure::storage::dirty_tracking_suppressed() { + if crate::core::storage::dirty_tracking_suppressed() { return Ok(()); } let rel = relative_from_uploads(uploads_root, abs_path).replace('\\', "/"); @@ -213,7 +214,7 @@ pub async fn mark_dirty_upsert_relative( is_text: bool, content_hash: Option<&str>, ) -> anyhow::Result<()> { - if crate::infrastructure::storage::dirty_tracking_suppressed() { + if crate::core::storage::dirty_tracking_suppressed() { return Ok(()); } let rel = relative.trim_start_matches('/'); @@ -228,7 +229,7 @@ pub async fn mark_dirty_upsert_relative( } pub async fn mark_dirty_delete_relative(pool: &PgPool, relative: &str) -> anyhow::Result<()> { - if crate::infrastructure::storage::dirty_tracking_suppressed() { + if crate::core::storage::dirty_tracking_suppressed() { return Ok(()); } let rel = relative.trim_start_matches('/'); @@ -257,7 +258,7 @@ pub async fn move_doc_paths( }; let owner_id: Uuid = row.get("owner_id"); let dtype: String = row.get("type"); - if dtype == "folder" { + if dtype == DOC_TYPE_FOLDER { return Ok(()); } let old_rel: Option = row.try_get("path").ok(); @@ -391,12 +392,12 @@ pub async fn delete_doc_physical( None => return Ok(()), }; let dtype: String = row.get("type"); - if dtype == "folder" { + if dtype == DOC_TYPE_FOLDER { return Ok(()); } // Delete the document file itself - if let Some(rel) = row.try_get::("path").ok() { + if let Ok(rel) = row.try_get::("path") { let full = uploads_root.join(&rel); let _ = tokio::fs::remove_file(&full).await; // Mark delete for document markdown diff --git a/api/src/infrastructure/storage/reconcile_backend.rs b/api/crates/infrastructure/src/core/storage/reconcile_backend.rs similarity index 54% rename from api/src/infrastructure/storage/reconcile_backend.rs rename to api/crates/infrastructure/src/core/storage/reconcile_backend.rs index a1c5b870..8e91bf1e 100644 --- a/api/src/infrastructure/storage/reconcile_backend.rs +++ b/api/crates/infrastructure/src/core/storage/reconcile_backend.rs @@ -10,7 +10,8 @@ use 
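// FsReconcileBackend::list_paths below runs its WalkDir traversal inside
// task::spawn_blocking so the synchronous filesystem walk never stalls the
// async runtime; the JoinError from the blocking task is folded into anyhow
// via `anyhow!(err)`.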
tokio::task; use uuid::Uuid; use walkdir::WalkDir; -use crate::application::ports::storage_reconcile_backend::StorageReconcileBackend; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_reconcile_backend::StorageReconcileBackend; use super::s3::S3StorageConfig; @@ -26,30 +27,34 @@ impl FsReconcileBackend { #[async_trait] impl StorageReconcileBackend for FsReconcileBackend { - async fn list_paths(&self, user_id: Uuid) -> anyhow::Result> { - let root = self.root.clone(); - task::spawn_blocking(move || { - let user_root = root.join(user_id.to_string()); - if !user_root.exists() { - return Ok(Vec::new()); - } - let mut paths = Vec::new(); - for entry in WalkDir::new(&user_root).into_iter().filter_map(Result::ok) { - if entry.path().is_file() { - if let Some(rel) = entry - .path() - .strip_prefix(&root) - .ok() - .and_then(|p| p.to_str()) - { - paths.push(rel.replace('\\', "/")); + async fn list_paths(&self, user_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let root = self.root.clone(); + task::spawn_blocking(move || { + let user_root = root.join(user_id.to_string()); + if !user_root.exists() { + return Ok(Vec::new()); + } + let mut paths = Vec::new(); + for entry in WalkDir::new(&user_root).into_iter().filter_map(Result::ok) { + if entry.path().is_file() { + if let Some(rel) = entry + .path() + .strip_prefix(&root) + .ok() + .and_then(|p| p.to_str()) + { + paths.push(rel.replace('\\', "/")); + } } } - } - Ok(paths) - }) - .await - .map_err(|err| anyhow!(err))? + Ok(paths) + }) + .await + .map_err(|err| anyhow!(err))? + } + .await; + out.map_err(Into::into) } } @@ -107,37 +112,41 @@ impl S3ReconcileBackend { #[async_trait] impl StorageReconcileBackend for S3ReconcileBackend { - async fn list_paths(&self, user_id: Uuid) -> anyhow::Result> { - let mut paths = Vec::new(); - let prefix = if self.root_prefix.is_empty() { - user_id.to_string() - } else { - format!("{}/{}", self.root_prefix, user_id) - }; - let mut token = None; - loop { - let resp = self - .client - .list_objects_v2() - .bucket(&self.bucket) - .prefix(&prefix) - .set_continuation_token(token.clone()) - .send() - .await?; - for obj in resp.contents() { - if let Some(key) = obj.key() { - if let Some(repo_path) = self.repo_path_from_key(key) { - paths.push(repo_path); + async fn list_paths(&self, user_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let mut paths = Vec::new(); + let prefix = if self.root_prefix.is_empty() { + user_id.to_string() + } else { + format!("{}/{}", self.root_prefix, user_id) + }; + let mut token = None; + loop { + let resp = self + .client + .list_objects_v2() + .bucket(&self.bucket) + .prefix(&prefix) + .set_continuation_token(token.clone()) + .send() + .await?; + for obj in resp.contents() { + if let Some(key) = obj.key() { + if let Some(repo_path) = self.repo_path_from_key(key) { + paths.push(repo_path); + } } } + if let Some(next) = resp.next_continuation_token() { + token = Some(next.to_string()); + } else { + break; + } } - if let Some(next) = resp.next_continuation_token() { - token = Some(next.to_string()); - } else { - break; - } + Ok(paths) } - Ok(paths) + .await; + out.map_err(Into::into) } } diff --git a/api/src/infrastructure/storage/reconcile_jobs.rs b/api/crates/infrastructure/src/core/storage/reconcile_jobs.rs similarity index 51% rename from api/src/infrastructure/storage/reconcile_jobs.rs rename to api/crates/infrastructure/src/core/storage/reconcile_jobs.rs index 3d51a6ae..29a66650 100644 --- 
a/api/src/infrastructure/storage/reconcile_jobs.rs +++ b/api/crates/infrastructure/src/core/storage/reconcile_jobs.rs @@ -3,9 +3,10 @@ use sqlx::Row; use tracing::debug; use uuid::Uuid; -use crate::{ - application::ports::storage_reconcile_jobs::{StorageReconcileJob, StorageReconcileJobs}, - infrastructure::db::PgPool, +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_reconcile_jobs::{ + StorageReconcileJob, StorageReconcileJobs, }; pub struct PgStorageReconcileJobs { @@ -20,9 +21,10 @@ impl PgStorageReconcileJobs { #[async_trait] impl StorageReconcileJobs for PgStorageReconcileJobs { - async fn enqueue(&self, workspace_id: Uuid, scope: &str) -> anyhow::Result<()> { - sqlx::query( - r#" + async fn enqueue(&self, workspace_id: Uuid, scope: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" INSERT INTO storage_reconcile_jobs (workspace_id, scope, attempts, locked_at, last_error) VALUES ($1, $2, 0, NULL, NULL) ON CONFLICT ON CONSTRAINT storage_reconcile_jobs_workspace_scope_unique @@ -42,25 +44,26 @@ impl StorageReconcileJobs for PgStorageReconcileJobs { END, updated_at = now() "#, - ) - .bind(workspace_id) - .bind(scope) - .execute(&self.pool) - .await?; - debug!( - workspace_id = %workspace_id, - scope, - "storage_reconcile_job_enqueued" - ); - Ok(()) + ) + .bind(workspace_id) + .bind(scope) + .execute(&self.pool) + .await?; + debug!( + workspace_id = %workspace_id, + scope, + "storage_reconcile_job_enqueued" + ); + Ok(()) + } + .await; + out.map_err(Into::into) } - async fn fetch_next( - &self, - lock_timeout_secs: i64, - ) -> anyhow::Result> { - let row = sqlx::query( - r#" + async fn fetch_next(&self, lock_timeout_secs: i64) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#" WITH next_job AS ( SELECT id FROM storage_reconcile_jobs WHERE locked_at IS NULL @@ -76,22 +79,26 @@ impl StorageReconcileJobs for PgStorageReconcileJobs { WHERE j.id IN (SELECT id FROM next_job) RETURNING j.id, j.workspace_id, j.scope, j.attempts "#, - ) - .bind(lock_timeout_secs.max(1)) - .fetch_optional(&self.pool) - .await?; + ) + .bind(lock_timeout_secs.max(1)) + .fetch_optional(&self.pool) + .await?; - Ok(row.map(|r| StorageReconcileJob { - id: r.get("id"), - workspace_id: r.get("workspace_id"), - scope: r.get("scope"), - attempts: r.get("attempts"), - })) + Ok(row.map(|r| StorageReconcileJob { + id: r.get("id"), + workspace_id: r.get("workspace_id"), + scope: r.get("scope"), + attempts: r.get("attempts"), + })) + } + .await; + out.map_err(Into::into) } - async fn complete(&self, job_id: i64) -> anyhow::Result<()> { - let result = sqlx::query( - r#" + async fn complete(&self, job_id: i64) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let result = sqlx::query( + r#" UPDATE storage_reconcile_jobs SET locked_at = NULL, attempts = 0, @@ -100,23 +107,27 @@ impl StorageReconcileJobs for PgStorageReconcileJobs { updated_at = now() WHERE id = $1 AND pending_retry = true "#, - ) - .bind(job_id) - .execute(&self.pool) - .await?; + ) + .bind(job_id) + .execute(&self.pool) + .await?; - if result.rows_affected() == 0 { - sqlx::query("DELETE FROM storage_reconcile_jobs WHERE id = $1") - .bind(job_id) - .execute(&self.pool) - .await?; + if result.rows_affected() == 0 { + sqlx::query("DELETE FROM storage_reconcile_jobs WHERE id = $1") + .bind(job_id) + .execute(&self.pool) + .await?; + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } - async fn 
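// Unlike the projection queue, `complete` above keys only on the job id
// (no locked_at guard), so a job re-claimed after a lock timeout could be
// deleted by the worker that originally held it. For reconcile jobs, which
// are deduplicated per workspace/scope, that is presumably acceptable.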
fail(&self, job_id: i64, error: &str) -> anyhow::Result<()> { - sqlx::query( - r#" + async fn fail(&self, job_id: i64, error: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" UPDATE storage_reconcile_jobs SET last_error = $2, locked_at = NULL, @@ -124,11 +135,14 @@ impl StorageReconcileJobs for PgStorageReconcileJobs { updated_at = now() WHERE id = $1 "#, - ) - .bind(job_id) - .bind(error) - .execute(&self.pool) - .await?; - Ok(()) + ) + .bind(job_id) + .bind(error) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) } } diff --git a/api/src/infrastructure/storage/s3_port_impl.rs b/api/crates/infrastructure/src/core/storage/s3_port_impl.rs similarity index 57% rename from api/src/infrastructure/storage/s3_port_impl.rs rename to api/crates/infrastructure/src/core/storage/s3_port_impl.rs index 88ccd288..90443765 100644 --- a/api/src/infrastructure/storage/s3_port_impl.rs +++ b/api/crates/infrastructure/src/core/storage/s3_port_impl.rs @@ -15,11 +15,13 @@ use aws_sdk_s3::{Client, error::SdkError}; use tokio::io::AsyncReadExt; use uuid::Uuid; -use crate::application::ports::storage_port::{ +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_port::{ StorageProjectionPort, StorageResolverPort, StoredAttachment, }; -use crate::application::utils::hash::sha256_hex; -use crate::infrastructure::db::PgPool; +use application::core::services::utils::hash::sha256_hex; +use domain::documents::doc_type::DOC_TYPE_FOLDER; #[derive(Clone, Debug)] pub struct S3StorageConfig { @@ -101,8 +103,8 @@ impl S3StoragePort { } fn key_from_path(&self, abs_path: &Path) -> String { - let rel = crate::infrastructure::storage::relative_from_uploads(&self.root, abs_path) - .replace('\\', "/"); + let rel = + crate::core::storage::relative_from_uploads(&self.root, abs_path).replace('\\', "/"); self.relative_to_key(&rel) } @@ -195,7 +197,7 @@ impl S3StoragePort { }; let owner_id: Uuid = row.get("owner_id"); let dtype: String = row.get("type"); - if dtype == "folder" { + if dtype == DOC_TYPE_FOLDER { return Ok(()); } let old_rel: Option = row.try_get("path").ok(); @@ -206,12 +208,9 @@ impl S3StoragePort { .ok() .flatten() .is_some(); - let target_rel = crate::infrastructure::storage::owner_relative_from_desired( - owner_id, - &desired_path, - archived, - ); - let target_parent_rel = crate::infrastructure::storage::owner_relative_parent_from_desired( + let target_rel = + crate::core::storage::owner_relative_from_desired(owner_id, &desired_path, archived); + let target_parent_rel = crate::core::storage::owner_relative_parent_from_desired( owner_id, &desired_path, archived, @@ -225,10 +224,8 @@ impl S3StoragePort { self.copy_object(&src_key, &dst_key).await?; self.delete_object(&src_key).await?; } - let _ = crate::infrastructure::storage::mark_dirty_delete_relative( - &self.pool, &old_rel, - ) - .await; + let _ = + crate::core::storage::mark_dirty_delete_relative(&self.pool, &old_rel).await; } } @@ -246,7 +243,7 @@ impl S3StoragePort { let old_path: String = row.get("storage_path"); let new_path = dst_attachments.join(&filename); let new_rel_attachment = - crate::infrastructure::storage::relative_from_uploads(&self.root, &new_path) + crate::core::storage::relative_from_uploads(&self.root, &new_path) .replace('\\', "/"); if old_path != new_rel_attachment { let src_key = self.relative_to_key(&old_path); @@ -264,11 +261,9 @@ impl S3StoragePort { .execute(&self.pool) .await?; } - let _ = 
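// Dirty-tracking calls throughout this file stay best-effort
// (`let _ = ...`): a failed mark must not abort the storage operation that
// triggered it.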
crate::infrastructure::storage::mark_dirty_delete_relative( - &self.pool, &old_path, - ) - .await; - let _ = crate::infrastructure::storage::mark_dirty_upsert_relative( + let _ = + crate::core::storage::mark_dirty_delete_relative(&self.pool, &old_path).await; + let _ = crate::core::storage::mark_dirty_upsert_relative( &self.pool, &new_rel_attachment, false, @@ -286,13 +281,9 @@ impl S3StoragePort { .execute(&self.pool) .await?; - let _ = crate::infrastructure::storage::mark_dirty_upsert_relative( - &self.pool, - &target_rel, - true, - None, - ) - .await; + let _ = + crate::core::storage::mark_dirty_upsert_relative(&self.pool, &target_rel, true, None) + .await; Ok(()) } @@ -328,81 +319,103 @@ fn sanitize_filename(name: &str) -> String { #[async_trait] impl StorageProjectionPort for S3StoragePort { - async fn move_folder_subtree(&self, folder_id: Uuid) -> anyhow::Result { - let ids = - crate::infrastructure::storage::list_descendant_docs(&self.pool, folder_id).await?; - for id in &ids { - self.move_doc_paths(*id).await?; + async fn move_folder_subtree(&self, folder_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let ids = crate::core::storage::list_descendant_docs(&self.pool, folder_id).await?; + for id in &ids { + self.move_doc_paths(*id).await?; + } + Ok(ids.len()) } - Ok(ids.len()) + .await; + out.map_err(Into::into) } - async fn delete_doc_physical(&self, doc_id: Uuid) -> anyhow::Result<()> { - use sqlx::Row; - - let row = sqlx::query("SELECT type, path FROM documents WHERE id = $1") - .bind(doc_id) - .fetch_optional(&self.pool) - .await?; - let row = match row { - Some(r) => r, - None => return Ok(()), - }; - let dtype: String = row.get("type"); - if dtype == "folder" { - return Ok(()); - } - if let Some(path) = row.try_get::("path").ok() { - let key = self.relative_to_key(&path); - let _ = self.delete_object(&key).await; - } + async fn delete_doc_physical(&self, doc_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + use sqlx::Row; - let attachments = sqlx::query("SELECT storage_path FROM files WHERE document_id = $1") - .bind(doc_id) - .fetch_all(&self.pool) - .await?; - for row in attachments { - if let Ok(storage_path) = row.try_get::("storage_path") { - let key = self.relative_to_key(&storage_path); + let row = sqlx::query("SELECT type, path FROM documents WHERE id = $1") + .bind(doc_id) + .fetch_optional(&self.pool) + .await?; + let row = match row { + Some(r) => r, + None => return Ok(()), + }; + let dtype: String = row.get("type"); + if dtype == DOC_TYPE_FOLDER { + return Ok(()); + } + if let Ok(path) = row.try_get::("path") { + let key = self.relative_to_key(&path); let _ = self.delete_object(&key).await; } + + let attachments = sqlx::query("SELECT storage_path FROM files WHERE document_id = $1") + .bind(doc_id) + .fetch_all(&self.pool) + .await?; + for row in attachments { + if let Ok(storage_path) = row.try_get::("storage_path") { + let key = self.relative_to_key(&storage_path); + let _ = self.delete_object(&key).await; + } + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } - async fn delete_folder_physical(&self, folder_id: Uuid) -> anyhow::Result { - let ids = - crate::infrastructure::storage::list_descendant_docs(&self.pool, folder_id).await?; - for id in &ids { - self.delete_doc_physical(*id).await?; + async fn delete_folder_physical(&self, folder_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let ids = crate::core::storage::list_descendant_docs(&self.pool, folder_id).await?; + for id in &ids { + 
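// Folder deletion fans out to one delete_doc_physical per descendant doc,
// and each of those issues per-object S3 deletes -- large folders therefore
// cost many sequential round-trips (there is no batched DeleteObjects
// here).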
self.delete_doc_physical(*id).await?; + } + Ok(ids.len()) } - Ok(ids.len()) + .await; + out.map_err(Into::into) } - async fn sync_doc_paths(&self, doc_id: Uuid) -> anyhow::Result<()> { - self.move_doc_paths(doc_id).await + async fn sync_doc_paths(&self, doc_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { self.move_doc_paths(doc_id).await }.await; + out.map_err(Into::into) } - async fn delete_relative_path(&self, rel: &str) -> anyhow::Result<()> { - let key = self.relative_to_key(rel); - let _ = self.delete_object(&key).await; - crate::infrastructure::storage::mark_dirty_delete_relative(&self.pool, rel).await?; - Ok(()) + async fn delete_relative_path(&self, rel: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let key = self.relative_to_key(rel); + let _ = self.delete_object(&key).await; + crate::core::storage::mark_dirty_delete_relative(&self.pool, rel).await?; + Ok(()) + } + .await; + out.map_err(Into::into) } } #[async_trait] impl StorageResolverPort for S3StoragePort { - async fn build_doc_dir(&self, doc_id: Uuid) -> anyhow::Result { - crate::infrastructure::storage::build_doc_dir(&self.pool, &self.root, doc_id).await + async fn build_doc_dir(&self, doc_id: Uuid) -> PortResult { + let out: anyhow::Result = + async { crate::core::storage::build_doc_dir(&self.pool, &self.root, doc_id).await } + .await; + out.map_err(Into::into) } - async fn build_doc_file_path(&self, doc_id: Uuid) -> anyhow::Result { - crate::infrastructure::storage::build_doc_file_path(&self.pool, &self.root, doc_id).await + async fn build_doc_file_path(&self, doc_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + crate::core::storage::build_doc_file_path(&self.pool, &self.root, doc_id).await + } + .await; + out.map_err(Into::into) } fn relative_from_uploads(&self, abs: &Path) -> String { - crate::infrastructure::storage::relative_from_uploads(&self.root, abs).replace('\\', "/") + crate::core::storage::relative_from_uploads(&self.root, abs).replace('\\', "/") } fn user_repo_dir(&self, user_id: Uuid) -> String { @@ -418,89 +431,105 @@ impl StorageResolverPort for S3StoragePort { self.root.join(rel) } - async fn resolve_upload_path(&self, doc_id: Uuid, rest_path: &str) -> anyhow::Result { - let doc_dir = crate::infrastructure::storage::build_doc_dir(&self.pool, &self.root, doc_id) - .await? - .to_path_buf(); - if !doc_dir.starts_with(&self.root) { - anyhow::bail!("forbidden"); - } + async fn resolve_upload_path(&self, doc_id: Uuid, rest_path: &str) -> PortResult { + let out: anyhow::Result = async { + let doc_dir = crate::core::storage::build_doc_dir(&self.pool, &self.root, doc_id) + .await? 
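// Same traversal guards as the fs port further down (Component allow-list
// plus starts_with(&self.root) containment), but existence is verified with
// a HEAD request via object_exists instead of fs::try_exists.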
+ .to_path_buf(); + if !doc_dir.starts_with(&self.root) { + anyhow::bail!("forbidden"); + } - let mut relative = PathBuf::new(); - for component in Path::new(rest_path).components() { - match component { - Component::Normal(part) => relative.push(part), - Component::CurDir => continue, - _ => anyhow::bail!("forbidden"), + let mut relative = PathBuf::new(); + for component in Path::new(rest_path).components() { + match component { + Component::Normal(part) => relative.push(part), + Component::CurDir => continue, + _ => anyhow::bail!("forbidden"), + } + } + if relative.as_os_str().is_empty() { + anyhow::bail!("forbidden"); } - } - if relative.as_os_str().is_empty() { - anyhow::bail!("forbidden"); - } - let full_path = doc_dir.join(&relative); - if !full_path.starts_with(&self.root) { - anyhow::bail!("forbidden"); - } + let full_path = doc_dir.join(&relative); + if !full_path.starts_with(&self.root) { + anyhow::bail!("forbidden"); + } - let rel = crate::infrastructure::storage::relative_from_uploads(&self.root, &full_path) - .replace('\\', "/"); - let key = self.relative_to_key(&rel); - if !self.object_exists(&key).await? { - anyhow::bail!("not_found"); + let rel = crate::core::storage::relative_from_uploads(&self.root, &full_path) + .replace('\\', "/"); + let key = self.relative_to_key(&rel); + if !self.object_exists(&key).await? { + anyhow::bail!("not_found"); + } + Ok(full_path) } - Ok(full_path) + .await; + out.map_err(Into::into) } - async fn read_bytes(&self, abs_path: &Path) -> anyhow::Result> { - let key = self.key_from_path(abs_path); - let resp = self - .client - .get_object() - .bucket(&self.bucket) - .key(&key) - .send() - .await; - - let object = match resp { - Ok(obj) => obj, - Err(SdkError::ServiceError(service_err)) => { - if service_err.err().is_no_such_key() { - let err = - io::Error::new(io::ErrorKind::NotFound, format!("object {key} not found")); - return Err(err.into()); + async fn read_bytes(&self, abs_path: &Path) -> PortResult> { + let out: anyhow::Result> = async { + let key = self.key_from_path(abs_path); + let resp = self + .client + .get_object() + .bucket(&self.bucket) + .key(&key) + .send() + .await; + + let object = match resp { + Ok(obj) => obj, + Err(SdkError::ServiceError(service_err)) => { + if service_err.err().is_no_such_key() { + let err = io::Error::new( + io::ErrorKind::NotFound, + format!("object {key} not found"), + ); + return Err(err.into()); + } + return Err(anyhow!("failed to get object {key}: {}", service_err.err())); } - return Err(anyhow!("failed to get object {key}: {}", service_err.err())); - } - Err(err) => { - return Err(anyhow!("failed to get object {key}: {err}")); - } - }; + Err(err) => { + return Err(anyhow!("failed to get object {key}: {err}")); + } + }; - let mut reader = object.body.into_async_read(); - let mut data = Vec::new(); - reader.read_to_end(&mut data).await?; - Ok(data) + let mut reader = object.body.into_async_read(); + let mut data = Vec::new(); + reader.read_to_end(&mut data).await?; + Ok(data) + } + .await; + out.map_err(Into::into) } - async fn exists(&self, abs_path: &Path) -> anyhow::Result { - let key = self.key_from_path(abs_path); - self.object_exists(&key).await + async fn exists(&self, abs_path: &Path) -> PortResult { + let out: anyhow::Result = async { + let key = self.key_from_path(abs_path); + self.object_exists(&key).await + } + .await; + out.map_err(Into::into) } - async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> anyhow::Result<()> { - if let Some(parent) = abs_path.parent() { - 
tokio::fs::create_dir_all(parent).await.ok(); + async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> PortResult<()> { + let out: anyhow::Result<()> = async { + if let Some(parent) = abs_path.parent() { + tokio::fs::create_dir_all(parent).await.ok(); + } + let key = self.key_from_path(abs_path); + self.put_object(&key, data).await?; + let rel = crate::core::storage::relative_from_uploads(&self.root, abs_path) + .replace('\\', "/"); + let _ = crate::core::storage::mark_dirty_upsert_relative(&self.pool, &rel, true, None) + .await; + Ok(()) } - let key = self.key_from_path(abs_path); - self.put_object(&key, data).await?; - let rel = crate::infrastructure::storage::relative_from_uploads(&self.root, abs_path) - .replace('\\', "/"); - let _ = crate::infrastructure::storage::mark_dirty_upsert_relative( - &self.pool, &rel, true, None, - ) .await; - Ok(()) + out.map_err(Into::into) } async fn store_doc_attachment( @@ -508,61 +537,63 @@ impl StorageResolverPort for S3StoragePort { doc_id: Uuid, original_filename: Option<&str>, bytes: &[u8], - ) -> anyhow::Result { - use tokio::fs; + ) -> PortResult { + let out: anyhow::Result = async { + use tokio::fs; - let base_dir = - crate::infrastructure::storage::build_doc_dir(&self.pool, &self.root, doc_id) + let base_dir = crate::core::storage::build_doc_dir(&self.pool, &self.root, doc_id) .await? .to_path_buf(); - let attachments_dir = base_dir.join("attachments"); - let _ = fs::create_dir_all(&attachments_dir).await; - - let sanitized = sanitize_filename(original_filename.unwrap_or("attachment")); - let mut target = attachments_dir.join(&sanitized); - let mut relative = - crate::infrastructure::storage::relative_from_uploads(&self.root, &target) - .replace('\\', "/"); - let mut counter = 1; - loop { - let key = self.relative_to_key(&relative); - if !self.object_exists(&key).await? { - break; + let attachments_dir = base_dir.join("attachments"); + let _ = fs::create_dir_all(&attachments_dir).await; + + let sanitized = sanitize_filename(original_filename.unwrap_or("attachment")); + let mut target = attachments_dir.join(&sanitized); + let mut relative = + crate::core::storage::relative_from_uploads(&self.root, &target).replace('\\', "/"); + let mut counter = 1; + loop { + let key = self.relative_to_key(&relative); + if !self.object_exists(&key).await? 
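// Check-then-put against S3 is not atomic: two concurrent uploads of the
// same filename can still race past this probe. The -N suffix loop only
// defends against objects that already exist.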
{ + break; + } + let stem = target + .file_stem() + .and_then(|s| s.to_str()) + .unwrap_or("attachment"); + let ext = target + .extension() + .and_then(|s| s.to_str()) + .filter(|s| !s.is_empty()) + .map(|s| format!(".{s}")) + .unwrap_or_default(); + let new_name = format!("{stem}-{counter}{ext}"); + target = attachments_dir.join(&new_name); + relative = crate::core::storage::relative_from_uploads(&self.root, &target) + .replace('\\', "/"); + counter += 1; } - let stem = target - .file_stem() - .and_then(|s| s.to_str()) - .unwrap_or("attachment"); - let ext = target - .extension() - .and_then(|s| s.to_str()) - .filter(|s| !s.is_empty()) - .map(|s| format!(".{s}")) - .unwrap_or_default(); - let new_name = format!("{stem}-{counter}{ext}"); - target = attachments_dir.join(&new_name); - relative = crate::infrastructure::storage::relative_from_uploads(&self.root, &target) - .replace('\\', "/"); - counter += 1; - } - if let Some(parent) = target.parent() { - let _ = fs::create_dir_all(parent).await; + if let Some(parent) = target.parent() { + let _ = fs::create_dir_all(parent).await; + } + let key = self.relative_to_key(&relative); + self.put_object(&key, bytes).await?; + let size = bytes.len() as i64; + let hash = sha256_hex(bytes); + Ok(StoredAttachment { + filename: target + .file_name() + .and_then(|s| s.to_str()) + .unwrap_or("attachment") + .to_string(), + relative_path: relative, + size, + content_hash: hash, + }) } - let key = self.relative_to_key(&relative); - self.put_object(&key, bytes).await?; - let size = bytes.len() as i64; - let hash = sha256_hex(bytes); - Ok(StoredAttachment { - filename: target - .file_name() - .and_then(|s| s.to_str()) - .unwrap_or("attachment") - .to_string(), - relative_path: relative, - size, - content_hash: hash, - }) + .await; + out.map_err(Into::into) } } diff --git a/api/crates/infrastructure/src/core/storage/storage_port_impl.rs b/api/crates/infrastructure/src/core/storage/storage_port_impl.rs new file mode 100644 index 00000000..50bf2897 --- /dev/null +++ b/api/crates/infrastructure/src/core/storage/storage_port_impl.rs @@ -0,0 +1,311 @@ +use std::path::{Path, PathBuf}; +use uuid::Uuid; + +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_port::{ + StorageProjectionPort, StorageResolverPort, StoredAttachment, +}; +use application::core::services::utils::hash::sha256_hex; + +pub struct FsStoragePort { + pub pool: crate::core::db::PgPool, + pub uploads_root: PathBuf, +} + +#[async_trait::async_trait] +impl StorageProjectionPort for FsStoragePort { + async fn move_folder_subtree(&self, folder_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + crate::core::storage::move_folder_subtree( + &self.pool, + self.uploads_root.as_path(), + folder_id, + ) + .await + } + .await; + out.map_err(Into::into) + } + + async fn delete_doc_physical(&self, doc_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + crate::core::storage::delete_doc_physical( + &self.pool, + self.uploads_root.as_path(), + doc_id, + ) + .await + } + .await; + out.map_err(Into::into) + } + + async fn delete_folder_physical(&self, folder_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + crate::core::storage::delete_folder_physical( + &self.pool, + self.uploads_root.as_path(), + folder_id, + ) + .await + } + .await; + out.map_err(Into::into) + } + + async fn sync_doc_paths(&self, doc_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + crate::core::storage::move_doc_paths(&self.pool, 
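// FsStoragePort is a thin adapter: each port method delegates to the free
// functions that moved into core::storage (paths.rs) and applies the same
// anyhow -> PortResult conversion used throughout this diff.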
self.uploads_root.as_path(), doc_id) + .await + } + .await; + out.map_err(Into::into) + } + + async fn delete_relative_path(&self, rel: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + use std::io::ErrorKind; + + let abs = self.absolute_from_relative(rel); + if tokio::fs::try_exists(&abs).await.unwrap_or(false) { + match tokio::fs::metadata(&abs).await { + Ok(meta) => { + if meta.is_dir() { + tokio::fs::remove_dir_all(&abs).await?; + } else { + tokio::fs::remove_file(&abs).await?; + } + } + Err(err) if err.kind() == ErrorKind::NotFound => {} + Err(err) => return Err(err.into()), + } + crate::core::storage::mark_dirty_delete_relative(&self.pool, rel).await?; + } + Ok(()) + } + .await; + out.map_err(Into::into) + } +} + +#[async_trait::async_trait] +impl StorageResolverPort for FsStoragePort { + async fn build_doc_dir(&self, doc_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + crate::core::storage::build_doc_dir(&self.pool, self.uploads_root.as_path(), doc_id) + .await + } + .await; + out.map_err(Into::into) + } + + async fn build_doc_file_path(&self, doc_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + crate::core::storage::build_doc_file_path( + &self.pool, + self.uploads_root.as_path(), + doc_id, + ) + .await + } + .await; + out.map_err(Into::into) + } + + fn relative_from_uploads(&self, abs: &Path) -> String { + crate::core::storage::relative_from_uploads(self.uploads_root.as_path(), abs) + } + + fn user_repo_dir(&self, user_id: Uuid) -> String { + let path = self.uploads_root.join(user_id.to_string()); + path.to_string_lossy().to_string() + } + + fn absolute_from_relative(&self, rel: &str) -> PathBuf { + self.uploads_root.join(rel) + } + + async fn resolve_upload_path(&self, doc_id: Uuid, rest_path: &str) -> PortResult { + let out: anyhow::Result = async { + use std::path::Component; + use tokio::fs; + + // Build base directory for the document (guaranteed to live under uploads dir). + let doc_dir = crate::core::storage::build_doc_dir( + &self.pool, + self.uploads_root.as_path(), + doc_id, + ) + .await?; + let uploads_root = self.uploads_root.as_path(); + + if !doc_dir.starts_with(uploads_root) { + anyhow::bail!("forbidden"); + } + + // Normalise the rest path and reject any traversal attempts. 
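// Only Component::Normal segments survive the loop below: `..` (ParentDir),
// a leading `/` (RootDir), and Windows path prefixes all bail with
// "forbidden". The joined result is then re-checked with
// starts_with(uploads_root) as a purely lexical containment check --
// symlinks are not resolved here.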
+ let mut relative = PathBuf::new(); + for component in Path::new(rest_path).components() { + match component { + Component::Normal(part) => relative.push(part), + Component::CurDir => continue, + _ => anyhow::bail!("forbidden"), + } + } + + if relative.as_os_str().is_empty() { + anyhow::bail!("forbidden"); + } + + let full_path = doc_dir.join(relative); + if !full_path.starts_with(uploads_root) { + anyhow::bail!("forbidden"); + } + + if !fs::try_exists(&full_path).await.unwrap_or(false) { + anyhow::bail!("not_found"); + } + + Ok(full_path) + } + .await; + out.map_err(Into::into) + } + + async fn read_bytes(&self, abs_path: &Path) -> PortResult> { + let out: anyhow::Result> = async { + let data = tokio::fs::read(abs_path).await?; + Ok(data) + } + .await; + out.map_err(Into::into) + } + + async fn exists(&self, abs_path: &Path) -> PortResult { + let out: anyhow::Result = + async { Ok(tokio::fs::try_exists(abs_path).await.unwrap_or(false)) }.await; + out.map_err(Into::into) + } + + async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> PortResult<()> { + let out: anyhow::Result<()> = async { + if let Some(parent) = abs_path.parent() { + tokio::fs::create_dir_all(parent).await?; + } + // Short-circuit when content is unchanged to avoid unnecessary dirty tracking. + let new_hash = sha256_hex(data); + if tokio::fs::try_exists(abs_path).await.unwrap_or(false) { + match tokio::fs::read(abs_path).await { + Ok(existing) => { + let old_hex = sha256_hex(&existing); + if old_hex == new_hash { + // No-op write; do not mark dirty. + return Ok(()); + } + } + Err(err) if err.kind() == std::io::ErrorKind::NotFound => {} + Err(err) => return Err(err.into()), + } + } + tokio::fs::write(abs_path, data).await?; + // Mark dirty (best-effort) + let is_text = abs_path + .extension() + .and_then(|e| e.to_str()) + .map(|e| e.eq_ignore_ascii_case("md")) + .unwrap_or(false); + let _ = crate::core::storage::mark_dirty_upsert_abs_path( + &self.pool, + self.uploads_root.as_path(), + abs_path, + is_text, + Some(&new_hash), + ) + .await; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn store_doc_attachment( + &self, + doc_id: Uuid, + original_filename: Option<&str>, + bytes: &[u8], + ) -> PortResult { + let out: anyhow::Result = async { + use tokio::fs; + + let base_dir = crate::core::storage::build_doc_dir( + &self.pool, + self.uploads_root.as_path(), + doc_id, + ) + .await?; + let attachments_dir = base_dir.join("attachments"); + let _ = fs::create_dir_all(&attachments_dir).await; + + let original = original_filename.unwrap_or("file.bin"); + let mut safe = crate::core::storage::sanitize_title(original); + + let ts = chrono::Utc::now().format("%Y%m%d-%H%M%S"); + let (stem, ext) = { + let p = Path::new(&safe); + let stem = p + .file_stem() + .and_then(|s| s.to_str()) + .filter(|s| !s.is_empty()) + .unwrap_or("file") + .to_string(); + let ext = p + .extension() + .and_then(|s| s.to_str()) + .unwrap_or("") + .to_string(); + (stem, ext) + }; + + safe = if ext.is_empty() { + format!("{}_{}", stem, ts) + } else { + format!("{}_{}.{}", stem, ts, ext) + }; + + let mut candidate = attachments_dir.join(&safe); + let mut counter = 1; + while fs::try_exists(&candidate).await.unwrap_or(false) { + let p = Path::new(&safe); + let stem = p.file_stem().and_then(|s| s.to_str()).unwrap_or("file"); + let ext = p.extension().and_then(|s| s.to_str()).unwrap_or(""); + let new_name = if ext.is_empty() { + format!("{}-{}", stem, counter) + } else { + format!("{}-{}.{}", stem, counter, ext) + }; + candidate = 
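// Unlike the S3 variant, the fs port first stamps the name as
// stem_YYYYmmdd-HHMMSS[.ext] and only then falls back to -N suffixes, so
// this collision loop rarely needs more than one pass.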
attachments_dir.join(&new_name);
+                safe = new_name;
+                counter += 1;
+            }
+
+            fs::write(&candidate, bytes).await?;
+            let relative = crate::core::storage::relative_from_uploads(
+                self.uploads_root.as_path(),
+                &candidate,
+            )
+            .replace('\\', "/");
+            let size = bytes.len() as i64;
+
+            let content_hash = sha256_hex(bytes);
+
+            Ok(StoredAttachment {
+                filename: safe,
+                relative_path: relative,
+                size,
+                content_hash,
+            })
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
diff --git a/api/crates/infrastructure/src/core/storage/worker/delete.rs b/api/crates/infrastructure/src/core/storage/worker/delete.rs
new file mode 100644
index 00000000..e2ccfbe3
--- /dev/null
+++ b/api/crates/infrastructure/src/core/storage/worker/delete.rs
@@ -0,0 +1,181 @@
+use std::path::PathBuf;
+
+use tracing::{info, warn};
+use uuid::Uuid;
+
+use application::core::ports::storage::storage_projection_queue::{
+    StorageDeleteJobMetadata, StorageJobReason,
+};
+use application::workspaces::services::permission_snapshot::permission_set_from_snapshot;
+use domain::access::permissions::{
+    PERM_DOC_DELETE, PERM_FILE_DELETE, PERM_FOLDER_DELETE, PermissionSet,
+};
+use domain::documents::doc_type::DocumentType;
+
+use super::StorageProjectionWorker;
+
+pub(super) fn parse_delete_job_metadata(
+    reason: Option<&String>,
+) -> Option<StorageDeleteJobMetadata> {
+    reason.and_then(|raw| {
+        serde_json::from_str::<StorageJobReason<StorageDeleteJobMetadata>>(raw)
+            .ok()
+            .and_then(|wrapper| wrapper.metadata)
+    })
+}
+
+fn workspace_repo_relative(workspace_id: Uuid, repo_path: &str) -> String {
+    let mut full = PathBuf::from(workspace_id.to_string());
+    full.push(repo_path.trim_start_matches('/'));
+    normalize_relative_path(full)
+}
+
+fn normalize_relative_path(path: PathBuf) -> String {
+    path.to_string_lossy().replace('\\', "/")
+}
+
+const FALLBACK_DELETE_PERMISSIONS: &[&str] =
+    &[PERM_DOC_DELETE, PERM_FOLDER_DELETE, PERM_FILE_DELETE];
+
+impl StorageProjectionWorker {
+    pub(super) async fn handle_delete_doc(
+        &self,
+        doc_id: Uuid,
+        metadata: Option<&StorageDeleteJobMetadata>,
+    ) -> anyhow::Result<()> {
+        self.storage.delete_doc_physical(doc_id).await?;
+        if let Some(meta) = metadata {
+            self.delete_doc_by_metadata(meta).await?;
+        }
+        Ok(())
+    }
+
+    pub(super) async fn handle_delete_folder(
+        &self,
+        folder_id: Uuid,
+        metadata: Option<&StorageDeleteJobMetadata>,
+    ) -> anyhow::Result<()> {
+        self.storage.delete_folder_physical(folder_id).await?;
+        if let Some(meta) = metadata {
+            self.delete_folder_by_metadata(meta).await?;
+        }
+        Ok(())
+    }
+
+    pub(super) async fn delete_doc_by_metadata(
+        &self,
+        metadata: &StorageDeleteJobMetadata,
+    ) -> anyhow::Result<()> {
+        let permissions = self.permission_set_from_metadata(metadata).await?;
+        if metadata.doc_type == DocumentType::Folder {
+            if !permissions.allows(PERM_FOLDER_DELETE) {
+                warn!(
+                    workspace_id = %metadata.workspace_id,
+                    "storage_projection_folder_delete_permission_denied"
+                );
+            }
+            return Ok(());
+        }
+        if !permissions.allows(PERM_DOC_DELETE) {
+            warn!(
+                workspace_id = %metadata.workspace_id,
+                "storage_projection_doc_delete_permission_denied"
+            );
+            return Ok(());
+        }
+        let Some(repo_path) = metadata.repo_path.as_deref() else {
+            return Ok(());
+        };
+        let doc_relative = workspace_repo_relative(metadata.workspace_id, repo_path);
+        self.storage.delete_relative_path(&doc_relative).await?;
+        if let Some(paths) = metadata.attachment_paths.as_ref() {
+            let can_delete_attachments = permissions.allows(PERM_FILE_DELETE);
+            for rel in paths {
+                if !can_delete_attachments {
+                    warn!(
+                        workspace_id = %metadata.workspace_id,
+                        attachment_path = rel.as_str(),
+                        "storage_projection_attachment_delete_permission_denied"
+                    );
+                    break;
+                }
+                if let Err(err) = self.storage.delete_relative_path(rel).await {
+                    warn!(
+                        workspace_id = %metadata.workspace_id,
+                        attachment_path = rel.as_str(),
+                        error = ?err,
+                        "storage_attachment_delete_failed"
+                    );
+                }
+            }
+        }
+        Ok(())
+    }
+
+    pub(super) async fn delete_folder_by_metadata(
+        &self,
+        metadata: &StorageDeleteJobMetadata,
+    ) -> anyhow::Result<()> {
+        let Some(repo_path) = metadata.repo_path.as_deref() else {
+            return Ok(());
+        };
+        let permissions = self.permission_set_from_metadata(metadata).await?;
+        if !permissions.allows(PERM_FOLDER_DELETE) {
+            warn!(
+                workspace_id = %metadata.workspace_id,
+                "storage_projection_folder_delete_permission_denied"
+            );
+            return Ok(());
+        }
+        let folder_relative = workspace_repo_relative(metadata.workspace_id, repo_path);
+        self.storage.delete_relative_path(&folder_relative).await?;
+        Ok(())
+    }
+
+    pub(super) async fn permission_set_from_metadata(
+        &self,
+        metadata: &StorageDeleteJobMetadata,
+    ) -> anyhow::Result<PermissionSet> {
+        let set = permission_set_from_snapshot(&metadata.permission_snapshot);
+        if !set.is_empty() {
+            return Ok(set);
+        }
+        if let Some(actor_id) = metadata.actor_id {
+            match self
+                .permission_resolver
+                .load_permission_set(metadata.workspace_id, actor_id)
+                .await
+            {
+                Ok(Some(resolved)) => {
+                    info!(
+                        workspace_id = %metadata.workspace_id,
+                        actor_id = %actor_id,
+                        "storage_projection_permissions_rehydrated"
+                    );
+                    return Ok(resolved);
+                }
+                Ok(None) => {
+                    warn!(
+                        workspace_id = %metadata.workspace_id,
+                        actor_id = %actor_id,
+                        "storage_projection_actor_missing_for_permissions"
+                    );
+                }
+                Err(err) => {
+                    warn!(
+                        error = ?err,
+                        workspace_id = %metadata.workspace_id,
+                        actor_id = %actor_id,
+                        "storage_projection_permission_resolve_failed"
+                    );
+                }
+            }
+        } else {
+            warn!(
+                workspace_id = %metadata.workspace_id,
+                "storage_projection_permission_snapshot_missing_no_actor"
+            );
+        }
+        Ok(PermissionSet::from_slice(FALLBACK_DELETE_PERMISSIONS))
+    }
+}
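
For reviewers tracing the queue contract: `parse_delete_job_metadata` assumes the job's `reason` column holds a JSON wrapper whose optional `metadata` field carries the delete context. A minimal sketch of that round-trip with hypothetical stand-in types (the real `StorageJobReason` lives in the application crate; its exact shape is an assumption here):

    use serde::{Deserialize, Serialize};

    // Hypothetical stand-ins for illustration only; field names are assumptions.
    #[derive(Serialize, Deserialize)]
    struct Reason<M> {
        label: String,
        metadata: Option<M>,
    }

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct Meta {
        repo_path: Option<String>,
    }

    fn main() {
        let raw = r#"{"label":"doc_deleted","metadata":{"repo_path":"docs/foo.md"}}"#;
        // Mirrors the worker: a parse failure or a missing metadata field yields None.
        let meta = serde_json::from_str::<Reason<Meta>>(raw)
            .ok()
            .and_then(|wrapper| wrapper.metadata);
        assert_eq!(meta, Some(Meta { repo_path: Some("docs/foo.md".into()) }));
    }

When the result is `None`, `handle_delete_doc` above degrades to the plain physical delete and skips the metadata-driven path cleanup.
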
diff --git a/api/crates/infrastructure/src/core/storage/worker/doc_sync.rs b/api/crates/infrastructure/src/core/storage/worker/doc_sync.rs
new file mode 100644
index 00000000..9636395e
--- /dev/null
+++ b/api/crates/infrastructure/src/core/storage/worker/doc_sync.rs
@@ -0,0 +1,24 @@
+use uuid::Uuid;
+
+use super::StorageProjectionWorker;
+
+impl StorageProjectionWorker {
+    pub(super) async fn handle_doc_sync(&self, doc_id: Uuid) -> anyhow::Result<()> {
+        self.storage.sync_doc_paths(doc_id).await?;
+        self.persist_markdown(doc_id).await
+    }
+
+    async fn persist_markdown(&self, doc_id: Uuid) -> anyhow::Result<()> {
+        if let Some(export) = self.markdown.export_markdown_for_doc(&doc_id).await? {
+            let path = self.resolver.build_doc_file_path(doc_id).await?;
+            self.resolver
+                .write_bytes(path.as_path(), &export.bytes)
+                .await?;
+            if let Some(repo_path) = export.repo_path.as_deref() {
+                self.recent_exports
+                    .record(export.workspace_id, repo_path, &export.content_hash);
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/api/crates/infrastructure/src/core/storage/worker/folder_sync.rs b/api/crates/infrastructure/src/core/storage/worker/folder_sync.rs
new file mode 100644
index 00000000..86e0653f
--- /dev/null
+++ b/api/crates/infrastructure/src/core/storage/worker/folder_sync.rs
@@ -0,0 +1,10 @@
+use uuid::Uuid;
+
+use super::StorageProjectionWorker;
+
+impl StorageProjectionWorker {
+    pub(super) async fn handle_folder_sync(&self, folder_id: Uuid) -> anyhow::Result<()> {
+        self.storage.move_folder_subtree(folder_id).await?;
+        Ok(())
+    }
+}
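
`persist_markdown` records every export in `RecentProjectionCache` keyed by workspace, repo path, and content hash — presumably so a file watcher can recognize the worker's own writes and skip re-importing them. A self-contained sketch of that dedupe idea (toy types; nothing here beyond the `record` call above reflects the real cache API):

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    // Toy cache: remembers the last content hash written per path, with a TTL.
    struct RecentCache {
        ttl: Duration,
        seen: HashMap<String, (String, Instant)>,
    }

    impl RecentCache {
        fn record(&mut self, path: &str, hash: &str) {
            self.seen.insert(path.to_string(), (hash.to_string(), Instant::now()));
        }

        // A watcher event can be skipped if it matches a hash we just exported.
        fn is_own_echo(&self, path: &str, hash: &str) -> bool {
            self.seen
                .get(path)
                .map(|(h, at)| h == hash && at.elapsed() <= self.ttl)
                .unwrap_or(false)
        }
    }

    fn main() {
        let mut cache = RecentCache { ttl: Duration::from_secs(5), seen: HashMap::new() };
        cache.record("docs/mock.md", "hash");
        assert!(cache.is_own_echo("docs/mock.md", "hash"));
        assert!(!cache.is_own_echo("docs/other.md", "hash"));
    }
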
diff --git a/api/crates/infrastructure/src/core/storage/worker/mod.rs b/api/crates/infrastructure/src/core/storage/worker/mod.rs
new file mode 100644
index 00000000..0a466b68
--- /dev/null
+++ b/api/crates/infrastructure/src/core/storage/worker/mod.rs
@@ -0,0 +1,282 @@
+use std::sync::Arc;
+use std::time::Duration;
+
+use anyhow::Error;
+use serde_json::json;
+use sqlx::Error as SqlxError;
+use tracing::{Instrument, error, info, info_span, warn};
+use uuid::Uuid;
+
+mod delete;
+mod doc_sync;
+mod folder_sync;
+
+use crate::core::storage::suppress_git_dirty;
+use application::core::ports::storage::storage_port::{StorageProjectionPort, StorageResolverPort};
+use application::core::ports::storage::storage_projection_queue::{
+    StorageProjectionJob, StorageProjectionJobKind, StorageProjectionQueue,
+};
+use application::core::services::metrics::MetricsRegistry;
+use application::core::services::storage::projection_cache::RecentProjectionCache;
+use application::documents::ports::doc_event_log::DocEventLog;
+use application::documents::services::realtime::snapshot::MarkdownExportProvider;
+use application::workspaces::services::WorkspacePermissionResolver;
+
+pub struct StorageProjectionWorker {
+    jobs: Arc<dyn StorageProjectionQueue>,
+    storage: Arc<dyn StorageProjectionPort>,
+    resolver: Arc<dyn StorageResolverPort>,
+    markdown: Arc<dyn MarkdownExportProvider>,
+    events: Arc<dyn DocEventLog>,
+    recent_exports: Arc<RecentProjectionCache>,
+    lock_timeout_secs: i64,
+    idle_backoff: Duration,
+    max_attempts: i32,
+    metrics: Arc<MetricsRegistry>,
+    permission_resolver: Arc<dyn WorkspacePermissionResolver>,
+}
+
+impl StorageProjectionWorker {
+    pub fn new(
+        jobs: Arc<dyn StorageProjectionQueue>,
+        storage: Arc<dyn StorageProjectionPort>,
+        resolver: Arc<dyn StorageResolverPort>,
+        markdown: Arc<dyn MarkdownExportProvider>,
+        events: Arc<dyn DocEventLog>,
+        metrics: Arc<MetricsRegistry>,
+        permission_resolver: Arc<dyn WorkspacePermissionResolver>,
+        recent_exports: Arc<RecentProjectionCache>,
+    ) -> Self {
+        Self {
+            jobs,
+            storage,
+            resolver,
+            markdown,
+            events,
+            recent_exports,
+            lock_timeout_secs: 30,
+            idle_backoff: Duration::from_millis(500),
+            max_attempts: 5,
+            metrics,
+            permission_resolver,
+        }
+    }
+
+    pub fn with_lock_timeout(mut self, secs: i64) -> Self {
+        self.lock_timeout_secs = secs;
+        self
+    }
+
+    pub fn with_idle_backoff(mut self, backoff: Duration) -> Self {
+        self.idle_backoff = backoff;
+        self
+    }
+
+    pub fn with_max_attempts(mut self, attempts: i32) -> Self {
+        self.max_attempts = attempts.max(1);
+        self
+    }
+
+    pub async fn run(self: Arc<Self>) {
+        loop {
+            match self.jobs.fetch_next_job(self.lock_timeout_secs).await {
+                Ok(Some(job)) => {
+                    if let Err(err) = self.process_job(job).await {
+                        error!(error = ?err, "storage_projection_job_failed");
+                    }
+                    continue;
+                }
+                Ok(None) => {
+                    tokio::time::sleep(self.idle_backoff).await;
+                }
+                Err(err) => {
+                    error!(error = ?err, "storage_projection_job_fetch_failed");
+                    tokio::time::sleep(self.idle_backoff).await;
+                }
+            }
+        }
+    }
+
+    async fn process_job(self: &Arc<Self>, job: StorageProjectionJob) -> anyhow::Result<()> {
+        let span = info_span!(
+            "storage_projection_job",
+            job_id = job.id,
+            job_type = ?job.job_type,
+            doc_id = job.doc_id.map(|id| id.to_string()),
+            folder_id = job.folder_id.map(|id| id.to_string())
+        );
+
+        async move {
+            let delete_metadata = delete::parse_delete_job_metadata(job.reason.as_ref());
+            let result = suppress_git_dirty(async {
+                match job.job_type {
+                    StorageProjectionJobKind::DocSync => {
+                        let doc_id = job
+                            .doc_id
+                            .ok_or_else(|| anyhow::anyhow!("doc_id_required"))?;
+                        let res = self.handle_doc_sync(doc_id).await;
+                        if res.is_ok() {
+                            self.emit_projection_event(doc_id, &job, "succeeded", None)
+                                .await;
+                        }
+                        res
+                    }
+                    StorageProjectionJobKind::FolderSync => {
+                        self.handle_folder_sync(
+                            job.folder_id
+                                .ok_or_else(|| anyhow::anyhow!("folder_id_required"))?,
+                        )
+                        .await
+                    }
+                    StorageProjectionJobKind::DeleteDoc => {
+                        let doc_id = job
+                            .doc_id
+                            .ok_or_else(|| anyhow::anyhow!("doc_id_required"))?;
+                        let res = self
+                            .handle_delete_doc(doc_id, delete_metadata.as_ref())
+                            .await;
+                        if res.is_ok() {
+                            self.emit_projection_event(doc_id, &job, "succeeded", None)
+                                .await;
+                        }
+                        res
+                    }
+                    StorageProjectionJobKind::DeleteFolder => {
+                        self.handle_delete_folder(
+                            job.folder_id
+                                .ok_or_else(|| anyhow::anyhow!("folder_id_required"))?,
+                            delete_metadata.as_ref(),
+                        )
+                        .await
+                    }
+                }
+            })
+            .await;
+
+            match result {
+                Ok(()) => {
+                    self.jobs.complete_job(job.id, job.locked_at).await?;
+                    self.metrics.inc_storage_projection_success();
+                    info!("storage_projection_job_succeeded");
+                }
+                Err(err) if missing_target(&err) => {
+                    warn!(
+                        error = ?err,
+                        "storage_projection_job_missing_target_skip"
+                    );
+                    self.jobs.complete_job(job.id, job.locked_at).await?;
+                    self.metrics.inc_storage_projection_success();
+                    if let Some(doc_id) = job.doc_id {
+                        self.emit_projection_event(
+                            doc_id,
+                            &job,
+                            "skipped",
+                            Some(&format!("{err:#}")),
+                        )
+                        .await;
+                    }
+                }
+                Err(err) => {
+                    let msg = format!("{err:#}");
+                    if job.attempts >= self.max_attempts {
+                        self.jobs.complete_job(job.id, job.locked_at).await?;
+                        self.metrics.inc_storage_projection_failure();
+                        warn!(
+                            error = ?err,
+                            attempts = job.attempts,
+                            "storage_projection_job_gave_up"
+                        );
+                        if let Some(doc_id) = job.doc_id {
+                            self.emit_projection_event(
+                                doc_id,
+                                &job,
+                                "failed",
+                                Some("max_attempts_exceeded"),
+                            )
+                            .await;
+                        }
+                    } else {
+                        self.jobs.fail_job(job.id, job.locked_at, &msg).await?;
+                        self.metrics.inc_storage_projection_retry();
+                        warn!(error = ?err, "storage_projection_job_failed_once");
+                        if let Some(doc_id) = job.doc_id {
+                            self.emit_projection_event(doc_id, &job, "failed", Some(&msg))
+                                .await;
+                        }
+                    }
+                }
+            }
+
+            Ok(())
+        }
+        .instrument(span)
+        .await
+    }
+}
+
+impl StorageProjectionWorker {
+    async fn emit_projection_event(
+        &self,
+        doc_id: Uuid,
+        job: &StorageProjectionJob,
+        status: &str,
+        error: Option<&str>,
+    ) {
+        let Some(event_type) = projection_event_type(job.job_type) else {
+            return;
+        };
+        let payload = json!({
+            "job_id": job.id,
+            "job_type": job_type_label(job.job_type),
+            "status": status,
+            "reason": job.reason,
+            "attempts": job.attempts,
+            "error": error,
+        });
+        if let Err(err) = self
+            .events
+            .append(job.workspace_id, doc_id, event_type, Some(payload))
+            .await
+        {
+            warn!(
+                error = ?err,
+                doc_id = %doc_id,
+                event_type,
+                "storage_projection_event_emit_failed"
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests;
+
+fn missing_target(err: &Error) -> bool {
+    let needle = "document not found";
+    err.chain().any(|cause| {
+        if let Some(sqlx_err) = cause.downcast_ref::<SqlxError>() {
+            matches!(sqlx_err, SqlxError::RowNotFound)
+        } else if let Some(io_err) = cause.downcast_ref::<std::io::Error>() {
+            io_err.kind() == std::io::ErrorKind::NotFound
+        } else {
+            cause.to_string().to_lowercase().contains(needle)
+        }
+    })
+}
+
+fn job_type_label(kind: StorageProjectionJobKind) -> &'static str {
+    match kind {
+        StorageProjectionJobKind::DocSync => "doc_sync",
+        StorageProjectionJobKind::FolderSync => "folder_sync",
+        StorageProjectionJobKind::DeleteDoc => "delete_doc",
+        StorageProjectionJobKind::DeleteFolder => "delete_folder",
+    }
+}
+
+fn projection_event_type(kind: StorageProjectionJobKind) -> Option<&'static str> {
+    match kind {
+        StorageProjectionJobKind::DocSync => Some("storage.projection.doc_sync"),
+        StorageProjectionJobKind::DeleteDoc => Some("storage.projection.doc_delete"),
+        _ => None,
+    }
+}
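
The retry policy above reduces to a small decision table: success completes the job, a missing target completes it as a deliberate skip, exhausted attempts give up (complete plus a failure metric), and anything else re-queues. A self-contained sketch of that classification (names are illustrative, not the worker's API):

    #[derive(Debug, PartialEq)]
    enum Outcome {
        Complete, // job done: success, or a missing target we deliberately skip
        Retry,    // transient failure; leave the job queued for another attempt
        GiveUp,   // attempts exhausted; complete the job but count a failure
    }

    fn classify(ok: bool, missing_target: bool, attempts: i32, max_attempts: i32) -> Outcome {
        if ok || missing_target {
            Outcome::Complete
        } else if attempts >= max_attempts {
            Outcome::GiveUp
        } else {
            Outcome::Retry
        }
    }

    fn main() {
        assert_eq!(classify(true, false, 0, 5), Outcome::Complete);
        assert_eq!(classify(false, true, 0, 5), Outcome::Complete); // row/file already gone
        assert_eq!(classify(false, false, 5, 5), Outcome::GiveUp);
        assert_eq!(classify(false, false, 1, 5), Outcome::Retry);
    }

Note that the `missing_target` check runs before the attempt counter, so a deleted-underneath-us document never burns retries.
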
diff --git a/api/crates/infrastructure/src/core/storage/worker/tests.rs b/api/crates/infrastructure/src/core/storage/worker/tests.rs
new file mode 100644
index 00000000..fd69653e
--- /dev/null
+++ b/api/crates/infrastructure/src/core/storage/worker/tests.rs
@@ -0,0 +1,527 @@
+use super::*;
+
+use async_trait::async_trait;
+use std::path::{Path, PathBuf};
+use std::sync::Mutex;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+use application::core::ports::storage::storage_projection_queue::StorageDeleteJobMetadata;
+use domain::access::permissions::{
+    PERM_DOC_DELETE, PERM_FILE_DELETE, PERM_FOLDER_DELETE, PermissionSet,
+};
+use domain::documents::doc_type::DocumentType;
+
+use application::core::ports::errors::PortResult;
+use application::core::ports::storage::storage_port::StoredAttachment;
+use application::core::services::errors::ServiceError;
+use application::documents::services::realtime::snapshot::{
+    MarkdownExport, MarkdownExportProvider,
+};
+
+struct AllowAllPermissions;
+
+#[async_trait]
+impl WorkspacePermissionResolver for AllowAllPermissions {
+    async fn load_permission_set(
+        &self,
+        _workspace_id: Uuid,
+        _user_id: Uuid,
+    ) -> Result<Option<PermissionSet>, ServiceError> {
+        Ok(Some(PermissionSet::all()))
+    }
+}
+
+struct RecordingPermissionResolver {
+    called: AtomicBool,
+}
+
+impl RecordingPermissionResolver {
+    fn new() -> Self {
+        Self {
+            called: AtomicBool::new(false),
+        }
+    }
+
+    fn was_called(&self) -> bool {
+        self.called.load(Ordering::SeqCst)
+    }
+}
+
+#[async_trait]
+impl WorkspacePermissionResolver for RecordingPermissionResolver {
+    async fn load_permission_set(
+        &self,
+        _workspace_id: Uuid,
+        _user_id: Uuid,
+    ) -> Result<Option<PermissionSet>, ServiceError> {
+        self.called.store(true, Ordering::SeqCst);
+        Ok(Some(PermissionSet::from_slice(&[
+            PERM_DOC_DELETE,
+            PERM_FOLDER_DELETE,
+        ])))
+    }
+}
+
+struct NonePermissionResolver;
+
+#[async_trait]
+impl WorkspacePermissionResolver for NonePermissionResolver {
+    async fn load_permission_set(
+        &self,
+        _workspace_id: Uuid,
+        _user_id: Uuid,
+    ) -> Result<Option<PermissionSet>, ServiceError> {
+        Ok(None)
+    }
+}
+
+#[tokio::test]
+async fn doc_sync_invokes_storage_and_completes_job() {
+    let queue = Arc::new(MockQueue::default());
+    let storage = Arc::new(RecordingStoragePort::default());
+    let resolver_impl = Arc::new(MockResolver::default());
+    let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
+    let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
+    let events = Arc::new(RecordingDocEventLog::default());
+    let metrics = Arc::new(MetricsRegistry::default());
+    let permission_resolver: Arc<dyn WorkspacePermissionResolver> = Arc::new(AllowAllPermissions);
+    let worker = Arc::new(StorageProjectionWorker::new(
+        queue.clone(),
+        storage.clone(),
+        resolver.clone(),
+        markdown.clone(),
+        events.clone(),
+        metrics.clone(),
+        permission_resolver.clone(),
+        Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
+    ));
+    let job = StorageProjectionJob {
+        id: 1,
+        workspace_id: Uuid::new_v4(),
+        job_type: StorageProjectionJobKind::DocSync,
+        doc_id: Some(Uuid::new_v4()),
+        folder_id: None,
+        reason: None,
+        attempts: 0,
+        locked_at: chrono::Utc::now(),
+    };
+    worker.process_job(job).await.unwrap();
+    assert_eq!(queue.completed(), vec![1]);
+    assert_eq!(storage.calls(), vec!["sync_doc_paths".to_string()]);
+    assert_eq!(events.events().len(), 1);
+    assert_eq!(events.events()[0].2, "storage.projection.doc_sync");
+    assert_eq!(resolver_impl.writes().len(), 1);
+    assert_eq!(metrics.snapshot().storage_projection_success, 1);
+}
+
+#[tokio::test]
+async fn failing_doc_sync_marks_job_failed() {
+    let queue = Arc::new(MockQueue::default());
+    let storage = Arc::new(RecordingStoragePort::default());
+    storage.fail_next_sync();
+    let resolver_impl = Arc::new(MockResolver::default());
+    let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
+    let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
+    let events = Arc::new(RecordingDocEventLog::default());
+    let metrics = Arc::new(MetricsRegistry::default());
+    let permission_resolver: Arc<dyn WorkspacePermissionResolver> = Arc::new(AllowAllPermissions);
+    let worker = Arc::new(StorageProjectionWorker::new(
+        queue.clone(),
+        storage,
+        resolver,
+        markdown,
+        events,
+        metrics.clone(),
+        permission_resolver.clone(),
+        Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
+    ));
+    let job = StorageProjectionJob {
+        id: 2,
+        workspace_id: Uuid::new_v4(),
+        job_type: StorageProjectionJobKind::DocSync,
+        doc_id: Some(Uuid::new_v4()),
+        folder_id: None,
+        reason: None,
+        attempts: 0,
+        locked_at: chrono::Utc::now(),
+    };
+    worker.process_job(job).await.unwrap();
+    assert!(queue.completed().is_empty());
+    assert_eq!(queue.failed().len(), 1);
+    assert_eq!(queue.failed()[0].0, 2);
+    assert_eq!(metrics.snapshot().storage_projection_retry, 1);
+}
+
+#[tokio::test]
+async fn delete_doc_metadata_removes_only_listed_attachments() {
+    let queue = Arc::new(MockQueue::default());
+    let storage = Arc::new(RecordingStoragePort::default());
+    let resolver_impl = Arc::new(MockResolver::default());
+    let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
+    let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
+    let events = Arc::new(RecordingDocEventLog::default());
+    let metrics = Arc::new(MetricsRegistry::default());
+    let permission_resolver: Arc<dyn WorkspacePermissionResolver> = Arc::new(AllowAllPermissions);
+    let worker = Arc::new(StorageProjectionWorker::new(
+        queue,
+        storage.clone(),
+        resolver,
+        markdown,
+        events,
+        metrics,
+        permission_resolver.clone(),
+        Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
+    ));
+    let owner = Uuid::new_v4();
+    let metadata = StorageDeleteJobMetadata {
+        workspace_id: owner,
+        repo_path: Some("docs/foo.md".into()),
+        doc_type: DocumentType::Document,
+        attachment_paths: Some(vec![
+            format!("{}/docs/attachments/image.png", owner),
+            format!("{}/docs/attachments/asset.bin", owner),
+        ]),
+        permission_snapshot: PermissionSet::all().to_vec(),
+        actor_id: None,
+    };
+    worker.delete_doc_by_metadata(&metadata).await.unwrap();
+    assert_eq!(
+        storage.calls(),
+        vec![
+            format!("delete_relative_path:{}/docs/foo.md", owner),
+            format!("delete_relative_path:{}/docs/attachments/image.png", owner),
+            format!("delete_relative_path:{}/docs/attachments/asset.bin", owner)
+        ]
+    );
+}
+
+#[tokio::test]
+async fn empty_snapshot_uses_resolver_permissions_when_available() {
+    let queue = Arc::new(MockQueue::default());
+    let storage = Arc::new(RecordingStoragePort::default());
+    let resolver_impl = Arc::new(MockResolver::default());
+    let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
+    let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
+    let events = Arc::new(RecordingDocEventLog::default());
+    let metrics = Arc::new(MetricsRegistry::default());
+    let resolver_stub = Arc::new(RecordingPermissionResolver::new());
+    let permission_resolver: Arc<dyn WorkspacePermissionResolver> = resolver_stub.clone();
+    let worker = Arc::new(StorageProjectionWorker::new(
+        queue,
+        storage,
+        resolver,
+        markdown,
+        events,
+        metrics,
+        permission_resolver,
+        Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
+    ));
+    let metadata = StorageDeleteJobMetadata {
+        workspace_id: Uuid::new_v4(),
+        repo_path: Some("docs/foo.md".into()),
+        doc_type: DocumentType::Document,
+        attachment_paths: None,
+        permission_snapshot: Vec::new(),
+        actor_id: Some(Uuid::new_v4()),
+    };
+    let set = worker
+        .permission_set_from_metadata(&metadata)
+        .await
+        .unwrap();
+    assert!(resolver_stub.was_called());
+    assert!(set.allows(PERM_DOC_DELETE));
+}
+
+#[tokio::test]
+async fn empty_snapshot_without_actor_falls_back_to_minimum_permissions() {
+    let queue = Arc::new(MockQueue::default());
+    let storage = Arc::new(RecordingStoragePort::default());
+    let resolver_impl = Arc::new(MockResolver::default());
+    let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
+    let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
+    let events = Arc::new(RecordingDocEventLog::default());
+    let metrics = Arc::new(MetricsRegistry::default());
+    let permission_resolver: Arc<dyn WorkspacePermissionResolver> =
+        Arc::new(NonePermissionResolver);
+    let worker = Arc::new(StorageProjectionWorker::new(
+        queue,
+        storage,
+        resolver,
+        markdown,
+        events,
+        metrics,
+        permission_resolver,
+        Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
+    ));
+    let metadata = StorageDeleteJobMetadata {
+        workspace_id: Uuid::new_v4(),
+        repo_path: Some("docs/foo.md".into()),
+        doc_type: DocumentType::Document,
+        attachment_paths: None,
+        permission_snapshot: Vec::new(),
+        actor_id: None,
+    };
+    let set = worker
+        .permission_set_from_metadata(&metadata)
+        .await
+        .unwrap();
+    assert!(set.allows(PERM_DOC_DELETE));
+    assert!(set.allows(PERM_FOLDER_DELETE));
+    assert!(set.allows(PERM_FILE_DELETE));
+}
+
+#[derive(Default)]
+struct MockQueue {
+    completed: Mutex<Vec<i64>>,
+    failed: Mutex<Vec<(i64, String)>>,
+}
+
+impl MockQueue {
+    fn completed(&self) -> Vec<i64> {
+        self.completed.lock().unwrap().clone()
+    }
+
+    fn failed(&self) -> Vec<(i64, String)> {
+        self.failed.lock().unwrap().clone()
+    }
+}
+
+#[async_trait]
+impl StorageProjectionQueue for MockQueue {
+    async fn enqueue_doc_job(
+        &self,
+        _workspace_id: Uuid,
+        _doc_id: Uuid,
+        _kind: StorageProjectionJobKind,
+        _reason: Option<&str>,
+    ) -> PortResult<()> {
+        unimplemented!()
+    }
+
+    async fn enqueue_folder_job(
+        &self,
+        _workspace_id: Uuid,
+        _folder_id: Uuid,
+        _kind: StorageProjectionJobKind,
+        _reason: Option<&str>,
+    ) -> PortResult<()> {
+        unimplemented!()
+    }
+
+    async fn fetch_next_job(
+        &self,
+        _lock_timeout_secs: i64,
+    ) -> PortResult<Option<StorageProjectionJob>> {
+        Ok(None)
+    }
+
+    async fn complete_job(
+        &self,
+        job_id: i64,
+        _locked_at: chrono::DateTime<chrono::Utc>,
+    ) -> PortResult<()> {
+        self.completed.lock().unwrap().push(job_id);
+        Ok(())
+    }
+
+    async fn fail_job(
+        &self,
+        job_id: i64,
+        _locked_at: chrono::DateTime<chrono::Utc>,
+        error: &str,
+    ) -> PortResult<()> {
+        self.failed
+            .lock()
+            .unwrap()
+            .push((job_id, error.to_string()));
+        Ok(())
+    }
+}
+
+#[derive(Default)]
+struct RecordingStoragePort {
+    calls: Mutex<Vec<String>>,
+    fail_sync: AtomicBool,
+}
+
+impl RecordingStoragePort {
+    fn calls(&self) -> Vec<String> {
+        self.calls.lock().unwrap().clone()
+    }
+
+    fn fail_next_sync(&self) {
+        self.fail_sync.store(true, Ordering::SeqCst);
+    }
+}
+
+#[async_trait]
+impl StorageProjectionPort for RecordingStoragePort {
+    async fn move_folder_subtree(&self, folder_id: Uuid) -> PortResult<u64> {
+        let _ = folder_id;
+        self.calls
+            .lock()
+            .unwrap()
+            .push("move_folder_subtree".to_string());
+        Ok(0)
+    }
+
+    async fn delete_doc_physical(&self, doc_id: Uuid) -> PortResult<()> {
+        let _ = doc_id;
+        self.calls
+            .lock()
+            .unwrap()
+            .push("delete_doc_physical".to_string());
+        Ok(())
+    }
+
+    async fn delete_folder_physical(&self, folder_id: Uuid) -> PortResult<u64> {
+        let _ = folder_id;
+        self.calls
+            .lock()
+            .unwrap()
+            .push("delete_folder_physical".to_string());
+        Ok(0)
+    }
+
+    async fn sync_doc_paths(&self, _doc_id: Uuid) -> PortResult<()> {
+        self.calls
+            .lock()
+            .unwrap()
+            .push("sync_doc_paths".to_string());
+        if self.fail_sync.swap(false, Ordering::SeqCst) {
+            return Err(anyhow::anyhow!("sync_failed").into());
+        }
+        Ok(())
+    }
+
+    async fn delete_relative_path(&self, rel: &str) -> PortResult<()> {
+        self.calls
+            .lock()
+            .unwrap()
+            .push(format!("delete_relative_path:{rel}"));
+        Ok(())
+    }
+}
+
+#[derive(Default)]
+struct MockResolver {
+    writes: Mutex<Vec<(Uuid, Vec<u8>)>>,
+}
+
+impl MockResolver {
+    fn writes(&self) -> Vec<(Uuid, Vec<u8>)> {
+        self.writes.lock().unwrap().clone()
+    }
+}
+
+#[async_trait]
+impl StorageResolverPort for MockResolver {
+    async fn build_doc_dir(&self, _doc_id: Uuid) -> PortResult<PathBuf> {
+        Ok(PathBuf::from("mock"))
+    }
+
+    async fn build_doc_file_path(&self, doc_id: Uuid) -> PortResult<PathBuf> {
+        Ok(PathBuf::from(format!("mock/{doc_id}.md")))
+    }
+
+    fn relative_from_uploads(&self, _abs: &Path) -> String {
+        "mock".into()
+    }
+
+    fn user_repo_dir(&self, _user_id: Uuid) -> String {
+        "mock".into()
+    }
+
+    fn absolute_from_relative(&self, rel: &str) -> PathBuf {
+        PathBuf::from(rel)
+    }
+
+    async fn resolve_upload_path(&self, _doc_id: Uuid, _rest_path: &str) -> PortResult<PathBuf> {
+        unimplemented!()
+    }
+
+    async fn read_bytes(&self, _abs_path: &Path) -> PortResult<Vec<u8>> {
+        unimplemented!()
+    }
+
+    async fn exists(&self, _abs_path: &Path) -> PortResult<bool> {
+        Ok(true)
+    }
+
+    async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> PortResult<()> {
+        let doc_id = abs_path
+            .file_stem()
+            .and_then(|s| s.to_str())
+            .and_then(|raw| Uuid::parse_str(raw).ok())
+            .unwrap_or_else(Uuid::nil);
+        self.writes.lock().unwrap().push((doc_id, data.to_vec()));
+        Ok(())
+    }
+
+    async fn store_doc_attachment(
+        &self,
+        _doc_id: Uuid,
+        _original_filename: Option<&str>,
+        _bytes: &[u8],
+    ) -> PortResult<StoredAttachment> {
+        unimplemented!()
+    }
+}
+
+struct MockMarkdownExporter {
+    bytes: Vec<u8>,
+}
+
+impl MockMarkdownExporter {
+    fn new() -> Self {
+        Self {
+            bytes: b"mock markdown".to_vec(),
+        }
+    }
+}
+
+#[async_trait]
+impl MarkdownExportProvider for MockMarkdownExporter {
+    async fn export_markdown_for_doc(
+        &self,
+        _doc_id: &Uuid,
+    ) -> anyhow::Result<Option<MarkdownExport>> {
+        Ok(Some(MarkdownExport {
+            bytes: self.bytes.clone(),
+            repo_path: Some("docs/mock.md".into()),
+            owner_id: Some(Uuid::new_v4()),
+            workspace_id: Uuid::new_v4(),
+            content_hash: "hash".into(),
+        }))
+    }
+}
+
+#[derive(Default)]
+struct RecordingDocEventLog {
+    events: Mutex<Vec<RecordedDocEvent>>,
+}
+
+impl RecordingDocEventLog {
+    fn events(&self) -> Vec<RecordedDocEvent> {
+        self.events.lock().unwrap().clone()
+    }
+}
+
+type RecordedDocEvent = (Uuid, Uuid, String, Option<serde_json::Value>);
+
+#[async_trait]
+impl DocEventLog for RecordingDocEventLog {
+    async fn append(
+        &self,
+        workspace_id: Uuid,
+        doc_id: Uuid,
+        event_type: &str,
+        payload: Option<serde_json::Value>,
+    ) -> PortResult<()> {
+        self.events
+            .lock()
+            .unwrap()
+            .push((workspace_id, doc_id, event_type.to_string(), payload));
+        Ok(())
+    }
+}
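
One path the suite above leaves unpinned is the give-up branch. A sketch of such a test reusing these mocks, assuming `with_max_attempts` is applied before wrapping in `Arc` (its `self`-consuming signature suggests exactly that) and that the metrics snapshot exposes a `storage_projection_failure` counter alongside the success and retry counters asserted above:

    #[tokio::test]
    async fn doc_sync_gives_up_after_max_attempts() {
        let queue = Arc::new(MockQueue::default());
        let storage = Arc::new(RecordingStoragePort::default());
        storage.fail_next_sync();
        let resolver: Arc<dyn StorageResolverPort> = Arc::new(MockResolver::default());
        let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
        let events = Arc::new(RecordingDocEventLog::default());
        let metrics = Arc::new(MetricsRegistry::default());
        let permission_resolver: Arc<dyn WorkspacePermissionResolver> =
            Arc::new(AllowAllPermissions);
        let worker = Arc::new(
            StorageProjectionWorker::new(
                queue.clone(),
                storage,
                resolver,
                markdown,
                events,
                metrics.clone(),
                permission_resolver,
                Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
            )
            .with_max_attempts(1),
        );
        let job = StorageProjectionJob {
            id: 3,
            workspace_id: Uuid::new_v4(),
            job_type: StorageProjectionJobKind::DocSync,
            doc_id: Some(Uuid::new_v4()),
            folder_id: None,
            reason: None,
            attempts: 1, // already tried once; this attempt is the last
            locked_at: chrono::Utc::now(),
        };
        worker.process_job(job).await.unwrap();
        // Gave up: the job is completed (not re-queued) and counted as a failure.
        assert_eq!(queue.completed(), vec![3]);
        assert!(queue.failed().is_empty());
        assert_eq!(metrics.snapshot().storage_projection_failure, 1); // assumed field name
    }
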
diff --git a/api/crates/infrastructure/src/documents/db/mod.rs b/api/crates/infrastructure/src/documents/db/mod.rs
new file mode 100644
index 00000000..21b552a0
--- /dev/null
+++ b/api/crates/infrastructure/src/documents/db/mod.rs
@@ -0,0 +1 @@
+pub mod repositories;
diff --git a/api/crates/infrastructure/src/documents/db/repositories/access_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/access_repository_sqlx/mod.rs
new file mode 100644
index 00000000..29979980
--- /dev/null
+++ b/api/crates/infrastructure/src/documents/db/repositories/access_repository_sqlx/mod.rs
@@ -0,0 +1,155 @@
+use async_trait::async_trait;
+use sqlx::Row;
+use uuid::Uuid;
+
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::documents::ports::access_repository::{AccessRepository, DocumentUserAccess};
+use domain::access::permissions::{PermissionSet, apply_custom_overrides, system_role_permissions};
+
+pub struct SqlxAccessRepository {
+    pub pool: PgPool,
+}
+
+impl SqlxAccessRepository {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
+
+#[async_trait]
+impl AccessRepository for SqlxAccessRepository {
+    async fn resolve_user_document_access(
+        &self,
+        doc_id: Uuid,
+        user_id: Uuid,
+    ) -> PortResult<Option<DocumentUserAccess>> {
+        let out: anyhow::Result<Option<DocumentUserAccess>> = async {
+            let rows = sqlx::query(
+                r#"SELECT d.workspace_id,
+                          d.archived_at,
+                          m.role_kind,
+                          m.system_role,
+                          m.custom_role_id,
+                          r.base_role AS custom_base_role,
+                          p.permission,
+                          p.allowed
+                   FROM documents d
+                   JOIN workspace_members m
+                     ON m.workspace_id = d.workspace_id
+                    AND m.user_id = $2
+                   LEFT JOIN workspace_roles r ON r.id = m.custom_role_id
+                   LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id
+                   WHERE d.id = $1"#,
+            )
+            .bind(doc_id)
+            .bind(user_id)
+            .fetch_all(&self.pool)
+            .await?;
+
+            if rows.is_empty() {
+                return Ok(None);
+            }
+
+            let first = &rows[0];
+            let workspace_id = first.get("workspace_id");
+            let archived = first
+                .try_get::<Option<chrono::DateTime<chrono::Utc>>, _>("archived_at")
+                .ok()
+                .flatten()
+                .is_some();
+
+            let role_kind: String = first.get("role_kind");
+            let system_role = first
+                .try_get::<Option<String>, _>("system_role")
+                .ok()
+                .flatten();
+            let custom_base_role = first
+                .try_get::<Option<String>, _>("custom_base_role")
+                .ok()
+                .flatten();
+
+            let mut overrides = Vec::new();
+            for row in rows {
+                if let (Some(permission), Some(allowed)) = (
+                    row.try_get::<Option<String>, _>("permission")
+                        .ok()
+                        .flatten(),
+                    row.try_get::<Option<bool>, _>("allowed").ok().flatten(),
+                ) {
+                    overrides.push(domain::access::permissions::PermissionOverride::new(
+                        permission, allowed,
+                    ));
+                }
+            }
+
+            let permissions = build_permission_set(
+                &role_kind,
+                system_role.as_deref(),
+                custom_base_role.as_deref(),
+                overrides,
+            );
+
+            Ok(Some(DocumentUserAccess {
+                workspace_id,
+                is_archived: archived,
+                permissions,
+            }))
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn is_document_public(&self, doc_id: Uuid) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            let count = sqlx::query_scalar::<_, i64>(
+                "SELECT COUNT(1) FROM public_documents WHERE document_id = $1",
+            )
+            .bind(doc_id)
+            .fetch_one(&self.pool)
+            .await?;
+            Ok(count > 0)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn is_document_archived(&self, doc_id: Uuid) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            let archived = sqlx::query_scalar::<_, bool>(
+                "SELECT archived_at IS NOT NULL FROM documents WHERE id = $1",
+            )
+            .bind(doc_id)
+            .fetch_optional(&self.pool)
+            .await?
+            .unwrap_or(false);
+            Ok(archived)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
+
+fn build_permission_set(
+    role_kind: &str,
+    system_role: Option<&str>,
+    custom_base_role: Option<&str>,
+    overrides: Vec<domain::access::permissions::PermissionOverride>,
+) -> PermissionSet {
+    let set = match role_kind {
+        "system" => {
+            let role = system_role.unwrap_or("viewer");
+            system_role_permissions(role)
+        }
+        "custom" => {
+            let base = custom_base_role.unwrap_or("viewer");
+            system_role_permissions(base)
+        }
+        _ => system_role_permissions("viewer"),
+    };
+    if overrides.is_empty() {
+        set
+    } else {
+        apply_custom_overrides(set, overrides)
+    }
+}
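
To make the override semantics concrete: a custom role starts from its base role's grants, and per-permission rows are layered on top. A minimal sketch, assuming the `viewer` base set does not include document deletion, that `allows` matches by permission name, and that `PermissionOverride::new(String, bool)` behaves as the repository call above implies:

    use domain::access::permissions::{
        PERM_DOC_DELETE, PermissionOverride, apply_custom_overrides, system_role_permissions,
    };

    fn main() {
        // Base 'viewer' grants, then an explicit allow for document deletion.
        let set = apply_custom_overrides(
            system_role_permissions("viewer"),
            vec![PermissionOverride::new(PERM_DOC_DELETE.to_string(), true)],
        );
        assert!(set.allows(PERM_DOC_DELETE));
    }
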
diff --git a/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/helpers.rs b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/helpers.rs
new file mode 100644
index 00000000..240ab015
--- /dev/null
+++ b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/helpers.rs
@@ -0,0 +1,583 @@
+use std::borrow::Cow;
+
+use anyhow::{Context, anyhow};
+use sha2::{Digest, Sha256};
+use sqlx::{Postgres, QueryBuilder, Row, Transaction, postgres::PgRow};
+use uuid::Uuid;
+
+use application::documents::ports::document_repository::{
+    DocMeta, DocumentRepoResult, DocumentRepositoryError, SubtreeDocument,
+};
+use domain::documents::doc_type::DocumentType;
+use domain::documents::document::Document as DomainDocument;
+use domain::documents::path as doc_path;
+use domain::documents::title::Title;
+
+use super::SqlxDocumentRepository;
+
+impl SqlxDocumentRepository {
+    pub(super) fn map_row_to_meta(row: &PgRow) -> anyhow::Result<DocMeta> {
+        let doc_type_str: String = row.get("type");
+        let doc_type =
+            DocumentType::try_from(doc_type_str.as_str()).context("invalid_document_type")?;
+        let slug_str: String = row.get("slug");
+        let slug = doc_path::Slug::new(slug_str).context("invalid_slug")?;
+        let desired_path_str: String = row.get("desired_path");
+        let desired_path =
+            doc_path::DesiredPath::new(desired_path_str).context("invalid_desired_path")?;
+        let title: String = row.get("title");
+        Ok(DocMeta {
+            workspace_id: row.get("workspace_id"),
+            doc_type,
+            path: row.try_get("path").ok(),
+            slug,
+            desired_path,
+            title: Title::new(title),
+            archived_at: row.try_get("archived_at").ok(),
+        })
+    }
+
+    pub(super) fn map_row_to_document(row: &PgRow) -> anyhow::Result<DomainDocument> {
+        let doc_type_str: String = row.get("type");
+        let doc_type =
+            DocumentType::try_from(doc_type_str.as_str()).context("invalid_document_type")?;
+        let title: String = row.get("title");
+        let slug_str: String = row.get("slug");
+        let slug = doc_path::Slug::new(slug_str).context("invalid_slug")?;
+        let desired_path_str: String = row.get("desired_path");
+        let desired_path =
+            doc_path::DesiredPath::new(desired_path_str).context("invalid_desired_path")?;
+        Ok(DomainDocument::rehydrate(
+            row.get("id"),
+            row.try_get("owner_user_id").ok(),
+            row.get("workspace_id"),
+            Title::new(title),
+            row.get("parent_id"),
+            doc_type,
+            row.get("created_at"),
+            row.get("updated_at"),
+            row.try_get("created_by_plugin").ok(),
+            slug,
+            desired_path,
+            row.try_get("path").ok(),
+            row.try_get("created_by").ok(),
+            row.try_get("archived_at").ok(),
+            row.try_get("archived_by").ok(),
+            row.try_get("archived_parent_id").ok(),
+        ))
+    }
+
+    pub(super) fn hash_path(desired_path: &str) -> Vec<u8> {
+        Sha256::digest(desired_path.as_bytes()).to_vec()
+    }
+
+    pub(super) fn owner_relative_path(owner_id: Uuid, desired_path: &str) -> String {
+        format!("{owner_id}/{}", desired_path.trim_start_matches('/'))
+    }
+
+    pub(super) async fn resolve_parent_folder_id(
+        &self,
+        workspace_id: Uuid,
+        desired_parent_path: Option<&doc_path::DesiredPath>,
+    ) -> anyhow::Result<Option<Uuid>> {
+        let Some(path) = desired_parent_path
+            .map(|p| p.as_str())
+            .filter(|p| !p.is_empty())
+        else {
+            return Ok(None);
+        };
+        let row = sqlx::query(
+            r#"SELECT id, archived_at FROM documents
+               WHERE workspace_id = $1 AND desired_path = $2 AND type = 'folder'
+               LIMIT 1"#,
+        )
+        .bind(workspace_id)
+        .bind(path)
+        .fetch_optional(&self.pool)
+        .await?;
+
+        match row {
+            Some(row) => {
+                let archived_at: Option<chrono::DateTime<chrono::Utc>> =
+                    row.try_get("archived_at").ok();
+                if archived_at.is_some() {
+                    Err(anyhow!("parent_folder_archived"))
+                } else {
+                    Ok(Some(row.get("id")))
+                }
+            }
+            None => Err(anyhow!("parent_folder_not_found")),
+        }
+    }
+
+    pub(super) async fn update_descendant_paths_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        root_id: Uuid,
+    ) -> DocumentRepoResult<()> {
+        let rows = sqlx::query(
+            r#"
+            WITH RECURSIVE tree AS (
+                SELECT id, desired_path
+                FROM documents
+                WHERE id = $1
+                UNION ALL
+                SELECT d.id,
+                       CASE
+                           WHEN tree.desired_path = '' THEN
+                               CASE
+                                   WHEN d.type = 'folder' THEN d.slug
+                                   ELSE d.slug || '.md'
+                               END
+                           ELSE
+                               CASE
+                                   WHEN d.type = 'folder' THEN tree.desired_path || '/' || d.slug
+                                   ELSE tree.desired_path || '/' || d.slug || '.md'
+                               END
+                       END AS desired_path
+                FROM documents d
+                JOIN tree ON d.parent_id = tree.id
+            )
+            SELECT id, desired_path FROM tree WHERE id <> $1
+            "#,
+        )
+        .bind(root_id)
+        .fetch_all(tx.as_mut())
+        .await
+        .map_err(|e| DocumentRepositoryError::Unexpected(e.into()))?;
+
+        if rows.is_empty() {
+            return Ok(());
+        }
+
+        let mut q = QueryBuilder::new(
+            "UPDATE documents AS d SET desired_path = v.desired_path, \
+             path_digest = v.path_digest, \
+             path = d.workspace_id::text || '/' || v.desired_path, \
+             updated_at = now() \
+             FROM (VALUES ",
+        );
+        let mut values = q.separated(", ");
+        for row in rows {
+            let id: Uuid = row.get("id");
+            let desired_path: String = row.get("desired_path");
+            let path_digest = Self::hash_path(&desired_path);
+            // The separator belongs between tuples only, so everything after the
+            // opening paren is pushed unseparated.
+            values.push("(");
+            values.push_bind_unseparated(id);
+            values.push_unseparated(", ");
+            values.push_bind_unseparated(desired_path);
+            values.push_unseparated(", ");
+            values.push_bind_unseparated(path_digest);
+            values.push_unseparated(")");
+        }
+        q.push(") AS v(id, desired_path, path_digest) WHERE d.id = v.id");
+        q.build().execute(tx.as_mut()).await.map_err(|e| {
+            if Self::is_unique_violation(&e) {
+                DocumentRepositoryError::PathConflict
+            } else {
+                DocumentRepositoryError::Unexpected(e.into())
+            }
+        })?;
+        Ok(())
+    }
+
+    pub(super) fn is_unique_violation(err: &sqlx::Error) -> bool {
+        match err {
+            sqlx::Error::Database(db_err) => {
+                matches!(db_err.code(), Some(code) if code == Cow::Borrowed("23505"))
+            }
+            _ => false,
+        }
+    }
+
+    pub(crate) async fn create_for_user_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        workspace_id: Uuid,
+        created_by: Uuid,
+        title: &Title,
+        parent_id: Option<Uuid>,
+        doc_type: DocumentType,
+        created_by_plugin: Option<&str>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<DomainDocument> {
+        sqlx::query("SAVEPOINT document_create")
+            .execute(tx.as_mut())
+            .await
+            .map_err(|e| DocumentRepositoryError::Unexpected(e.into()))?;
+        let repo_path = Self::owner_relative_path(workspace_id, desired_path.as_str());
+        let path_digest = Self::hash_path(desired_path.as_str());
+        let row = sqlx::query(
+            r#"INSERT INTO documents (title, owner_id, owner_user_id, workspace_id, created_by, created_by_plugin, parent_id, type, slug, desired_path, path, path_digest)
+               VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
+               RETURNING *"#,
+        )
+        .bind(title.as_str())
+        .bind(workspace_id)
+        .bind(created_by)
+        .bind(workspace_id)
+        .bind(created_by)
+        .bind(created_by_plugin)
+        .bind(parent_id)
+        .bind(doc_type.as_str())
+        .bind(slug.as_str())
+        .bind(desired_path.as_str())
+        .bind(&repo_path)
+        .bind(&path_digest)
+        .fetch_one(tx.as_mut())
+        .await;
+        match row {
+            Ok(row) => {
+                sqlx::query("RELEASE SAVEPOINT document_create")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                Ok(Self::map_row_to_document(&row)?)
+            }
+            Err(err) => {
+                if Self::is_unique_violation(&err) {
+                    sqlx::query("ROLLBACK TO SAVEPOINT document_create")
+                        .execute(tx.as_mut())
+                        .await
+                        .ok();
+                    sqlx::query("RELEASE SAVEPOINT document_create")
+                        .execute(tx.as_mut())
+                        .await
+                        .ok();
+                    return Err(DocumentRepositoryError::PathConflict);
+                }
+                sqlx::query("ROLLBACK TO SAVEPOINT document_create")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                sqlx::query("RELEASE SAVEPOINT document_create")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                Err(DocumentRepositoryError::Unexpected(err.into()))
+            }
+        }
+    }
+
+    pub(crate) async fn update_title_and_parent_for_user_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        id: Uuid,
+        workspace_id: Uuid,
+        title: &Title,
+        parent_id: Option<Option<Uuid>>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<Option<DomainDocument>> {
+        sqlx::query("SAVEPOINT document_update")
+            .execute(tx.as_mut())
+            .await
+            .map_err(|e| DocumentRepositoryError::Unexpected(e.into()))?;
+        let path_digest = Self::hash_path(desired_path.as_str());
+        let row = match parent_id {
+            None => {
+                sqlx::query(
+                    r#"UPDATE documents SET
+                        title = $1,
+                        slug = $2,
+                        desired_path = $3,
+                        path_digest = $4,
+                        updated_at = now()
+                    WHERE id = $5 AND workspace_id = $6
+                    RETURNING *"#,
+                )
+                .bind(title.as_str())
+                .bind(slug.as_str())
+                .bind(desired_path.as_str())
+                .bind(&path_digest)
+                .bind(id)
+                .bind(workspace_id)
+                .fetch_optional(tx.as_mut())
+                .await
+            }
+            Some(new_parent) => {
+                sqlx::query(
+                    r#"UPDATE documents SET
+                        title = $1,
+                        parent_id = $2,
+                        slug = $3,
+                        desired_path = $4,
+                        path_digest = $5,
+                        updated_at = now()
+                    WHERE id = $6 AND workspace_id = $7
+                    RETURNING *"#,
+                )
+                .bind(title.as_str())
+                .bind(new_parent)
+                .bind(slug.as_str())
+                .bind(desired_path.as_str())
+                .bind(&path_digest)
+                .bind(id)
+                .bind(workspace_id)
+                .fetch_optional(tx.as_mut())
+                .await
+            }
+        };
+
+        match row {
+            Ok(Some(row)) => {
+                let doc = Self::map_row_to_document(&row)?;
+                if doc.doc_type() == DocumentType::Folder {
+                    sqlx::query("SAVEPOINT document_update_descendants")
+                        .execute(tx.as_mut())
+                        .await
+                        .map_err(|e| DocumentRepositoryError::Unexpected(e.into()))?;
+                    let result = self.update_descendant_paths_tx(tx, doc.id()).await;
+                    match result {
+                        Ok(()) => {
+                            sqlx::query("RELEASE SAVEPOINT document_update_descendants")
+                                .execute(tx.as_mut())
+                                .await
+                                .ok();
+                        }
+                        Err(err) => {
+                            sqlx::query("ROLLBACK TO SAVEPOINT document_update_descendants")
+                                .execute(tx.as_mut())
+                                .await
+                                .ok();
+                            sqlx::query("ROLLBACK TO SAVEPOINT document_update")
+                                .execute(tx.as_mut())
+                                .await
+                                .ok();
+                            sqlx::query("RELEASE SAVEPOINT document_update")
+                                .execute(tx.as_mut())
+                                .await
+                                .ok();
+                            return Err(err);
+                        }
+                    }
+                }
+                sqlx::query("RELEASE SAVEPOINT document_update")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                Ok(Some(doc))
+            }
+            Ok(None) => {
+                sqlx::query("RELEASE SAVEPOINT document_update")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                Ok(None)
+            }
+            Err(err) => {
+                if Self::is_unique_violation(&err) {
+                    sqlx::query("ROLLBACK TO SAVEPOINT document_update")
+                        .execute(tx.as_mut())
+                        .await
+                        .ok();
+                    sqlx::query("RELEASE SAVEPOINT document_update")
+                        .execute(tx.as_mut())
+                        .await
+                        .ok();
+                    return Err(DocumentRepositoryError::PathConflict);
+                }
+                sqlx::query("ROLLBACK TO SAVEPOINT document_update")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                sqlx::query("RELEASE SAVEPOINT document_update")
+                    .execute(tx.as_mut())
+                    .await
+                    .ok();
+                Err(DocumentRepositoryError::Unexpected(err.into()))
+            }
+        }
+    }
+
+    pub(crate) async fn delete_owned_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        id: Uuid,
+        workspace_id: Uuid,
+    ) -> anyhow::Result<Option<DocumentType>> {
+        let row = sqlx::query(r#"SELECT type FROM documents WHERE id = $1 AND workspace_id = $2"#)
+            .bind(id)
+            .bind(workspace_id)
+            .fetch_optional(tx.as_mut())
+            .await?;
+        let dtype = match row {
+            Some(r) => {
+                let doc_type_str: String = r.get("type");
+                DocumentType::try_from(doc_type_str.as_str()).context("invalid_document_type")?
+            }
+            None => return Ok(None),
+        };
+        let res = sqlx::query(r#"DELETE FROM documents WHERE id = $1 AND workspace_id = $2"#)
+            .bind(id)
+            .bind(workspace_id)
+            .execute(tx.as_mut())
+            .await?;
+        if res.rows_affected() > 0 {
+            Ok(Some(dtype))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub(crate) async fn get_meta_for_owner_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+    ) -> anyhow::Result<Option<DocMeta>> {
+        let row = sqlx::query(
+            "SELECT workspace_id, type, path, slug, desired_path, title, archived_at FROM documents WHERE id = $1 AND workspace_id = $2 FOR UPDATE",
+        )
+        .bind(doc_id)
+        .bind(workspace_id)
+        .fetch_optional(tx.as_mut())
+        .await?;
+        row.as_ref()
+            .map(SqlxDocumentRepository::map_row_to_meta)
+            .transpose()
+    }
+
+    pub(crate) async fn archive_subtree_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+        archived_by: Uuid,
+    ) -> anyhow::Result<Option<DomainDocument>> {
+        let updated = sqlx::query_scalar::<_, Uuid>(
+            r#"
+            WITH RECURSIVE subtree AS (
+                SELECT id FROM documents WHERE id = $1 AND workspace_id = $2
+                UNION ALL
+                SELECT d.id
+                FROM documents d
+                JOIN subtree sb ON d.parent_id = sb.id
+                WHERE d.workspace_id = $2
+            ),
+            removed_shares AS (
+                DELETE FROM shares s
+                USING subtree sb
+                WHERE s.document_id = sb.id
+                RETURNING 1
+            ),
+            updated AS (
+                UPDATE documents AS d
+                SET archived_at = now(),
+                    archived_by = $3,
+                    archived_parent_id = d.parent_id,
+                    parent_id = NULL,
+                    updated_at = now()
+                FROM subtree sb
+                WHERE d.id = sb.id AND d.archived_at IS NULL
+                RETURNING d.id
+            )
+            SELECT id FROM updated WHERE id = $1 LIMIT 1
+            "#,
+        )
+        .bind(doc_id)
+        .bind(workspace_id)
+        .bind(archived_by)
+        .fetch_optional(tx.as_mut())
+        .await?;
+
+        let root = if let Some(root_id) = updated {
+            sqlx::query(r#"SELECT * FROM documents WHERE id = $1"#)
+                .bind(root_id)
+                .fetch_optional(tx.as_mut())
+                .await?
+                .map(|r| Self::map_row_to_document(&r))
+                .transpose()?
+        } else {
+            None
+        };
+
+        Ok(root)
+    }
+
+    pub(crate) async fn unarchive_subtree_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+    ) -> anyhow::Result<Option<DomainDocument>> {
+        let updated = sqlx::query_scalar::<_, Uuid>(
+            r#"
+            WITH RECURSIVE subtree AS (
+                SELECT id FROM documents WHERE id = $1 AND workspace_id = $2
+                UNION ALL
+                SELECT d.id
+                FROM documents d
+                JOIN subtree sb ON d.archived_parent_id = sb.id
+                WHERE d.workspace_id = $2
+            ),
+            updated AS (
+                UPDATE documents AS d
+                SET parent_id = archived_parent_id,
+                    archived_parent_id = NULL,
+                    archived_at = NULL,
+                    archived_by = NULL,
+                    updated_at = now()
+                FROM subtree sb
+                WHERE d.id = sb.id AND d.archived_at IS NOT NULL
+                RETURNING d.id
+            )
+            SELECT id FROM updated WHERE id = $1 LIMIT 1
+            "#,
+        )
+        .bind(doc_id)
+        .bind(workspace_id)
+        .fetch_optional(tx.as_mut())
+        .await?;
+
+        let root = if let Some(root_id) = updated {
+            sqlx::query(r#"SELECT * FROM documents WHERE id = $1"#)
+                .bind(root_id)
+                .fetch_optional(tx.as_mut())
+                .await?
+                .map(|r| Self::map_row_to_document(&r))
+                .transpose()?
+        } else {
+            None
+        };
+
+        Ok(root)
+    }
+
+    pub(crate) async fn list_owned_subtree_documents_tx(
+        &self,
+        tx: &mut Transaction<'_, Postgres>,
+        workspace_id: Uuid,
+        root_id: Uuid,
+    ) -> anyhow::Result<Vec<SubtreeDocument>> {
+        let rows = sqlx::query(
+            r#"
+            WITH RECURSIVE subtree AS (
+                SELECT id, type FROM documents WHERE id = $1 AND workspace_id = $2
+                UNION ALL
+                SELECT d.id, d.type
+                FROM documents d
+                JOIN subtree sb ON COALESCE(d.parent_id, d.archived_parent_id) = sb.id
+                WHERE d.workspace_id = $2
+            )
+            SELECT id, type FROM subtree FOR UPDATE
+            "#,
+        )
+        .bind(root_id)
+        .bind(workspace_id)
+        .fetch_all(tx.as_mut())
+        .await?;
+        rows.into_iter()
+            .map(|r| {
+                let doc_type_str: String = r.get("type");
+                let doc_type = DocumentType::try_from(doc_type_str.as_str())
+                    .context("invalid_document_type")?;
+                Ok(SubtreeDocument {
+                    id: r.get("id"),
+                    doc_type,
+                })
+            })
+            .collect()
+    }
+}
diff --git a/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/mod.rs
new file mode 100644
index 00000000..b95d16ad
--- /dev/null
+++ b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/mod.rs
@@ -0,0 +1,17 @@
+use crate::core::db::PgPool;
+
+mod helpers;
+mod repository;
+
+#[cfg(test)]
+mod tests;
+
+pub struct SqlxDocumentRepository {
+    pub pool: PgPool,
+}
+
+impl SqlxDocumentRepository {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
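
The recursive CTE in `update_descendant_paths_tx` recomputes each descendant's `desired_path` from its parent's: folders append their `slug`, documents append `slug` plus an `.md` suffix, and an empty parent path means the repo root. The same rule in plain Rust, for reference:

    // Mirrors the SQL CASE: '' as the parent path means the item lives at the root.
    fn child_desired_path(parent: &str, slug: &str, is_folder: bool) -> String {
        let leaf = if is_folder { slug.to_string() } else { format!("{slug}.md") };
        if parent.is_empty() { leaf } else { format!("{parent}/{leaf}") }
    }

    fn main() {
        assert_eq!(child_desired_path("", "notes", true), "notes");
        assert_eq!(child_desired_path("notes", "intro", false), "notes/intro.md");
        assert_eq!(child_desired_path("notes/2024", "plan", false), "notes/2024/plan.md");
    }
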
diff --git a/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/repository.rs b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/repository.rs
new file mode 100644
index 00000000..0a134ff8
--- /dev/null
+++ b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/repository.rs
@@ -0,0 +1,420 @@
+use anyhow::{Context, anyhow};
+use async_trait::async_trait;
+use sqlx::Row;
+use uuid::Uuid;
+
+use application::core::ports::errors::PortResult;
+use application::documents::ports::document_path_repository::DocumentPathRepository;
+use application::documents::ports::document_repository::{
+    DocMeta, DocumentListState, DocumentRepoResult, DocumentRepository, DocumentRepositoryError,
+    SubtreeDocument,
+};
+use domain::documents::doc_type::DocumentType;
+use domain::documents::document::{Document as DomainDocument, SearchHit};
+use domain::documents::path as doc_path;
+use domain::documents::title::Title;
+
+use super::SqlxDocumentRepository;
+
+fn unexpected_sqlx(err: sqlx::Error) -> DocumentRepositoryError {
+    DocumentRepositoryError::Unexpected(err.into())
+}
+
+#[async_trait]
+impl DocumentRepository for SqlxDocumentRepository {
+    async fn list_for_user(
+        &self,
+        workspace_id: Uuid,
+        query: Option<String>,
+        tag: Option<String>,
+        state: DocumentListState,
+    ) -> DocumentRepoResult<Vec<DomainDocument>> {
+        let archived_condition = match state {
+            DocumentListState::Active => "d.archived_at IS NULL",
+            DocumentListState::Archived => "d.archived_at IS NOT NULL",
+            DocumentListState::All => "TRUE",
+        };
+
+        let rows = if let Some(t) = tag.as_ref().filter(|s| !s.trim().is_empty()) {
+            let sql = format!(
+                r#"SELECT d.*
+                   FROM document_tags dt
+                   JOIN tags t ON t.id = dt.tag_id
+                   JOIN documents d ON d.id = dt.document_id
+                   WHERE d.workspace_id = $1 AND {archived_condition} AND t.name ILIKE $2
+                   ORDER BY d.updated_at DESC LIMIT 100"#,
+            );
+            sqlx::query(&sql)
+                .bind(workspace_id)
+                .bind(t)
+                .fetch_all(&self.pool)
+                .await
+                .map_err(unexpected_sqlx)?
+        } else if let Some(ref qq) = query.as_ref().filter(|s| !s.trim().is_empty()) {
+            let like = format!("%{}%", qq);
+            let sql = format!(
+                r#"SELECT d.*
+                   FROM documents d
+                   WHERE d.workspace_id = $1 AND {archived_condition} AND d.title ILIKE $2
+                   ORDER BY d.updated_at DESC LIMIT 100"#,
+            );
+            sqlx::query(&sql)
+                .bind(workspace_id)
+                .bind(like)
+                .fetch_all(&self.pool)
+                .await
+                .map_err(unexpected_sqlx)?
+        } else {
+            let sql = format!(
+                r#"SELECT d.*
+                   FROM documents d
+                   WHERE d.workspace_id = $1 AND {archived_condition}
+                   ORDER BY d.updated_at DESC LIMIT 100"#,
+            );
+            sqlx::query(&sql)
+                .bind(workspace_id)
+                .fetch_all(&self.pool)
+                .await
+                .map_err(unexpected_sqlx)?
+        };
+
+        rows.into_iter()
+            .map(|r| Self::map_row_to_document(&r))
+            .collect::<anyhow::Result<Vec<_>>>()
+            .map_err(DocumentRepositoryError::from)
+    }
+
+    async fn list_ids_for_user(&self, workspace_id: Uuid) -> DocumentRepoResult<Vec<Uuid>> {
+        let rows = sqlx::query("SELECT id FROM documents WHERE workspace_id = $1")
+            .bind(workspace_id)
+            .fetch_all(&self.pool)
+            .await
+            .map_err(unexpected_sqlx)?;
+        Ok(rows.into_iter().map(|r| r.get("id")).collect())
+    }
+
+    async fn list_workspace_documents(
+        &self,
+        workspace_id: Uuid,
+    ) -> DocumentRepoResult<Vec<DomainDocument>> {
+        let rows = sqlx::query("SELECT * FROM documents WHERE workspace_id = $1")
+            .bind(workspace_id)
+            .fetch_all(&self.pool)
+            .await
+            .map_err(unexpected_sqlx)?;
+        rows.into_iter()
+            .map(|r| Self::map_row_to_document(&r))
+            .collect::<anyhow::Result<Vec<_>>>()
+            .map_err(DocumentRepositoryError::from)
+    }
+
+    async fn get_by_id(&self, id: Uuid) -> DocumentRepoResult<Option<DomainDocument>> {
+        let row = sqlx::query(r#"SELECT * FROM documents WHERE id = $1"#)
+            .bind(id)
+            .fetch_optional(&self.pool)
+            .await
+            .map_err(unexpected_sqlx)?;
+        row.map(|r| Self::map_row_to_document(&r))
+            .transpose()
+            .map_err(DocumentRepositoryError::from)
+    }
+
+    async fn search_for_user(
+        &self,
+        workspace_id: Uuid,
+        query: Option<String>,
+        limit: i64,
+    ) -> DocumentRepoResult<Vec<SearchHit>> {
+        let q = query.unwrap_or_default();
+        let like = format!("%{}%", q);
+        let rows = if q.trim().is_empty() {
+            sqlx::query(
+                r#"SELECT id, title, type, path, updated_at, archived_at
+                   FROM documents WHERE workspace_id = $1
+                   AND archived_at IS NULL
+                   ORDER BY updated_at DESC
+                   LIMIT $2"#,
+            )
+            .bind(workspace_id)
+            .bind(limit)
+            .fetch_all(&self.pool)
+            .await
+            .map_err(unexpected_sqlx)?
+        } else {
+            sqlx::query(
+                r#"SELECT id, title, type, path, updated_at, archived_at FROM documents
+                   WHERE workspace_id = $1 AND archived_at IS NULL
+                   AND (LOWER(title) LIKE LOWER($2) OR title ILIKE $2)
+                   ORDER BY CASE WHEN LOWER(title) = LOWER($3) THEN 0 ELSE 1 END, LENGTH(title), updated_at DESC
+                   LIMIT $4"#,
+            )
+            .bind(workspace_id)
+            .bind(like)
+            .bind(&q)
+            .bind(limit)
+            .fetch_all(&self.pool)
+            .await
+            .map_err(unexpected_sqlx)?
+        };
+        rows.into_iter()
+            .map(|r| {
+                let doc_type_str: String = r.get("type");
+                let doc_type = DocumentType::try_from(doc_type_str.as_str())
+                    .context("invalid_document_type")?;
+                let title: String = r.get("title");
+                Ok(SearchHit {
+                    id: r.get("id"),
+                    title: Title::new(title),
+                    doc_type,
+                    path: r.try_get("path").ok(),
+                    updated_at: r.get("updated_at"),
+                })
+            })
+            .collect::<anyhow::Result<Vec<_>>>()
+            .map_err(DocumentRepositoryError::from)
+    }
+
+    async fn create_for_user(
+        &self,
+        workspace_id: Uuid,
+        created_by: Uuid,
+        title: &Title,
+        parent_id: Option<Uuid>,
+        doc_type: DocumentType,
+        created_by_plugin: Option<&str>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<DomainDocument> {
+        let mut tx = self.pool.begin().await.map_err(unexpected_sqlx)?;
+        let doc = self
+            .create_for_user_tx(
+                &mut tx,
+                workspace_id,
+                created_by,
+                title,
+                parent_id,
+                doc_type,
+                created_by_plugin,
+                slug,
+                desired_path,
+            )
+            .await?;
+        tx.commit().await.map_err(unexpected_sqlx)?;
+        Ok(doc)
+    }
+
+    async fn update_title_and_parent_for_user(
+        &self,
+        id: Uuid,
+        workspace_id: Uuid,
+        title: &Title,
+        parent_id: Option<Option<Uuid>>,
+        slug: &doc_path::Slug,
+        desired_path: &doc_path::DesiredPath,
+    ) -> DocumentRepoResult<Option<DomainDocument>> {
+        let mut tx = self.pool.begin().await.map_err(unexpected_sqlx)?;
+        let doc = self
+            .update_title_and_parent_for_user_tx(
+                &mut tx,
+                id,
+                workspace_id,
+                title,
+                parent_id,
+                slug,
+                desired_path,
+            )
+            .await?;
+        tx.commit().await.map_err(unexpected_sqlx)?;
+        Ok(doc)
+    }
+
+    async fn delete_owned(
+        &self,
+        id: Uuid,
+        workspace_id: Uuid,
+    ) -> DocumentRepoResult<Option<DocumentType>> {
+        let mut tx = self.pool.begin().await.map_err(unexpected_sqlx)?;
+        let res = self
+            .delete_owned_tx(&mut tx, id, workspace_id)
+            .await
+            .map_err(DocumentRepositoryError::from)?;
+        tx.commit().await.map_err(unexpected_sqlx)?;
+        Ok(res)
+    }
+
+    async fn get_meta_for_owner(
+        &self,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+    ) -> DocumentRepoResult<Option<DocMeta>> {
+        let row = sqlx::query(
+            "SELECT workspace_id, type, path, slug, desired_path, title, archived_at FROM documents WHERE id = $1 AND workspace_id = $2",
+        )
+        .bind(doc_id)
+        .bind(workspace_id)
+        .fetch_optional(&self.pool)
+        .await
+        .map_err(unexpected_sqlx)?;
+        row.as_ref()
+            .map(SqlxDocumentRepository::map_row_to_meta)
+            .transpose()
+            .map_err(DocumentRepositoryError::from)
+    }
+
+    async fn archive_subtree(
+        &self,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+        archived_by: Uuid,
+    ) -> DocumentRepoResult<Option<DomainDocument>> {
+        let mut tx = self.pool.begin().await.map_err(unexpected_sqlx)?;
+        let doc = self
+            .archive_subtree_tx(&mut tx, doc_id, workspace_id, archived_by)
+            .await
+            .map_err(DocumentRepositoryError::from)?;
+        tx.commit().await.map_err(unexpected_sqlx)?;
+        Ok(doc)
+    }
+
+    async fn unarchive_subtree(
+        &self,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+    ) -> DocumentRepoResult<Option<DomainDocument>> {
+        let mut tx = self.pool.begin().await.map_err(unexpected_sqlx)?;
+        let doc = self
+            .unarchive_subtree_tx(&mut tx, doc_id, workspace_id)
+            .await
+            .map_err(DocumentRepositoryError::from)?;
+        tx.commit().await.map_err(unexpected_sqlx)?;
+        Ok(doc)
+    }
+
+    async fn list_owned_subtree_documents(
+        &self,
+        workspace_id: Uuid,
+        root_id: Uuid,
+    ) -> DocumentRepoResult<Vec<SubtreeDocument>> {
+        let rows = sqlx::query(
+            r#"
+            WITH RECURSIVE subtree AS (
+                SELECT id, type FROM documents WHERE id = $1 AND workspace_id = $2
+                UNION ALL
+                SELECT d.id, d.type
+                FROM documents d
+                JOIN subtree sb ON COALESCE(d.parent_id, d.archived_parent_id) = sb.id
+                WHERE d.workspace_id = $2
+            )
+            SELECT id, type FROM subtree
+            "#,
+        )
+        .bind(root_id)
+        .bind(workspace_id)
+        .fetch_all(&self.pool)
+        .await
+        .map_err(unexpected_sqlx)?;
+        rows.into_iter()
+            .map(|r| {
+                let doc_type_str: String = r.get("type");
+                let doc_type = DocumentType::try_from(doc_type_str.as_str())
+                    .context("invalid_document_type")?;
+                Ok(SubtreeDocument {
+                    id: r.get("id"),
+                    doc_type,
+                })
+            })
+            .collect::<anyhow::Result<Vec<_>>>()
+            .map_err(DocumentRepositoryError::from)
+    }
+}
+
+#[async_trait]
+impl DocumentPathRepository for SqlxDocumentRepository {
+    async fn list_paths_for_user(&self, workspace_id: Uuid) -> PortResult<Vec<String>> {
+        let out: anyhow::Result<Vec<String>> = async {
+            let rows = sqlx::query(
+                r#"
+                SELECT path
+                FROM documents
+                WHERE workspace_id = $1
+                  AND path IS NOT NULL
+                  AND type <> 'folder'
+                "#,
+            )
+            .bind(workspace_id)
+            .fetch_all(&self.pool)
+            .await?;
+            Ok(rows
+                .into_iter()
+                .filter_map(|r| r.try_get::<String, _>("path").ok())
+                .collect())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn get_by_owner_and_path(
+        &self,
+        workspace_id: Uuid,
+        relative_path: &str,
+    ) -> PortResult<Option<DomainDocument>> {
+        let out: anyhow::Result<Option<DomainDocument>> = async {
+            let row = sqlx::query(
+                r#"SELECT *
+                   FROM documents
+                   WHERE workspace_id = $1 AND path = $2
+                   LIMIT 1"#,
+            )
+            .bind(workspace_id)
+            .bind(relative_path)
+            .fetch_optional(&self.pool)
+            .await?;
+            row.map(|r| Self::map_row_to_document(&r)).transpose()
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn update_repo_path(
+        &self,
+        doc_id: Uuid,
+        workspace_id: Uuid,
+        relative_path: &str,
+    ) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            let repo_path = doc_path::repo_relative_from_storage(workspace_id, relative_path)
+                .ok_or_else(|| anyhow!("invalid_relative_path"))?;
+            let desired_path = doc_path::DesiredPath::new(repo_path.as_str().to_string())
+                .context("invalid_path")?;
+            let slug = doc_path::slug_from_desired_path(&desired_path)?;
+            let parent_path = doc_path::parent_desired_path(&desired_path);
+            let parent_id = self
+                .resolve_parent_folder_id(workspace_id, parent_path.as_ref())
+                .await?;
+            let normalized_path = Self::owner_relative_path(workspace_id, desired_path.as_str());
+            let path_digest = Self::hash_path(desired_path.as_str());
+            sqlx::query(
+                r#"UPDATE documents SET
+                    path = $3,
+                    desired_path = $4,
+                    path_digest = $5,
+                    slug = $6,
+                    parent_id = $7,
+                    updated_at = now()
+                WHERE id = $1 AND workspace_id = $2"#,
+            )
+            .bind(doc_id)
+            .bind(workspace_id)
+            .bind(&normalized_path)
+            .bind(desired_path.as_str())
+            .bind(&path_digest)
+            .bind(slug.as_str())
+            .bind(parent_id)
+            .execute(&self.pool)
+            .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
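
`update_repo_path` re-derives everything from one storage-relative path: strip the workspace prefix to get `desired_path`, take the slug from the leaf, and resolve the parent folder from the remaining prefix. A worked example using plain string operations (the exact behavior of the `doc_path` helpers is assumed here, and the workspace id is a placeholder):

    fn main() {
        let workspace = "workspace-id"; // placeholder, not a real UUID
        let storage_relative = format!("{workspace}/docs/guide/intro.md");

        // repo_relative_from_storage: drop the "<workspace_id>/" prefix.
        let desired_path = storage_relative
            .strip_prefix(&format!("{workspace}/"))
            .expect("path is inside the workspace");
        // slug_from_desired_path: leaf name without the ".md" suffix.
        let slug = desired_path
            .rsplit('/')
            .next()
            .and_then(|leaf| leaf.strip_suffix(".md"))
            .unwrap();
        // parent_desired_path: everything before the last '/'.
        let parent = desired_path.rsplit_once('/').map(|(p, _)| p);

        assert_eq!(desired_path, "docs/guide/intro.md");
        assert_eq!(slug, "intro");
        assert_eq!(parent, Some("docs/guide"));
    }
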
diff --git a/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/tests.rs b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/tests.rs
new file mode 100644
index 00000000..8dbf9751
--- /dev/null
+++ b/api/crates/infrastructure/src/documents/db/repositories/document_repository_sqlx/tests.rs
@@ -0,0 +1,19 @@
+use domain::documents::path as doc_path;
+
+#[test]
+fn slug_preserves_unicode_and_case() {
+    assert_eq!(doc_path::Slug::from_title("Main").as_str(), "Main");
+    assert_eq!(
+        doc_path::Slug::from_title("Résumé2025").as_str(),
+        "Résumé2025"
+    );
+}
+
+#[test]
+fn slug_sanitizes_forbidden_chars() {
+    assert_eq!(
+        doc_path::Slug::from_title(" Foo / Bar ").as_str(),
+        "Foo - Bar"
+    );
+    assert_eq!(doc_path::Slug::from_title("////").as_str(), "untitled");
+}
+ } + }; - Ok(SnapshotArchiveRecord { - id: row.get("id"), - document_id: row.get("document_id"), - version: row.get::<i32, _>("version") as i64, - label: row.get("label"), - notes: row.try_get("notes").ok(), - kind: row.get("kind"), - created_at: row.get("created_at"), - created_by: row.try_get("created_by").ok(), - byte_size: row.get("byte_size"), - content_hash: row.get("content_hash"), - }) + Ok(SnapshotArchiveRecord { + id: row.get("id"), + document_id: row.get("document_id"), + version: row.get::<i32, _>("version") as i64, + label: row.get("label"), + notes: row.try_get("notes").ok(), + kind: row.get("kind"), + created_at: row.get("created_at"), + created_by: row.try_get("created_by").ok(), + byte_size: row.get("byte_size"), + content_hash: row.get("content_hash"), + }) + } + .await; + out.map_err(Into::into) } - async fn get_by_id( - &self, - id: Uuid, - ) -> anyhow::Result<Option<(SnapshotArchiveRecord, Vec<u8>)>> { - let row = sqlx::query( - r#"SELECT + async fn get_by_id(&self, id: Uuid) -> PortResult<Option<SnapshotArchiveEntry>> { + let out: anyhow::Result<Option<SnapshotArchiveEntry>> = async { + let row = sqlx::query( + r#"SELECT id, document_id, version, @@ -119,15 +120,13 @@ impl DocumentSnapshotArchiveRepository for SqlxDocumentSnapshotArchiveRepository content_hash FROM document_snapshot_archives WHERE id = $1"#, - ) - .bind(id) - .fetch_optional(&self.pool) - .await?; + ) + .bind(id) + .fetch_optional(&self.pool) + .await?; - Ok(row.map(|row| { - let snapshot: Vec<u8> = row.get("snapshot"); - ( - SnapshotArchiveRecord { + Ok(row.map(|row| SnapshotArchiveEntry { + record: SnapshotArchiveRecord { id: row.get("id"), document_id: row.get("document_id"), version: row.get::<i32, _>("version") as i64, @@ -139,9 +138,11 @@ impl DocumentSnapshotArchiveRepository for SqlxDocumentSnapshotArchiveRepository byte_size: row.get("byte_size"), content_hash: row.get("content_hash"), }, - snapshot, - ) - })) + bytes: row.get("snapshot"), + })) + } + .await; + out.map_err(Into::into) } async fn list_for_document( @@ -149,9 +150,10 @@ impl DocumentSnapshotArchiveRepository for SqlxDocumentSnapshotArchiveRepository doc_id: Uuid, limit: i64, offset: i64, - ) -> anyhow::Result<Vec<SnapshotArchiveRecord>> { - let rows = sqlx::query( - r#"SELECT + ) -> PortResult<Vec<SnapshotArchiveRecord>> { + let out: anyhow::Result<Vec<SnapshotArchiveRecord>> = async { + let rows = sqlx::query( + r#"SELECT id, document_id, version, @@ -166,37 +168,41 @@ impl DocumentSnapshotArchiveRepository for SqlxDocumentSnapshotArchiveRepository WHERE document_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3"#, - ) - .bind(doc_id) - .bind(limit.max(1)) - .bind(offset.max(0)) - .fetch_all(&self.pool) - .await?; + ) + .bind(doc_id) + .bind(limit.max(1)) + .bind(offset.max(0)) + .fetch_all(&self.pool) + .await?; - Ok(rows - .into_iter() - .map(|row| SnapshotArchiveRecord { - id: row.get("id"), - document_id: row.get("document_id"), - version: row.get::<i32, _>("version") as i64, - label: row.get("label"), - notes: row.try_get("notes").ok(), - kind: row.get("kind"), - created_at: row.get("created_at"), - created_by: row.try_get("created_by").ok(), - byte_size: row.get("byte_size"), - content_hash: row.get("content_hash"), - }) - .collect()) + Ok(rows + .into_iter() + .map(|row| SnapshotArchiveRecord { + id: row.get("id"), + document_id: row.get("document_id"), + version: row.get::<i32, _>("version") as i64, + label: row.get("label"), + notes: row.try_get("notes").ok(), + kind: row.get("kind"), + created_at: row.get("created_at"), + created_by: row.try_get("created_by").ok(), + byte_size: row.get("byte_size"), + content_hash: row.get("content_hash"), + }) + .collect()) + } + .await; + out.map_err(Into::into) } async fn latest_before( &self,
doc_id: Uuid, version: i64, - ) -> anyhow::Result<Option<(SnapshotArchiveRecord, Vec<u8>)>> { - let row = sqlx::query( - r#"SELECT + ) -> PortResult<Option<SnapshotArchiveEntry>> { + let out: anyhow::Result<Option<SnapshotArchiveEntry>> = async { + let row = sqlx::query( + r#"SELECT id, document_id, version, @@ -212,16 +218,14 @@ impl DocumentSnapshotArchiveRepository for SqlxDocumentSnapshotArchiveRepository WHERE document_id = $1 AND version < $2 ORDER BY version DESC LIMIT 1"#, - ) - .bind(doc_id) - .bind(version as i32) - .fetch_optional(&self.pool) - .await?; + ) + .bind(doc_id) + .bind(version as i32) + .fetch_optional(&self.pool) + .await?; - Ok(row.map(|row| { - let snapshot: Vec<u8> = row.get("snapshot"); - ( - SnapshotArchiveRecord { + Ok(row.map(|row| SnapshotArchiveEntry { + record: SnapshotArchiveRecord { id: row.get("id"), document_id: row.get("document_id"), version: row.get::<i32, _>("version") as i64, @@ -233,8 +237,10 @@ impl DocumentSnapshotArchiveRepository for SqlxDocumentSnapshotArchiveRepository byte_size: row.get("byte_size"), content_hash: row.get("content_hash"), }, - snapshot, - ) - })) + bytes: row.get("snapshot"), + })) + } + .await; + out.map_err(Into::into) } } diff --git a/api/crates/infrastructure/src/documents/db/repositories/files_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/files_repository_sqlx/mod.rs new file mode 100644 index 00000000..ece2503c --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/files_repository_sqlx/mod.rs @@ -0,0 +1,265 @@ +use async_trait::async_trait; +use sqlx::{Postgres, Row, Transaction}; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::files::files_repository::{ + FileMeta, FilePathMeta, FileRecord, FilesRepository, StoredFileScope, +}; + +pub struct SqlxFilesRepository { + pub pool: PgPool, +} + +impl SqlxFilesRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + pub(crate) async fn list_storage_paths_for_document_tx( + &self, + tx: &mut Transaction<'_, Postgres>, + doc_id: Uuid, + ) -> anyhow::Result<Vec<String>> { + let rows = sqlx::query("SELECT storage_path FROM files WHERE document_id = $1 FOR UPDATE") + .bind(doc_id) + .fetch_all(tx.as_mut()) + .await?; + Ok(rows + .into_iter() + .filter_map(|r| r.try_get::<String, _>("storage_path").ok()) + .collect()) + } +} + +#[async_trait] +impl FilesRepository for SqlxFilesRepository { + async fn is_workspace_document(&self, doc_id: Uuid, workspace_id: Uuid) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let n = sqlx::query_scalar::<_, i64>( + "SELECT COUNT(1) FROM documents WHERE id = $1 AND workspace_id = $2", + ) + .bind(doc_id) + .bind(workspace_id) + .fetch_one(&self.pool) + .await?; + Ok(n > 0) + } + .await; + out.map_err(Into::into) + } + + async fn insert_file( + &self, + doc_id: Uuid, + filename: &str, + content_type: Option<&str>, + size: i64, + storage_path: &str, + content_hash: &str, + ) -> PortResult<Uuid> { + let out: anyhow::Result<Uuid> = async { + let row = sqlx::query( + r#"INSERT INTO files (document_id, filename, content_type, size, storage_path, content_hash) + VALUES ($1, $2, $3, $4, $5, $6) + RETURNING id"#, + ) + .bind(doc_id) + .bind(filename) + .bind(content_type) + .bind(size) + .bind(storage_path) + .bind(content_hash) + .fetch_one(&self.pool) + .await?; + Ok(row.get("id")) + } + .await; + out.map_err(Into::into) + } + + async fn get_file_meta(&self, file_id: Uuid) -> PortResult<Option<FileMeta>> { + let out: anyhow::Result<Option<FileMeta>> = async { + let row = sqlx::query( + r#"SELECT f.storage_path, f.content_type, f.document_id, d.workspace_id + FROM
files f JOIN documents d ON f.document_id = d.id + WHERE f.id = $1"#, + ) + .bind(file_id) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| FileMeta { + storage_path: r.get("storage_path"), + content_type: r.try_get("content_type").ok(), + document_id: r.get("document_id"), + workspace_id: r.get("workspace_id"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn get_file_path_by_doc_and_name( + &self, + doc_id: Uuid, + filename: &str, + ) -> PortResult<Option<FilePathMeta>> { + let out: anyhow::Result<Option<FilePathMeta>> = async { + let row = sqlx::query( + r#"SELECT storage_path, content_type FROM files WHERE document_id = $1 AND filename = $2"#, + ) + .bind(doc_id) + .bind(filename) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| FilePathMeta { + storage_path: r.get("storage_path"), + content_type: r.try_get("content_type").ok(), + })) + } + .await; + out.map_err(Into::into) + } + + async fn list_storage_paths_for_document(&self, doc_id: Uuid) -> PortResult<Vec<String>> { + let out: anyhow::Result<Vec<String>> = async { + let rows = sqlx::query("SELECT storage_path FROM files WHERE document_id = $1") + .bind(doc_id) + .fetch_all(&self.pool) + .await?; + Ok(rows + .into_iter() + .filter_map(|r| r.try_get::<String, _>("storage_path").ok()) + .collect()) + } + .await; + out.map_err(Into::into) + } + + async fn list_files_for_document(&self, doc_id: Uuid) -> PortResult<Vec<FileRecord>> { + let out: anyhow::Result<Vec<FileRecord>> = async { + let rows = sqlx::query( + r#"SELECT id, filename, content_type, size, storage_path, content_hash + FROM files + WHERE document_id = $1"#, + ) + .bind(doc_id) + .fetch_all(&self.pool) + .await?; + Ok(rows + .into_iter() + .map(|r| FileRecord { + id: r.get("id"), + filename: r.get("filename"), + content_type: r.try_get("content_type").ok(), + size: r.get("size"), + storage_path: r.get("storage_path"), + content_hash: r.get("content_hash"), + }) + .collect()) + } + .await; + out.map_err(Into::into) + } + + async fn list_storage_paths_for_workspace( + &self, + workspace_id: Uuid, + ) -> PortResult<Vec<String>> { + let out: anyhow::Result<Vec<String>> = async { + let rows = sqlx::query( + r#" + SELECT f.storage_path + FROM files f + JOIN documents d ON d.id = f.document_id + WHERE d.workspace_id = $1 + "#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + Ok(rows + .into_iter() + .filter_map(|r| r.try_get::<String, _>("storage_path").ok()) + .collect()) + } + .await; + out.map_err(Into::into) + } + + async fn find_by_storage_path( + &self, + storage_path: &str, + ) -> PortResult<Option<StoredFileScope>> { + let out: anyhow::Result<Option<StoredFileScope>> = async { + let row = sqlx::query( + r#"SELECT f.id as file_id, f.document_id, d.workspace_id + FROM files f + JOIN documents d ON d.id = f.document_id + WHERE f.storage_path = $1 + LIMIT 1"#, + ) + .bind(storage_path) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| StoredFileScope { + file_id: r.get("file_id"), + document_id: r.get("document_id"), + workspace_id: r.get("workspace_id"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn update_storage_path(&self, file_id: Uuid, storage_path: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#"UPDATE files SET storage_path = $2, updated_at = now() + WHERE id = $1"#, + ) + .bind(file_id) + .bind(storage_path) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn update_hash_and_size( + &self, + file_id: Uuid, + size: i64, + content_hash: &str, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#"UPDATE files SET size = $2, content_hash = $3, updated_at = now() + WHERE id
= $1"#, + ) + .bind(file_id) + .bind(size) + .bind(content_hash) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn delete_by_id(&self, file_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM files WHERE id = $1") + .bind(file_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/db/repositories/linkgraph_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/linkgraph_repository_sqlx/mod.rs new file mode 100644 index 00000000..f4d0e687 --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/linkgraph_repository_sqlx/mod.rs @@ -0,0 +1,192 @@ +use anyhow::Context; +use async_trait::async_trait; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::linkgraph_repository::LinkGraphRepository; +use domain::documents::doc_type::DocumentType; +use domain::documents::document::{BacklinkInfo, OutgoingLink}; +use domain::documents::title::Title; + +pub struct SqlxLinkGraphRepository { + pub pool: PgPool, +} + +impl SqlxLinkGraphRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl LinkGraphRepository for SqlxLinkGraphRepository { + async fn clear_links_for_source(&self, source_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM document_links WHERE source_document_id = $1") + .bind(source_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn exists_doc_for_owner(&self, doc_id: Uuid, owner_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let n = sqlx::query_scalar::<_, i64>( + "SELECT COUNT(1) FROM documents WHERE id = $1 AND owner_id = $2", + ) + .bind(doc_id) + .bind(owner_id) + .fetch_one(&self.pool) + .await?; + Ok(n > 0) + } + .await; + out.map_err(Into::into) + } + + async fn find_doc_id_by_owner_and_title( + &self, + owner_id: Uuid, + title: &str, + ) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#"SELECT id FROM documents + WHERE owner_id = $1 AND LOWER(title) = LOWER($2) + ORDER BY updated_at DESC LIMIT 1"#, + ) + .bind(owner_id) + .bind(title) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| r.get::("id"))) + } + .await; + out.map_err(Into::into) + } + + async fn upsert_link( + &self, + source_id: Uuid, + target_id: Uuid, + link_type: &str, + link_text: Option, + position_start: i32, + position_end: i32, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#"INSERT INTO document_links ( + source_document_id, target_document_id, link_type, + link_text, position_start, position_end, created_at, updated_at + ) VALUES ($1, $2, $3, $4, $5, $6, now(), now()) + ON CONFLICT (source_document_id, target_document_id, position_start) + DO UPDATE SET link_type = EXCLUDED.link_type, + link_text = EXCLUDED.link_text, + position_end = EXCLUDED.position_end, + updated_at = now() + "#, + ) + .bind(source_id) + .bind(target_id) + .bind(link_type) + .bind(link_text) + .bind(position_start) + .bind(position_end) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn backlinks_for( + &self, + workspace_id: Uuid, + target_id: Uuid, + ) -> PortResult> { + let out: anyhow::Result> = async { + let rows = sqlx::query( + 
r#"SELECT d.id as document_id, d.title, d.type as document_type, d.path as file_path, + dl.link_type, dl.link_text, COUNT(*)::BIGINT as link_count + FROM document_links dl + JOIN documents d ON d.id = dl.source_document_id + WHERE dl.target_document_id = $1 AND d.workspace_id = $2 + GROUP BY d.id, d.title, d.type, d.path, dl.link_type, dl.link_text + ORDER BY link_count DESC, d.title"#, + ) + .bind(target_id) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + + rows.into_iter() + .map(|r| { + let doc_type_str: String = r.get("document_type"); + let document_type = DocumentType::try_from(doc_type_str.as_str()) + .context("invalid_document_type")?; + let title: String = r.get("title"); + Ok(BacklinkInfo { + document_id: r.get("document_id"), + title: Title::new(title), + document_type, + file_path: r.try_get("file_path").ok(), + link_type: r.get("link_type"), + link_text: r.try_get("link_text").ok(), + link_count: r.try_get("link_count").unwrap_or(1_i64), + }) + }) + .collect() + } + .await; + out.map_err(Into::into) + } + + async fn outgoing_links_for( + &self, + workspace_id: Uuid, + source_id: Uuid, + ) -> PortResult> { + let out: anyhow::Result> = async { + let rows = sqlx::query( + r#"SELECT d.id as document_id, d.title, d.type as document_type, d.path as file_path, + dl.link_type, dl.link_text, dl.position_start, dl.position_end + FROM document_links dl + JOIN documents d ON d.id = dl.target_document_id + WHERE dl.source_document_id = $1 AND d.workspace_id = $2 + ORDER BY dl.position_start"#, + ) + .bind(source_id) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + + rows.into_iter() + .map(|r| { + let doc_type_str: String = r.get("document_type"); + let document_type = DocumentType::try_from(doc_type_str.as_str()) + .context("invalid_document_type")?; + let title: String = r.get("title"); + Ok(OutgoingLink { + document_id: r.get("document_id"), + title: Title::new(title), + document_type, + file_path: r.try_get("file_path").ok(), + link_type: r.get("link_type"), + link_text: r.try_get("link_text").ok(), + position_start: r.try_get("position_start").ok(), + position_end: r.try_get("position_end").ok(), + }) + }) + .collect() + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/db/repositories/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/mod.rs new file mode 100644 index 00000000..ee581066 --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/mod.rs @@ -0,0 +1,9 @@ +pub mod access_repository_sqlx; +pub mod document_repository_sqlx; +pub mod document_snapshot_archive_repository_sqlx; +pub mod files_repository_sqlx; +pub mod linkgraph_repository_sqlx; +pub mod public_repository_sqlx; +pub mod shares_repository_sqlx; +pub mod tag_repository_sqlx; +pub mod tagging_repository_sqlx; diff --git a/api/crates/infrastructure/src/documents/db/repositories/public_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/public_repository_sqlx/mod.rs new file mode 100644 index 00000000..002ce43c --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/public_repository_sqlx/mod.rs @@ -0,0 +1,253 @@ +use anyhow::Context; +use async_trait::async_trait; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::publishing::public_repository::{ + PublicDocumentSummaryRow, PublicRepository, PublishStatusRow, WorkspaceTitleAndSlug, +}; +use 
domain::documents::doc_type::DocumentType; +use domain::documents::document::Document; +use domain::documents::path as doc_path; +use domain::documents::title::Title; + +pub struct SqlxPublicRepository { + pub pool: PgPool, +} + +impl SqlxPublicRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl PublicRepository for SqlxPublicRepository { + async fn ensure_workspace_title_and_slug( + &self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> PortResult<Option<WorkspaceTitleAndSlug>> { + let out: anyhow::Result<Option<WorkspaceTitleAndSlug>> = async { + let row = sqlx::query( + "SELECT d.title, w.slug as workspace_slug FROM documents d JOIN workspaces w ON d.workspace_id = w.id WHERE d.id = $1 AND d.workspace_id = $2", + ) + .bind(doc_id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| WorkspaceTitleAndSlug { + title: r.get("title"), + workspace_slug: r.get("workspace_slug"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn upsert_public_document(&self, doc_id: Uuid, slug: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let _ = sqlx::query("INSERT INTO public_documents (document_id, slug, published_at) VALUES ($1, $2, now()) ON CONFLICT (document_id) DO UPDATE SET slug = EXCLUDED.slug, published_at = now()") + .bind(doc_id) + .bind(slug) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn slug_exists(&self, slug: &str) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let n = sqlx::query_scalar::<_, i64>( + "SELECT COUNT(1) FROM public_documents WHERE slug = $1", + ) + .bind(slug) + .fetch_one(&self.pool) + .await?; + Ok(n > 0) + } + .await; + out.map_err(Into::into) + } + + async fn is_workspace_document(&self, doc_id: Uuid, workspace_id: Uuid) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let n = sqlx::query_scalar::<_, i64>( + "SELECT COUNT(1) FROM documents WHERE id = $1 AND workspace_id = $2", + ) + .bind(doc_id) + .bind(workspace_id) + .fetch_one(&self.pool) + .await?; + Ok(n > 0) + } + .await; + out.map_err(Into::into) + } + + async fn delete_public_document(&self, doc_id: Uuid) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let res = sqlx::query("DELETE FROM public_documents WHERE document_id = $1") + .bind(doc_id) + .execute(&self.pool) + .await?; + Ok(res.rows_affected() > 0) + } + .await; + out.map_err(Into::into) + } + + async fn get_publish_status( + &self, + workspace_id: Uuid, + doc_id: Uuid, + ) -> PortResult<Option<PublishStatusRow>> { + let out: anyhow::Result<Option<PublishStatusRow>> = async { + let row = sqlx::query( + r#"SELECT p.slug, w.slug as workspace_slug + FROM public_documents p + JOIN documents d ON p.document_id = d.id + JOIN workspaces w ON d.workspace_id = w.id + WHERE p.document_id = $1 AND d.workspace_id = $2"#, + ) + .bind(doc_id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| PublishStatusRow { + slug: r.get("slug"), + workspace_slug: r.get("workspace_slug"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn list_workspace_public_documents( + &self, + workspace_slug: &str, + ) -> PortResult<Vec<PublicDocumentSummaryRow>> { + let out: anyhow::Result<Vec<PublicDocumentSummaryRow>> = async { + let rows = sqlx::query( + r#"SELECT d.id, d.title, d.updated_at, p.published_at + FROM public_documents p + JOIN documents d ON p.document_id = d.id + JOIN workspaces w ON d.workspace_id = w.id + WHERE w.slug = $1 + OR (w.is_personal AND EXISTS ( + SELECT 1 + FROM users u + WHERE u.id = w.id AND lower(u.name) = lower($1) + )) + ORDER BY d.updated_at DESC LIMIT 200"#, + ) + .bind(workspace_slug) + .fetch_all(&self.pool) +
.await?; + Ok(rows + .into_iter() + .map(|r| PublicDocumentSummaryRow { + id: r.get("id"), + title: r.get("title"), + updated_at: r.get("updated_at"), + published_at: r.get("published_at"), + }) + .collect()) + } + .await; + out.map_err(Into::into) + } + + async fn get_public_meta_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> PortResult<Option<Document>> { + let out: anyhow::Result<Option<Document>> = async { + let row = sqlx::query( + r#"SELECT d.id, d.owner_id, d.owner_user_id, d.workspace_id, d.title, d.parent_id, d.type, d.created_at, d.updated_at, + d.slug, d.desired_path, d.path, d.created_by, d.created_by_plugin, + d.archived_at, d.archived_by, d.archived_parent_id + FROM public_documents p + JOIN documents d ON p.document_id = d.id + JOIN workspaces w ON d.workspace_id = w.id + WHERE (w.slug = $1 + OR (w.is_personal AND EXISTS ( + SELECT 1 + FROM users u + WHERE u.id = w.id AND lower(u.name) = lower($1) + ))) + AND d.id = $2"#, + ) + .bind(workspace_slug) + .bind(doc_id) + .fetch_optional(&self.pool) + .await?; + row.map(|r| { + let doc_type_str: String = r.get("type"); + let doc_type = DocumentType::try_from(doc_type_str.as_str()) + .context("invalid_document_type")?; + let title: String = r.get("title"); + let slug_str: String = r.get("slug"); + let slug = doc_path::Slug::new(slug_str).context("invalid_slug")?; + let desired_path_str: String = r.get("desired_path"); + let desired_path = doc_path::DesiredPath::new(desired_path_str) + .context("invalid_desired_path")?; + Ok(Document::rehydrate( + r.get("id"), + r.try_get("owner_user_id").ok(), + r.get("workspace_id"), + Title::new(title), + r.try_get("parent_id").ok(), + doc_type, + r.get("created_at"), + r.get("updated_at"), + r.try_get("created_by_plugin").ok(), + slug, + desired_path, + r.try_get("path").ok(), + r.try_get("created_by").ok(), + r.try_get("archived_at").ok(), + r.try_get("archived_by").ok(), + r.try_get("archived_parent_id").ok(), + )) + }) + .transpose() + } + .await; + out.map_err(Into::into) + } + + async fn public_exists_by_workspace_and_id( + &self, + workspace_slug: &str, + doc_id: Uuid, + ) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let n = sqlx::query_scalar::<_, i64>( + r#"SELECT COUNT(1) + FROM public_documents p + JOIN documents d ON p.document_id = d.id + JOIN workspaces w ON d.workspace_id = w.id + WHERE (w.slug = $1 + OR (w.is_personal AND EXISTS ( + SELECT 1 + FROM users u + WHERE u.id = w.id AND lower(u.name) = lower($1) + ))) + AND d.id = $2"#, + ) + .bind(workspace_slug) + .bind(doc_id) + .fetch_one(&self.pool) + .await?; + Ok(n > 0) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/db/repositories/shares_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/shares_repository_sqlx/mod.rs new file mode 100644 index 00000000..ecaa2b6d --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/shares_repository_sqlx/mod.rs @@ -0,0 +1,627 @@ +use async_trait::async_trait; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::sharing::share_access_port::ShareAccessPort; +use application::documents::ports::sharing::shares_repository::{ + ApplicableShareRow, CreatedShare, ShareDocumentMeta, ShareMountRow, ShareRow, ShareSubtreeNode, + ShareTokenValidation, SharesRepository, +}; +use domain::documents::doc_type::DocumentType; +use domain::documents::share; +use domain::documents::title::Title; + +pub struct
SqlxSharesRepository { + pub pool: PgPool, +} + +impl SqlxSharesRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + async fn fetch_share_resolution( + &self, + token: &str, + ) -> anyhow::Result<Option<share::ShareContext>> { + let row = sqlx::query( + r#"SELECT s.id as share_id, s.permission, s.expires_at, d.id as shared_id, d.type as shared_type, d.workspace_id + FROM shares s + JOIN documents d ON s.document_id = d.id + WHERE s.token = $1"#, + ) + .bind(token) + .fetch_optional(&self.pool) + .await?; + Ok(row.and_then(|r| { + let permission_raw: String = r.get("permission"); + let permission = share::SharePermission::parse(&permission_raw)?; + let shared_type_raw: String = r.get("shared_type"); + let shared_type = DocumentType::parse(&shared_type_raw)?; + Some(share::ShareContext { + share_id: r.get("share_id"), + permission, + expires_at: r.try_get("expires_at").ok(), + shared_id: r.get("shared_id"), + shared_type, + workspace_id: r.get("workspace_id"), + }) + })) + } + + fn parse_share_permission(raw: &str) -> anyhow::Result<share::SharePermission> { + share::SharePermission::parse(raw) + .ok_or_else(|| anyhow::anyhow!("invalid_share_permission")) + } + + fn parse_document_type(raw: &str) -> anyhow::Result<DocumentType> { + DocumentType::parse(raw).ok_or_else(|| anyhow::anyhow!("invalid_document_type")) + } +} + +#[async_trait] +impl SharesRepository for SqlxSharesRepository { + async fn create_share( + &self, + workspace_id: Uuid, + actor_id: Uuid, + document_id: Uuid, + permission: share::SharePermission, + expires_at: Option<chrono::DateTime<chrono::Utc>>, + ) -> PortResult<CreatedShare> { + let out: anyhow::Result<CreatedShare> = async { + // Verify ownership and type + let dtype_raw: String = sqlx::query_scalar( + "SELECT type FROM documents WHERE id = $1 AND workspace_id = $2", + ) + .bind(document_id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?
+ .ok_or_else(|| anyhow::anyhow!("forbidden"))?; + let dtype = Self::parse_document_type(&dtype_raw)?; + let token = Uuid::new_v4().to_string(); + let row = sqlx::query("INSERT INTO shares (document_id, token, permission, created_by, expires_at) VALUES ($1, $2, $3, $4, $5) RETURNING id, token") + .bind(document_id) + .bind(&token) + .bind(permission.as_str()) + .bind(actor_id) + .bind(expires_at) + .fetch_one(&self.pool) + .await?; + let token_saved: String = row.get("token"); + let share_id: Uuid = row.get("id"); + if dtype.is_folder() { + // Materialize per-document shares for folder subtree + let _created: i64 = sqlx::query_scalar( + r#" + WITH RECURSIVE subtree AS ( + SELECT id, type FROM documents WHERE id = $1 + UNION ALL + SELECT d.id, d.type FROM documents d JOIN subtree sb ON d.parent_id = sb.id + ), + targets AS ( + SELECT id FROM subtree WHERE type <> 'folder' + ), + inserted AS ( + INSERT INTO shares (document_id, token, permission, created_by, expires_at, parent_share_id) + SELECT t.id, gen_random_uuid()::text, $2, $3, $4, $5 + FROM targets t + WHERE NOT EXISTS ( + SELECT 1 + FROM shares s2 + WHERE s2.document_id = t.id + AND s2.parent_share_id = $5 + ) + RETURNING 1 + ) + SELECT COALESCE(COUNT(*),0) FROM inserted + "# + ) + .bind(document_id) + .bind(permission.as_str()) + .bind(actor_id) + .bind(expires_at) + .bind(share_id) + .fetch_one(&self.pool) + .await?; + } + Ok(CreatedShare { + token: token_saved, + share_id, + document_type: dtype, + }) + } + .await; + out.map_err(Into::into) + } + + async fn list_document_shares( + &self, + workspace_id: Uuid, + document_id: Uuid, + ) -> PortResult<Vec<ShareRow>> { + let out: anyhow::Result<Vec<ShareRow>> = async { + let rows = sqlx::query( + r#"SELECT s.id, s.token, s.permission, s.expires_at, s.parent_share_id, s.created_at, + d.id as document_id, d.title as document_title, d.type as document_type + FROM shares s JOIN documents d ON d.id = s.document_id + WHERE s.document_id = $1 AND d.workspace_id = $2 + ORDER BY s.created_at DESC"#, + ) + .bind(document_id) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + let mut out = Vec::with_capacity(rows.len()); + for r in rows.into_iter() { + let permission_raw: String = r.get("permission"); + let document_type_raw: String = r.get("document_type"); + out.push(ShareRow { + id: r.get("id"), + token: r.get("token"), + permission: Self::parse_share_permission(&permission_raw)?, + expires_at: r.try_get("expires_at").ok(), + parent_share_id: r.try_get("parent_share_id").ok(), + document_id: r.get("document_id"), + document_type: Self::parse_document_type(&document_type_raw)?, + document_title: Title::new(r.get::<String, _>("document_title")), + created_at: r.get("created_at"), + }); + } + Ok(out) + } + .await; + out.map_err(Into::into) + } + + async fn delete_share(&self, workspace_id: Uuid, token: &str) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let res = sqlx::query( + "DELETE FROM shares s USING documents d WHERE s.token = $1 AND s.document_id = d.id AND d.workspace_id = $2", + ) + .bind(token) + .bind(workspace_id) + .execute(&self.pool) + .await?; + let deleted = res.rows_affected() > 0; + if deleted { + // Remove any saved mounts referencing this share token across workspaces + sqlx::query("DELETE FROM share_mounts WHERE share_token = $1") + .bind(token) + .execute(&self.pool) + .await?; + } + Ok(deleted) + } + .await; + out.map_err(Into::into) + } + + async fn list_share_mounts(&self, workspace_id: Uuid) -> PortResult<Vec<ShareMountRow>> { + let out: anyhow::Result<Vec<ShareMountRow>> = async { + // Clean up mounts whose share token no
longer exists or has expired + sqlx::query( + r#" + DELETE FROM share_mounts sm + WHERE sm.workspace_id = $1 + AND NOT EXISTS ( + SELECT 1 + FROM shares s + WHERE s.token = sm.share_token + AND (s.expires_at IS NULL OR s.expires_at > now()) + ) + "#, + ) + .bind(workspace_id) + .execute(&self.pool) + .await?; + + let rows = sqlx::query( + r#"SELECT id, share_token, target_document_id, target_document_type, target_title, permission, parent_folder_id, created_at + FROM share_mounts + WHERE workspace_id = $1 + ORDER BY created_at DESC"#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + let mut out = Vec::with_capacity(rows.len()); + for r in rows.into_iter() { + let permission_raw: String = r.get("permission"); + let target_document_type_raw: String = r.get("target_document_type"); + out.push(ShareMountRow { + id: r.get("id"), + token: r.get("share_token"), + target_document_id: r.get("target_document_id"), + target_document_type: Self::parse_document_type(&target_document_type_raw)?, + target_title: Title::new(r.get::<String, _>("target_title")), + permission: Self::parse_share_permission(&permission_raw)?, + parent_folder_id: r.try_get("parent_folder_id").ok(), + created_at: r.get("created_at"), + }); + } + Ok(out) + } + .await; + out.map_err(Into::into) + } + + async fn create_share_mount( + &self, + workspace_id: Uuid, + actor_id: Uuid, + token: &str, + target_document_id: Uuid, + target_document_type: DocumentType, + target_title: Title, + permission: share::SharePermission, + parent_folder_id: Option<Uuid>, + ) -> PortResult<ShareMountRow> { + let out: anyhow::Result<ShareMountRow> = async { + if let Some(parent_id) = parent_folder_id { + let exists = sqlx::query_scalar::<_, i64>( + "SELECT 1 FROM documents WHERE id = $1 AND workspace_id = $2 AND type = 'folder'", + ) + .bind(parent_id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + if exists.is_none() { + anyhow::bail!("invalid_parent"); + } + } + let row = sqlx::query( + r#" + INSERT INTO share_mounts (workspace_id, created_by, share_token, target_document_id, target_document_type, target_title, permission, parent_folder_id) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (workspace_id, share_token, target_document_id) + DO UPDATE SET target_title = EXCLUDED.target_title, + permission = EXCLUDED.permission, + parent_folder_id = EXCLUDED.parent_folder_id + RETURNING id, share_token, target_document_id, target_document_type, target_title, permission, parent_folder_id, created_at + "#, + ) + .bind(workspace_id) + .bind(actor_id) + .bind(token) + .bind(target_document_id) + .bind(target_document_type.as_str()) + .bind(target_title.as_str()) + .bind(permission.as_str()) + .bind(parent_folder_id) + .fetch_one(&self.pool) + .await?; + + let target_document_type_raw: String = row.get("target_document_type"); + let permission_raw: String = row.get("permission"); + Ok(ShareMountRow { + id: row.get("id"), + token: row.get("share_token"), + target_document_id: row.get("target_document_id"), + target_document_type: Self::parse_document_type(&target_document_type_raw)?, + target_title: Title::new(row.get::<String, _>("target_title")), + permission: Self::parse_share_permission(&permission_raw)?, + parent_folder_id: row.try_get("parent_folder_id").ok(), + created_at: row.get("created_at"), + }) + } + .await; + out.map_err(Into::into) + } + + async fn delete_share_mount(&self, workspace_id: Uuid, mount_id: Uuid) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let res = sqlx::query("DELETE FROM share_mounts WHERE id = $1 AND workspace_id = $2") + .bind(mount_id)
+ .bind(workspace_id) + .execute(&self.pool) + .await?; + Ok(res.rows_affected() > 0) + } + .await; + out.map_err(Into::into) + } + + async fn validate_share_token(&self, token: &str) -> PortResult<Option<ShareTokenValidation>> { + let out: anyhow::Result<Option<ShareTokenValidation>> = async { + let row = sqlx::query( + r#"SELECT s.document_id, s.permission, s.expires_at, d.title + FROM shares s JOIN documents d ON d.id = s.document_id + WHERE s.token = $1"#, + ) + .bind(token) + .fetch_optional(&self.pool) + .await?; + match row { + None => Ok(None), + Some(r) => { + let permission_raw: String = r.get("permission"); + Ok(Some(ShareTokenValidation { + document_id: r.get("document_id"), + permission: Self::parse_share_permission(&permission_raw)?, + expires_at: r.try_get("expires_at").ok(), + title: Title::new(r.get::<String, _>("title")), + })) + } + } + } + .await; + out.map_err(Into::into) + } + + async fn list_applicable_shares_for_doc( + &self, + workspace_id: Uuid, + doc_id: Uuid, + ) -> PortResult<Vec<ApplicableShareRow>> { + let out: anyhow::Result<Vec<ApplicableShareRow>> = async { + let rows = sqlx::query( + r#"SELECT s.token, s.permission, s.expires_at + FROM shares s + JOIN documents d ON d.id = s.document_id + WHERE s.document_id = $1 AND d.workspace_id = $2"#, + ) + .bind(doc_id) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + let mut out = Vec::with_capacity(rows.len()); + for r in rows.into_iter() { + let permission_raw: String = r.get("permission"); + out.push(ApplicableShareRow { + token: r.get("token"), + permission: Self::parse_share_permission(&permission_raw)?, + expires_at: r.try_get("expires_at").ok(), + }); + } + Ok(out) + } + .await; + out.map_err(Into::into) + } + + async fn list_active_shares(&self, workspace_id: Uuid) -> PortResult<Vec<ShareRow>> { + let out: anyhow::Result<Vec<ShareRow>> = async { + let rows = sqlx::query( + r#"SELECT s.id, s.token, s.permission, s.expires_at, s.created_at, s.parent_share_id, + d.id as document_id, d.title as document_title, d.type as document_type + FROM shares s + JOIN documents d ON d.id = s.document_id + WHERE d.workspace_id = $1 AND (s.expires_at IS NULL OR s.expires_at > now()) + ORDER BY s.created_at DESC"#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + let mut out = Vec::with_capacity(rows.len()); + for r in rows.into_iter() { + let permission_raw: String = r.get("permission"); + let document_type_raw: String = r.get("document_type"); + out.push(ShareRow { + id: r.get("id"), + token: r.get("token"), + permission: Self::parse_share_permission(&permission_raw)?, + expires_at: r.try_get("expires_at").ok(), + parent_share_id: r.try_get("parent_share_id").ok(), + document_id: r.get("document_id"), + document_type: Self::parse_document_type(&document_type_raw)?, + document_title: Title::new(r.get::<String, _>("document_title")), + created_at: r.get("created_at"), + }); + } + Ok(out) + } + .await; + out.map_err(Into::into) + } + + async fn resolve_share_by_token(&self, token: &str) -> PortResult<Option<share::ShareContext>> { + self.fetch_share_resolution(token).await.map_err(Into::into) + } + + async fn get_share_document_meta(&self, token: &str) -> PortResult<Option<ShareDocumentMeta>> { + let out: anyhow::Result<Option<ShareDocumentMeta>> = async { + let row = sqlx::query( + "SELECT d.id as document_id, d.owner_id, d.workspace_id FROM shares s JOIN documents d ON d.id = s.document_id WHERE s.token = $1", + ) + .bind(token) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| ShareDocumentMeta { + document_id: r.get("document_id"), + owner_id: r.get("owner_id"), + workspace_id: r.get("workspace_id"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn list_subtree_nodes(&self, root_id: Uuid) -> PortResult<Vec<ShareSubtreeNode>> {
let out: anyhow::Result<Vec<ShareSubtreeNode>> = async { + let rows = sqlx::query( + r#" + WITH RECURSIVE subtree AS ( + SELECT id, title, type, parent_id, created_at, updated_at FROM documents WHERE id = $1 + UNION ALL + SELECT d.id, d.title, d.type, d.parent_id, d.created_at, d.updated_at + FROM documents d JOIN subtree s ON d.parent_id = s.id + ) + SELECT id, title, type, parent_id, created_at, updated_at FROM subtree + "# + ) + .bind(root_id) + .fetch_all(&self.pool) + .await?; + rows + .into_iter() + .map(|r| { + let document_type_raw: String = r.get("type"); + Ok(ShareSubtreeNode { + id: r.get("id"), + title: Title::new(r.get::<String, _>("title")), + document_type: Self::parse_document_type(&document_type_raw)?, + parent_id: r.try_get("parent_id").ok(), + created_at: r.get("created_at"), + updated_at: r.get("updated_at"), + }) + }) + .collect::<anyhow::Result<Vec<_>>>() + } + .await; + out.map_err(Into::into) + } + + async fn list_materialized_children(&self, parent_share_id: Uuid) -> PortResult<Vec<Uuid>> { + let out: anyhow::Result<Vec<Uuid>> = async { + let ids = sqlx::query_scalar("SELECT document_id FROM shares WHERE parent_share_id = $1 AND (expires_at IS NULL OR expires_at > now())") + .bind(parent_share_id) + .fetch_all(&self.pool) + .await?; + Ok(ids) + } + .await; + out.map_err(Into::into) + } + + async fn materialize_folder_share( + &self, + workspace_id: Uuid, + actor_id: Uuid, + token: &str, + ) -> PortResult<i64> { + let out: anyhow::Result<i64> = async { + let row = sqlx::query( + r#"SELECT s.id as share_id, s.permission, s.expires_at, d.id as folder_id, d.workspace_id, d.type + FROM shares s JOIN documents d ON d.id = s.document_id + WHERE s.token = $1"# + ) + .bind(token) + .fetch_optional(&self.pool) + .await?; + let row = match row { + Some(r) => r, + None => anyhow::bail!("not_found"), + }; + let workspace: Uuid = row.get("workspace_id"); + if workspace != workspace_id { + anyhow::bail!("forbidden"); + } + let dtype_raw: String = row.get("type"); + let dtype = Self::parse_document_type(&dtype_raw)?; + if !dtype.is_folder() { + anyhow::bail!("bad_request"); + } + let folder_id: Uuid = row.get("folder_id"); + let share_id: Uuid = row.get("share_id"); + let permission_raw: String = row.get("permission"); + let permission = Self::parse_share_permission(&permission_raw)?; + let expires_at: Option<chrono::DateTime<chrono::Utc>> = row.try_get("expires_at").ok(); + + if share::is_expired(expires_at.as_ref(), chrono::Utc::now()) { + anyhow::bail!("not_found"); + } + + let created = sqlx::query_scalar::<_, i64>( + r#" + WITH RECURSIVE subtree AS ( + SELECT id, type FROM documents WHERE id = $1 + UNION ALL + SELECT d.id, d.type FROM documents d JOIN subtree sb ON d.parent_id = sb.id + ), + targets AS ( + SELECT id FROM subtree WHERE type <> 'folder' + ), + inserted AS ( + INSERT INTO shares (document_id, token, permission, created_by, expires_at, parent_share_id) + SELECT t.id, gen_random_uuid()::text, $3, $4, $5, $2 + FROM targets t + WHERE NOT EXISTS ( + SELECT 1 + FROM shares s2 + WHERE s2.document_id = t.id + AND s2.parent_share_id = $2 + ) + RETURNING 1 + ) + SELECT COALESCE(COUNT(*),0) FROM inserted + "# + ) + .bind(folder_id) + .bind(share_id) + .bind(permission.as_str()) + .bind(actor_id) + .bind(expires_at) + .fetch_one(&self.pool) + .await?; + Ok(created) + } + .await; + out.map_err(Into::into) + } + + async fn revoke_subtree_shares(&self, workspace_id: Uuid, root_id: Uuid) -> PortResult<i64> { + let out: anyhow::Result<i64> = async { + let deleted = sqlx::query_scalar::<_, i64>( + r#" + WITH RECURSIVE subtree AS ( + SELECT id FROM documents WHERE id = $1 AND workspace_id = $2 + UNION
ALL + SELECT d.id + FROM documents d + JOIN subtree sb ON d.parent_id = sb.id + WHERE d.workspace_id = $2 + ), + removed AS ( + DELETE FROM shares s + USING subtree sb + WHERE s.document_id = sb.id + RETURNING 1 + ) + SELECT COALESCE(COUNT(*), 0) FROM removed + "#, + ) + .bind(root_id) + .bind(workspace_id) + .fetch_one(&self.pool) + .await?; + Ok(deleted) + } + .await; + out.map_err(Into::into) + } +} + +#[async_trait] +impl ShareAccessPort for SqlxSharesRepository { + async fn resolve_share_by_token(&self, token: &str) -> PortResult<Option<share::ShareContext>> { + self.fetch_share_resolution(token).await.map_err(Into::into) + } + + async fn get_materialized_permission( + &self, + parent_share_id: Uuid, + doc_id: Uuid, + ) -> PortResult<Option<share::SharePermission>> { + let out: anyhow::Result<Option<share::SharePermission>> = async { + let perm = sqlx::query_scalar::<_, String>( + "SELECT permission FROM shares WHERE parent_share_id = $1 AND document_id = $2 AND (expires_at IS NULL OR expires_at > now())", + ) + .bind(parent_share_id) + .bind(doc_id) + .fetch_optional(&self.pool) + .await?; + match perm { + None => Ok(None), + Some(raw) => share::SharePermission::parse(&raw) + .ok_or_else(|| anyhow::anyhow!("invalid_share_permission")) + .map(Some), + } + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/db/repositories/tag_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/tag_repository_sqlx/mod.rs new file mode 100644 index 00000000..5cf7d889 --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/tag_repository_sqlx/mod.rs @@ -0,0 +1,66 @@ +use async_trait::async_trait; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::tagging::tag_repository::{TagRepository, TagSummary}; + +pub struct SqlxTagRepository { + pub pool: PgPool, +} + +impl SqlxTagRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl TagRepository for SqlxTagRepository { + async fn list_tags( + &self, + owner_id: Uuid, + filter: Option<String>, + ) -> PortResult<Vec<TagSummary>> { + let out: anyhow::Result<Vec<TagSummary>> = async { + let rows = if let Some(f) = filter.filter(|s| !s.trim().is_empty()) { + let like = format!("%{}%", f); + sqlx::query( + r#"SELECT t.name, COUNT(*)::BIGINT AS count + FROM document_tags dt + JOIN tags t ON t.id = dt.tag_id + JOIN documents d ON d.id = dt.document_id AND d.owner_id = $1 + WHERE t.name ILIKE $2 + GROUP BY t.name + ORDER BY count DESC, t.name ASC"#, + ) + .bind(owner_id) + .bind(like) + .fetch_all(&self.pool) + .await? + } else { + sqlx::query( + r#"SELECT t.name, COUNT(*)::BIGINT AS count + FROM document_tags dt + JOIN tags t ON t.id = dt.tag_id + JOIN documents d ON d.id = dt.document_id AND d.owner_id = $1 + GROUP BY t.name + ORDER BY count DESC, t.name ASC"#, + ) + .bind(owner_id) + .fetch_all(&self.pool) + .await?
+ }; + Ok(rows + .into_iter() + .map(|r| TagSummary { + name: r.get("name"), + count: r.get("count"), + }) + .collect()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/db/repositories/tagging_repository_sqlx/mod.rs b/api/crates/infrastructure/src/documents/db/repositories/tagging_repository_sqlx/mod.rs new file mode 100644 index 00000000..a5acbc53 --- /dev/null +++ b/api/crates/infrastructure/src/documents/db/repositories/tagging_repository_sqlx/mod.rs @@ -0,0 +1,72 @@ +use async_trait::async_trait; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::tagging::tagging_repository::TaggingRepository; + +pub struct SqlxTaggingRepository { + pub pool: PgPool, +} + +impl SqlxTaggingRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl TaggingRepository for SqlxTaggingRepository { + async fn clear_document_tags(&self, doc_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM document_tags WHERE document_id = $1") + .bind(doc_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn upsert_tag_return_id(&self, name: &str) -> PortResult<i64> { + let out: anyhow::Result<i64> = async { + let row = sqlx::query("INSERT INTO tags(name) VALUES ($1) ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name RETURNING id") + .bind(name) + .fetch_one(&self.pool) + .await?; + Ok(row.get("id")) + } + .await; + out.map_err(Into::into) + } + + async fn owner_doc_exists(&self, doc_id: Uuid, owner_id: Uuid) -> PortResult<bool> { + let out: anyhow::Result<bool> = async { + let n = sqlx::query_scalar::<_, i64>( + "SELECT COUNT(1) FROM documents WHERE id = $1 AND owner_id = $2", + ) + .bind(doc_id) + .bind(owner_id) + .fetch_one(&self.pool) + .await?; + Ok(n > 0) + } + .await; + out.map_err(Into::into) + } + + async fn associate_document_tag(&self, doc_id: Uuid, tag_id: i64) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("INSERT INTO document_tags(document_id, tag_id) VALUES ($1, $2)") + .bind(doc_id) + .bind(tag_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/doc_event_log.rs b/api/crates/infrastructure/src/documents/doc_event_log.rs new file mode 100644 index 00000000..e83252c9 --- /dev/null +++ b/api/crates/infrastructure/src/documents/doc_event_log.rs @@ -0,0 +1,46 @@ +use async_trait::async_trait; +use serde_json::Value; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::doc_event_log::DocEventLog; + +pub struct PgDocEventLog { + pool: PgPool, +} + +impl PgDocEventLog { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl DocEventLog for PgDocEventLog { + async fn append( + &self, + workspace_id: Uuid, + doc_id: Uuid, + event_type: &str, + payload: Option<Value>, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" + INSERT INTO doc_events (workspace_id, doc_id, event_type, payload) + VALUES ($1, $2, $3, $4) + "#, + ) + .bind(workspace_id) + .bind(doc_id) + .bind(event_type) + .bind(payload) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/src/infrastructure/documents/event_poller.rs
b/api/crates/infrastructure/src/documents/event_poller.rs similarity index 97% rename from api/src/infrastructure/documents/event_poller.rs rename to api/crates/infrastructure/src/documents/event_poller.rs index 8a9963a3..851b29b2 100644 --- a/api/src/infrastructure/documents/event_poller.rs +++ b/api/crates/infrastructure/src/documents/event_poller.rs @@ -5,8 +5,8 @@ use serde_json::Value; use sqlx::{Row, pool::PoolConnection, postgres::Postgres}; use tracing::{error, info, warn}; -use crate::application::services::doc_events::{DocEventRecord, DocEventSubscriber}; -use crate::infrastructure::db::PgPool; +use crate::core::db::PgPool; +use application::core::services::doc_events::{DocEventRecord, DocEventSubscriber}; pub struct DocEventPoller { pool: PgPool, diff --git a/api/src/infrastructure/documents/exporter.rs b/api/crates/infrastructure/src/documents/exporter.rs similarity index 89% rename from api/src/infrastructure/documents/exporter.rs rename to api/crates/infrastructure/src/documents/exporter.rs index 00c39f6f..bca7b09f 100644 --- a/api/src/infrastructure/documents/exporter.rs +++ b/api/crates/infrastructure/src/documents/exporter.rs @@ -12,8 +12,9 @@ use tokio::task; use zip::write::FileOptions; use zip::{self, CompressionMethod}; -use crate::application::dto::document_export::{DocumentDownload, DocumentDownloadFormat}; -use crate::application::ports::document_exporter::{ +use application::core::ports::errors::PortResult; +use application::documents::dtos::{DocumentDownload, DocumentDownloadFormat}; +use application::documents::ports::document_exporter::{ DocumentExportAssets, DocumentExportAttachment, DocumentExporter, }; @@ -35,6 +36,7 @@ pre { static PANDOC_WORKDIR_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(())); +#[derive(Default)] pub struct DefaultDocumentExporter; impl DefaultDocumentExporter { @@ -49,19 +51,23 @@ impl DocumentExporter for DefaultDocumentExporter { &self, assets: DocumentExportAssets, format: DocumentDownloadFormat, - ) -> anyhow::Result<DocumentDownload> { - let bytes = match format { - DocumentDownloadFormat::Archive => build_archive(&assets)?, - DocumentDownloadFormat::Markdown => assets.markdown_bytes().to_vec(), - _ if needs_pandoc(&format) => render_with_pandoc(format, &assets).await?, - _ => unreachable!("unsupported format"), - }; + ) -> PortResult<DocumentDownload> { + let out: anyhow::Result<DocumentDownload> = async { + let bytes = match format { + DocumentDownloadFormat::Archive => build_archive(&assets)?, + DocumentDownloadFormat::Markdown => assets.markdown.clone(), + _ if needs_pandoc(&format) => render_with_pandoc(format, &assets).await?, + _ => unreachable!("unsupported format"), + }; - Ok(DocumentDownload { - filename: assets.file_name(format), - content_type: format.content_type().to_string(), - bytes, - }) + Ok(DocumentDownload { + filename: format.file_name(&assets.safe_title), + content_type: format.content_type().to_string(), + bytes, + }) + } + .await; + out.map_err(Into::into) } } @@ -72,52 +78,6 @@ fn needs_pandoc(format: &DocumentDownloadFormat) -> bool { ) } -impl DocumentExportAssets { - fn file_name(&self, format: DocumentDownloadFormat) -> String { - format.file_name(&self.safe_title) - } - - fn markdown_bytes(&self) -> &[u8] { - &self.markdown - } - - fn attachments(&self) -> &[DocumentExportAttachment] { - &self.attachments - } - - fn markdown_string(&self) -> anyhow::Result<String> { - String::from_utf8(self.markdown.clone()) - .map_err(|_| anyhow::anyhow!("document markdown is not valid UTF-8")) - } - - fn display_title(&self) -> Option<&str> { - self.display_title.as_deref() - } -} -
-impl DocumentExportAttachment { - async fn materialize_under(&self, root: &Path) -> anyhow::Result<()> { - let clean_path = Path::new(&self.relative_path); - if clean_path.as_os_str().is_empty() { - return Ok(()); - } - let target = root.join(clean_path); - if let Some(parent) = target.parent() { - fs::create_dir_all(parent) - .await - .with_context(|| format!("failed to prepare {}", parent.display()))?; - } - fs::write(&target, &self.bytes) - .await - .with_context(|| format!("failed to write attachment {}", self.relative_path))?; - Ok(()) - } - - fn trimmed_path(&self) -> &str { - self.relative_path.trim_start_matches('/') - } -} - fn build_archive(assets: &DocumentExportAssets) -> anyhow::Result<Vec<u8>> { let markdown_entry = format!("{}/{}.md", assets.safe_title, assets.safe_title); let mut cursor = std::io::Cursor::new(Vec::new()); @@ -127,9 +87,13 @@ fn build_archive(assets: &DocumentExportAssets) -> anyhow::Result<Vec<u8>> { .compression_method(CompressionMethod::Deflated) .unix_permissions(0o644); zip.start_file(markdown_entry, options)?; - zip.write_all(assets.markdown_bytes())?; - for attachment in assets.attachments() { - let entry = format!("{}/{}", assets.safe_title, attachment.trimmed_path()); + zip.write_all(&assets.markdown)?; + for attachment in &assets.attachments { + let entry = format!( + "{}/{}", + assets.safe_title, + attachment_trimmed_path(attachment) + ); zip.start_file(entry, options)?; zip.write_all(&attachment.bytes)?; } @@ -143,11 +107,11 @@ async fn render_with_pandoc( assets: &DocumentExportAssets, ) -> anyhow::Result<Vec<u8>> { let tmp_dir = tempdir().context("unable to create temporary directory for pandoc")?; - let markdown_source = assets.markdown_string()?; - let display_title = assets.display_title().map(|s| s.to_owned()); + let markdown_source = markdown_string(assets)?; + let display_title = assets.display_title.clone(); - for attachment in assets.attachments() { - attachment.materialize_under(tmp_dir.path()).await?; + for attachment in &assets.attachments { + materialize_attachment_under(attachment, tmp_dir.path()).await?; } let resource_dir = tmp_dir.path().to_path_buf(); @@ -240,6 +204,35 @@ async fn render_with_pandoc( Ok(output_bytes) } +fn markdown_string(assets: &DocumentExportAssets) -> anyhow::Result<String> { + String::from_utf8(assets.markdown.clone()) + .map_err(|_| anyhow::anyhow!("document markdown is not valid UTF-8")) +} + +async fn materialize_attachment_under( + attachment: &DocumentExportAttachment, + root: &Path, +) -> anyhow::Result<()> { + let clean_path = Path::new(&attachment.relative_path); + if clean_path.as_os_str().is_empty() { + return Ok(()); + } + let target = root.join(clean_path); + if let Some(parent) = target.parent() { + fs::create_dir_all(parent) + .await + .with_context(|| format!("failed to prepare {}", parent.display()))?; + } + fs::write(&target, &attachment.bytes) + .await + .with_context(|| format!("failed to write attachment {}", attachment.relative_path))?; + Ok(()) +} + +fn attachment_trimmed_path(attachment: &DocumentExportAttachment) -> &str { + attachment.relative_path.trim_start_matches('/') +} + struct WorkingDirGuard { original: Option<PathBuf>, } diff --git a/api/src/infrastructure/documents/git_dirty_subscriber.rs b/api/crates/infrastructure/src/documents/git_dirty_subscriber.rs similarity index 95% rename from api/src/infrastructure/documents/git_dirty_subscriber.rs rename to api/crates/infrastructure/src/documents/git_dirty_subscriber.rs index 29165be3..7536626a 100644 --- a/api/src/infrastructure/documents/git_dirty_subscriber.rs +++
b/api/crates/infrastructure/src/documents/git_dirty_subscriber.rs @@ -5,9 +5,10 @@ use serde_json::Value; use tracing::info; use uuid::Uuid; -use crate::application::services::doc_events::{DocEventRecord, DocEventSubscriber}; -use crate::infrastructure::db::PgPool; -use crate::infrastructure::storage::{mark_dirty_delete_relative, mark_dirty_upsert_relative}; +use crate::core::db::PgPool; +use crate::core::storage::{mark_dirty_delete_relative, mark_dirty_upsert_relative}; +use application::core::services::doc_events::{DocEventRecord, DocEventSubscriber}; +use domain::documents::doc_type::DOC_TYPE_FOLDER; pub struct GitDirtyDocEventSubscriber { pool: PgPool, @@ -51,13 +52,13 @@ impl GitDirtyDocEventSubscriber { doc_id: Uuid, doc_type_hint: &mut Option<String>, ) -> anyhow::Result<bool> { - if matches!(doc_type_hint.as_deref(), Some("folder")) { + if matches!(doc_type_hint.as_deref(), Some(DOC_TYPE_FOLDER)) { return Ok(true); } if doc_type_hint.is_none() { *doc_type_hint = self.doc_type(doc_id).await?; } - Ok(matches!(doc_type_hint.as_deref(), Some("folder"))) + Ok(matches!(doc_type_hint.as_deref(), Some(DOC_TYPE_FOLDER))) } async fn mark_upsert( diff --git a/api/src/infrastructure/documents/mod.rs b/api/crates/infrastructure/src/documents/mod.rs similarity index 63% rename from api/src/infrastructure/documents/mod.rs rename to api/crates/infrastructure/src/documents/mod.rs index 0a42c560..1d3f4177 100644 --- a/api/src/infrastructure/documents/mod.rs +++ b/api/crates/infrastructure/src/documents/mod.rs @@ -1,4 +1,7 @@ +pub mod db; pub mod doc_event_log; pub mod event_poller; pub mod exporter; pub mod git_dirty_subscriber; +pub mod realtime; +pub mod tx_runner_sqlx; diff --git a/api/src/infrastructure/realtime/awareness.rs b/api/crates/infrastructure/src/documents/realtime/awareness.rs similarity index 96% rename from api/src/infrastructure/realtime/awareness.rs rename to api/crates/infrastructure/src/documents/realtime/awareness.rs index 0ea753cc..f03f1951 100644 --- a/api/src/infrastructure/realtime/awareness.rs +++ b/api/crates/infrastructure/src/documents/realtime/awareness.rs @@ -15,7 +15,7 @@ use yrs::sync::{Message, MessageReader}; use yrs::updates::decoder::DecoderV1; use yrs::updates::encoder::Encode; -use crate::application::ports::awareness_port::AwarenessPublisher; +use application::documents::ports::realtime::awareness_port::AwarenessPublisher; #[derive(Clone)] pub struct AwarenessService { @@ -83,20 +83,21 @@ impl AwarenessService { self.publisher .publish_awareness(&self.doc_id, frame) .await + .map_err(anyhow::Error::from) .context("awareness_publish_local_clear")?; Ok(()) } async fn process_frame(&self, frame: &[u8], origin: FrameOrigin) -> anyhow::Result<()> { let mut decoder = DecoderV1::new(Cursor::new(frame)); - let mut reader = MessageReader::new(&mut decoder); + let reader = MessageReader::new(&mut decoder); let mut combined = AwarenessUpdateSummary { added: Vec::new(), updated: Vec::new(), removed: Vec::new(), }; let mut any = false; - while let Some(message) = reader.next() { + for message in reader { let message = message?; if let Message::Awareness(update) = message { if let Some(summary) = self @@ -190,6 +191,7 @@ impl AwarenessService { self.publisher .publish_awareness(&self.doc_id, frame) .await + .map_err(anyhow::Error::from) .context("awareness_publish_removal")?; Ok(()) } diff --git a/api/crates/infrastructure/src/documents/realtime/doc_persistence.rs b/api/crates/infrastructure/src/documents/realtime/doc_persistence.rs new file mode 100644 index 00000000..1e3826ce ---
/dev/null
+++ b/api/crates/infrastructure/src/documents/realtime/doc_persistence.rs
@@ -0,0 +1,166 @@
+use async_trait::async_trait;
+use sqlx::Row;
+use uuid::Uuid;
+
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::documents::ports::realtime::realtime_persistence_port::{
+    DocPersistencePort, DocumentMissingError, SnapshotEntry,
+};
+
+#[derive(Clone)]
+pub struct SqlxDocPersistenceAdapter {
+    pool: PgPool,
+}
+
+impl SqlxDocPersistenceAdapter {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
+
+#[async_trait]
+impl DocPersistencePort for SqlxDocPersistenceAdapter {
+    async fn append_update_with_seq(
+        &self,
+        doc_id: &Uuid,
+        seq: i64,
+        update: &[u8],
+    ) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            sqlx::query(
+                "INSERT INTO document_updates (document_id, seq, update) VALUES ($1, $2, $3)",
+            )
+            .bind(doc_id)
+            .bind(seq)
+            .bind(update)
+            .execute(&self.pool)
+            .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn latest_update_seq(&self, doc_id: &Uuid) -> PortResult<Option<i64>> {
+        let out: anyhow::Result<Option<i64>> = async {
+            let row = sqlx::query(
+                "SELECT MAX(seq) AS max_seq FROM document_updates WHERE document_id = $1",
+            )
+            .bind(doc_id)
+            .fetch_optional(&self.pool)
+            .await?;
+            Ok(row.and_then(|row| row.try_get("max_seq").ok()))
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn persist_snapshot(
+        &self,
+        doc_id: &Uuid,
+        version: i64,
+        snapshot: &[u8],
+    ) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            let result = sqlx::query(
+                "INSERT INTO document_snapshots (document_id, version, snapshot) VALUES ($1, $2, $3)
+                 ON CONFLICT (document_id, version) DO UPDATE SET snapshot = EXCLUDED.snapshot",
+            )
+            .bind(doc_id)
+            .bind(version as i32)
+            .bind(snapshot)
+            .execute(&self.pool)
+            .await;
+
+            match result {
+                Ok(_) => Ok(()),
+                Err(sqlx::Error::Database(db_err))
+                    if matches!(
+                        db_err.constraint(),
+                        Some("document_snapshots_document_id_fkey")
+                    ) =>
+                {
+                    Err(DocumentMissingError {
+                        document_id: *doc_id,
+                    }
+                    .into())
+                }
+                Err(err) => Err(err.into()),
+            }
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn latest_snapshot_entry(&self, doc_id: &Uuid) -> PortResult<Option<SnapshotEntry>> {
+        let out: anyhow::Result<Option<SnapshotEntry>> = async {
+            let row = sqlx::query(
+                "SELECT version, snapshot FROM document_snapshots WHERE document_id = $1
+                 ORDER BY version DESC LIMIT 1",
+            )
+            .bind(doc_id)
+            .fetch_optional(&self.pool)
+            .await?;
+            Ok(row.map(|row| SnapshotEntry {
+                version: row.get::<i32, _>("version") as i64,
+                bytes: row.get("snapshot"),
+            }))
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn latest_snapshot_version(&self, doc_id: &Uuid) -> PortResult<Option<i64>> {
+        let out: anyhow::Result<Option<i64>> = async {
+            Ok(self
+                .latest_snapshot_entry(doc_id)
+                .await?
+ .map(|entry| entry.version)) + } + .await; + out.map_err(Into::into) + } + + async fn prune_snapshots(&self, doc_id: &Uuid, keep_latest: i64) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + "DELETE FROM document_snapshots WHERE document_id = $1 AND version NOT IN ( + SELECT version FROM document_snapshots WHERE document_id = $1 ORDER BY version DESC LIMIT $2 + )", + ) + .bind(doc_id) + .bind(keep_latest) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn prune_updates_before(&self, doc_id: &Uuid, seq_inclusive: i64) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM document_updates WHERE document_id = $1 AND seq <= $2") + .bind(doc_id) + .bind(seq_inclusive) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn clear_updates(&self, doc_id: &Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM document_updates WHERE document_id = $1") + .bind(doc_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/documents/realtime/doc_state_reader.rs b/api/crates/infrastructure/src/documents/realtime/doc_state_reader.rs new file mode 100644 index 00000000..1785e8f6 --- /dev/null +++ b/api/crates/infrastructure/src/documents/realtime/doc_state_reader.rs @@ -0,0 +1,103 @@ +use anyhow::Context; +use async_trait::async_trait; +use futures_util::TryStreamExt; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::documents::ports::realtime::realtime_hydration_port::{ + DocSnapshot, DocStateReader, DocUpdate, DocumentRecord, +}; +use domain::documents::doc_type::DocumentType; + +#[derive(Clone)] +pub struct SqlxDocStateReader { + pool: PgPool, +} + +impl SqlxDocStateReader { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl DocStateReader for SqlxDocStateReader { + async fn latest_snapshot(&self, doc_id: &Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + "SELECT version, snapshot FROM document_snapshots WHERE document_id = $1 ORDER BY version DESC LIMIT 1", + ) + .bind(doc_id) + .fetch_optional(&self.pool) + .await?; + + if let Some(row) = row { + let version: i32 = row.get("version"); + let snapshot = row + .try_get::, _>("snapshot") + .context("doc_snapshot_missing")?; + Ok(Some(DocSnapshot { + version: version as i64, + snapshot, + })) + } else { + Ok(None) + } + } + .await; + out.map_err(Into::into) + } + + async fn updates_since(&self, doc_id: &Uuid, from_seq: i64) -> PortResult> { + let out: anyhow::Result> = async { + let mut rows = sqlx::query( + "SELECT seq, update FROM document_updates WHERE document_id = $1 AND seq > $2 ORDER BY seq ASC", + ) + .bind(doc_id) + .bind(from_seq) + .fetch(&self.pool); + + let mut result = Vec::new(); + while let Some(row) = rows.try_next().await? 
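Every adapter method in these new files follows the same shape: run the real body as an `anyhow`-typed `async` block, then convert the error exactly once at the trait boundary with `map_err(Into::into)`. A self-contained mirror of the idiom; `PortError` and `PortResult` here are simplified stand-ins for `application::core::ports::errors`, whose definitions are not part of this diff:

```rust
// Simplified stand-ins; the real types live in application::core::ports::errors.
#[derive(Debug)]
pub struct PortError(anyhow::Error);

impl From<anyhow::Error> for PortError {
    fn from(err: anyhow::Error) -> Self {
        Self(err)
    }
}

pub type PortResult<T> = Result<T, PortError>;

async fn example(query_ok: bool) -> PortResult<Option<i64>> {
    // The inner block owns the `?`-friendly anyhow error type, so sqlx,
    // io, and serde errors all flow through `?` without ceremony...
    let out: anyhow::Result<Option<i64>> = async {
        if !query_ok {
            anyhow::bail!("query failed");
        }
        Ok(Some(42))
    }
    .await;
    // ...and the port error type appears only here, at the boundary.
    out.map_err(Into::into)
}
```

This keeps the application-layer traits decoupled from any concrete error type while the infrastructure bodies stay as terse as before the refactor.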
{ + let seq: i64 = row.get("seq"); + let update = row + .try_get::, _>("update") + .context("doc_update_missing")?; + result.push(DocUpdate { seq, update }); + } + Ok(result) + } + .await; + out.map_err(Into::into) + } + + async fn document_record(&self, doc_id: &Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + "SELECT type, path, desired_path, title, owner_id, workspace_id FROM documents WHERE id = $1", + ) + .bind(doc_id) + .fetch_optional(&self.pool) + .await?; + + row.map(|row| { + let doc_type_str: String = row.get("type"); + let doc_type = DocumentType::try_from(doc_type_str.as_str()) + .context("invalid_document_type")?; + Ok(DocumentRecord { + doc_type, + path: row.try_get("path").ok(), + desired_path: row.try_get("desired_path").ok(), + title: row.get("title"), + owner_id: row.try_get("owner_id").ok(), + workspace_id: row.get("workspace_id"), + }) + }) + .transpose() + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/src/infrastructure/realtime/hub.rs b/api/crates/infrastructure/src/documents/realtime/hub.rs similarity index 98% rename from api/src/infrastructure/realtime/hub.rs rename to api/crates/infrastructure/src/documents/realtime/hub.rs index cf7086a1..68723993 100644 --- a/api/src/infrastructure/realtime/hub.rs +++ b/api/crates/infrastructure/src/documents/realtime/hub.rs @@ -22,17 +22,19 @@ use yrs::{Doc, ReadTxn, StateVector, Text, Transact, Update}; use yrs_warp::AwarenessRef; use yrs_warp::broadcast::BroadcastGroup; -use crate::application::ports::realtime_persistence_port::{ +use crate::documents::realtime::utils::wrap_stream_with_edit_guard; +use crate::documents::realtime::{DynRealtimeSink, DynRealtimeStream}; +use application::documents::ports::realtime::realtime_persistence_port::{ DocPersistencePort, DocumentMissingError, }; -use crate::application::services::realtime::doc_hydration::{ +use application::documents::services::realtime::doc_hydration::{ DocHydrationService, HydrationOptions, }; -use crate::application::services::realtime::snapshot::{ +use application::documents::services::realtime::snapshot::{ SnapshotArchiveKind, SnapshotArchiveOptions, SnapshotPersistOptions, SnapshotService, }; -use crate::infrastructure::realtime::utils::wrap_stream_with_edit_guard; -use crate::infrastructure::realtime::{DynRealtimeSink, DynRealtimeStream}; + +type SharedRealtimeSink = Arc>; #[derive(Clone)] pub struct DocumentRoom { @@ -456,6 +458,7 @@ impl Hub { can_edit: bool, ) -> anyhow::Result<()> { let room = self.get_or_create(doc_id).await?; + let sink: SharedRealtimeSink = Arc::new(Mutex::new(sink)); let edit_flag = self.ensure_edit_flag(doc_id).await; let effective_can_edit = can_edit && edit_flag.load(Ordering::Relaxed); let guarded_stream = @@ -644,7 +647,7 @@ where impl Hub { async fn send_protocol_start
( - sink: DynRealtimeSink, + sink: SharedRealtimeSink, awareness: AwarenessRef, protocol: P, ) -> anyhow::Result<()> diff --git a/api/crates/infrastructure/src/documents/realtime/local_engine.rs b/api/crates/infrastructure/src/documents/realtime/local_engine.rs new file mode 100644 index 00000000..79304df2 --- /dev/null +++ b/api/crates/infrastructure/src/documents/realtime/local_engine.rs @@ -0,0 +1,48 @@ +use application::core::ports::errors::PortResult; +use application::documents::ports::realtime::realtime_port::RealtimeEngine; +use application::documents::ports::realtime::realtime_types::{DynRealtimeSink, DynRealtimeStream}; +use application::documents::services::realtime::snapshot::doc_from_snapshot_bytes; + +pub struct LocalRealtimeEngine { + pub hub: crate::documents::realtime::Hub, +} + +#[async_trait::async_trait] +impl RealtimeEngine for LocalRealtimeEngine { + async fn subscribe( + &self, + doc_id: &str, + sink: DynRealtimeSink, + stream: DynRealtimeStream, + can_edit: bool, + ) -> PortResult<()> { + self.hub + .subscribe(doc_id, sink, stream, can_edit) + .await + .map_err(Into::into) + } + + async fn get_content(&self, doc_id: &str) -> PortResult> { + self.hub.get_content(doc_id).await.map_err(Into::into) + } + + async fn force_persist(&self, doc_id: &str) -> PortResult<()> { + self.hub.force_save_to_fs(doc_id).await.map_err(Into::into) + } + + async fn apply_snapshot(&self, doc_id: &str, snapshot: &[u8]) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let doc = doc_from_snapshot_bytes(snapshot)?; + self.hub.apply_snapshot(doc_id, &doc).await + } + .await; + out.map_err(Into::into) + } + + async fn set_document_editable(&self, doc_id: &str, editable: bool) -> PortResult<()> { + self.hub + .set_document_editable(doc_id, editable) + .await + .map_err(Into::into) + } +} diff --git a/api/src/infrastructure/realtime/mod.rs b/api/crates/infrastructure/src/documents/realtime/mod.rs similarity index 81% rename from api/src/infrastructure/realtime/mod.rs rename to api/crates/infrastructure/src/documents/realtime/mod.rs index 5133b823..75848126 100644 --- a/api/src/infrastructure/realtime/mod.rs +++ b/api/crates/infrastructure/src/documents/realtime/mod.rs @@ -1,4 +1,6 @@ -pub use crate::application::ports::realtime_types::{DynRealtimeSink, DynRealtimeStream}; +pub use application::documents::ports::realtime::realtime_types::{ + DynRealtimeSink, DynRealtimeStream, +}; mod awareness; mod doc_persistence; diff --git a/api/src/infrastructure/realtime/noop_ports.rs b/api/crates/infrastructure/src/documents/realtime/noop_ports.rs similarity index 65% rename from api/src/infrastructure/realtime/noop_ports.rs rename to api/crates/infrastructure/src/documents/realtime/noop_ports.rs index 250f2683..1e93e48d 100644 --- a/api/src/infrastructure/realtime/noop_ports.rs +++ b/api/crates/infrastructure/src/documents/realtime/noop_ports.rs @@ -1,7 +1,10 @@ use async_trait::async_trait; -use crate::application::ports::awareness_port::AwarenessPublisher; -use crate::application::ports::realtime_hydration_port::{RealtimeBacklogReader, StreamFrame}; +use application::core::ports::errors::PortResult; +use application::documents::ports::realtime::awareness_port::AwarenessPublisher; +use application::documents::ports::realtime::realtime_hydration_port::{ + RealtimeBacklogReader, StreamFrame, +}; #[derive(Debug, Clone, Default)] pub struct NoopBacklogReader; @@ -15,7 +18,7 @@ impl RealtimeBacklogReader for NoopBacklogReader { &self, _doc_id: &str, _last_stream_id: Option<&str>, - ) -> 
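`LocalRealtimeEngine` above is a thin adapter that lets the in-process `Hub` satisfy the same `RealtimeEngine` port as the Redis-backed engine further down, so callers can hold one trait object either way. A hedged, self-contained sketch of the composition-root choice; the trait here is a tiny synchronous stand-in for the real async port, and the `redis_url` knob is an assumption, not something this diff shows:

```rust
use std::sync::Arc;

// Minimal stand-in for the RealtimeEngine port
// (application::documents::ports::realtime::realtime_port).
trait Engine: Send + Sync {
    fn name(&self) -> &'static str;
}

struct LocalHub;
struct RedisStreams;
impl Engine for LocalHub {
    fn name(&self) -> &'static str { "local-hub" }
}
impl Engine for RedisStreams {
    fn name(&self) -> &'static str { "redis-streams" }
}

// Hypothetical startup selection: multi-node deployments fan out through
// Redis, a single node keeps everything inside the Hub.
fn select_engine(redis_url: Option<&str>) -> Arc<dyn Engine> {
    match redis_url {
        Some(_) => Arc::new(RedisStreams),
        None => Arc::new(LocalHub),
    }
}
```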
anyhow::Result> { + ) -> PortResult> { Ok(Vec::new()) } @@ -23,14 +26,14 @@ impl RealtimeBacklogReader for NoopBacklogReader { &self, _doc_id: &str, _last_stream_id: Option<&str>, - ) -> anyhow::Result> { + ) -> PortResult> { Ok(Vec::new()) } } #[async_trait] impl AwarenessPublisher for NoopAwarenessPublisher { - async fn publish_awareness(&self, _doc_id: &str, _frame: Vec) -> anyhow::Result<()> { + async fn publish_awareness(&self, _doc_id: &str, _frame: Vec) -> PortResult<()> { Ok(()) } } diff --git a/api/src/infrastructure/realtime/redis/cluster_bus.rs b/api/crates/infrastructure/src/documents/realtime/redis/cluster_bus.rs similarity index 90% rename from api/src/infrastructure/realtime/redis/cluster_bus.rs rename to api/crates/infrastructure/src/documents/realtime/redis/cluster_bus.rs index 0575c47b..599090e5 100644 --- a/api/src/infrastructure/realtime/redis/cluster_bus.rs +++ b/api/crates/infrastructure/src/documents/realtime/redis/cluster_bus.rs @@ -1,9 +1,12 @@ use std::sync::Arc; use std::time::Duration; -use crate::application::ports::awareness_port::AwarenessPublisher; -use crate::application::ports::realtime_hydration_port::{RealtimeBacklogReader, StreamFrame}; use anyhow::Context; +use application::core::ports::errors::PortResult; +use application::documents::ports::realtime::awareness_port::AwarenessPublisher; +use application::documents::ports::realtime::realtime_hydration_port::{ + RealtimeBacklogReader, StreamFrame, +}; use async_trait::async_trait; use redis::AsyncCommands; use redis::streams::{StreamRangeReply, StreamReadOptions, StreamReadReply}; @@ -356,31 +359,44 @@ impl RealtimeBacklogReader for RedisClusterBus { &self, doc_id: &str, last_stream_id: Option<&str>, - ) -> anyhow::Result> { - let items = RedisClusterBus::read_update_backlog(self, doc_id, last_stream_id).await?; - Ok(items - .into_iter() - .map(|(id, payload)| StreamFrame { id, payload }) - .collect()) + ) -> PortResult> { + let out: anyhow::Result> = async { + let items = RedisClusterBus::read_update_backlog(self, doc_id, last_stream_id).await?; + Ok(items + .into_iter() + .map(|(id, payload)| StreamFrame { id, payload }) + .collect()) + } + .await; + out.map_err(Into::into) } async fn read_awareness_backlog( &self, doc_id: &str, last_stream_id: Option<&str>, - ) -> anyhow::Result> { - let items = RedisClusterBus::read_awareness_backlog(self, doc_id, last_stream_id).await?; - Ok(items - .into_iter() - .map(|(id, payload)| StreamFrame { id, payload }) - .collect()) + ) -> PortResult> { + let out: anyhow::Result> = async { + let items = + RedisClusterBus::read_awareness_backlog(self, doc_id, last_stream_id).await?; + Ok(items + .into_iter() + .map(|(id, payload)| StreamFrame { id, payload }) + .collect()) + } + .await; + out.map_err(Into::into) } } #[async_trait] impl AwarenessPublisher for RedisClusterBus { - async fn publish_awareness(&self, doc_id: &str, frame: Vec) -> anyhow::Result<()> { - let _id = RedisClusterBus::publish_awareness(self, doc_id, frame).await?; - Ok(()) + async fn publish_awareness(&self, doc_id: &str, frame: Vec) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let _id = RedisClusterBus::publish_awareness(self, doc_id, frame).await?; + Ok(()) + } + .await; + out.map_err(Into::into) } } diff --git a/api/src/infrastructure/realtime/redis/engine.rs b/api/crates/infrastructure/src/documents/realtime/redis/engine.rs similarity index 91% rename from api/src/infrastructure/realtime/redis/engine.rs rename to api/crates/infrastructure/src/documents/realtime/redis/engine.rs 
index 8d7c2e43..022552aa 100644 --- a/api/src/infrastructure/realtime/redis/engine.rs +++ b/api/crates/infrastructure/src/documents/realtime/redis/engine.rs @@ -18,35 +18,40 @@ use yrs::sync::{DefaultProtocol, Protocol}; use yrs::updates::encoder::{Encoder, EncoderV1}; use yrs::{Doc, GetString, ReadTxn, StateVector, Text, Transact}; -use crate::application::ports::awareness_port::AwarenessPublisher; -use crate::application::ports::document_snapshot_archive_repository::DocumentSnapshotArchiveRepository; -use crate::application::ports::linkgraph_repository::LinkGraphRepository; -use crate::application::ports::realtime_hydration_port::{DocStateReader, RealtimeBacklogReader}; -use crate::application::ports::realtime_persistence_port::{ +use crate::core::db::PgPool; +use crate::documents::db::repositories::document_snapshot_archive_repository_sqlx::SqlxDocumentSnapshotArchiveRepository; +use crate::documents::db::repositories::linkgraph_repository_sqlx::SqlxLinkGraphRepository; +use crate::documents::db::repositories::tagging_repository_sqlx::SqlxTaggingRepository; +use crate::documents::realtime::awareness::{AwarenessService, encode_awareness_state}; +use crate::documents::realtime::utils::{analyse_frame, wrap_stream_with_edit_guard}; +use crate::documents::realtime::{SqlxDocPersistenceAdapter, SqlxDocStateReader}; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_port::StorageResolverPort; +use application::core::ports::storage::storage_projection_queue::StorageProjectionQueue; +use application::documents::ports::document_snapshot_archive_repository::DocumentSnapshotArchiveRepository; +use application::documents::ports::linkgraph_repository::LinkGraphRepository; +use application::documents::ports::realtime::awareness_port::AwarenessPublisher; +use application::documents::ports::realtime::realtime_hydration_port::{ + DocStateReader, RealtimeBacklogReader, +}; +use application::documents::ports::realtime::realtime_persistence_port::{ DocPersistencePort, DocumentMissingError, }; -use crate::application::ports::realtime_port::RealtimeEngine as RealtimeEngineTrait; -use crate::application::ports::realtime_types::{DynRealtimeSink, DynRealtimeStream}; -use crate::application::ports::storage_port::StorageResolverPort; -use crate::application::ports::storage_projection_queue::StorageProjectionQueue; -use crate::application::ports::tagging_repository::TaggingRepository; -use crate::application::services::realtime::doc_hydration::{ +use application::documents::ports::realtime::realtime_port::RealtimeEngine as RealtimeEngineTrait; +use application::documents::ports::realtime::realtime_types::{DynRealtimeSink, DynRealtimeStream}; +use application::documents::ports::tagging::tagging_repository::TaggingRepository; +use application::documents::services::realtime::doc_hydration::{ DocHydrationService, HydrationOptions, }; -use crate::application::services::realtime::snapshot::{ +use application::documents::services::realtime::snapshot::{ SnapshotArchiveKind, SnapshotArchiveOptions, SnapshotPersistOptions, SnapshotService, doc_from_snapshot_bytes, }; -use crate::infrastructure::db::PgPool; -use crate::infrastructure::db::repositories::document_snapshot_archive_repository_sqlx::SqlxDocumentSnapshotArchiveRepository; -use crate::infrastructure::db::repositories::linkgraph_repository_sqlx::SqlxLinkGraphRepository; -use crate::infrastructure::db::repositories::tagging_repository_sqlx::SqlxTaggingRepository; -use 
crate::infrastructure::realtime::awareness::{AwarenessService, encode_awareness_state}; -use crate::infrastructure::realtime::utils::{analyse_frame, wrap_stream_with_edit_guard}; -use crate::infrastructure::realtime::{SqlxDocPersistenceAdapter, SqlxDocStateReader}; use super::cluster_bus::{RedisClusterBus, StreamItem}; +type SharedRealtimeSink = Arc>; + pub struct RedisRealtimeEngine { bus: Arc, hydration_service: Arc, @@ -143,7 +148,7 @@ impl RedisRealtimeEngine { self.snapshot_service.clone() } - async fn send_initial_sync(&self, doc: &Doc, sink: &DynRealtimeSink) -> anyhow::Result<()> { + async fn send_initial_sync(&self, doc: &Doc, sink: &SharedRealtimeSink) -> anyhow::Result<()> { let bin = { let txn = doc.transact(); txn.encode_state_as_update_v1(&StateVector::default()) @@ -164,7 +169,7 @@ impl RedisRealtimeEngine { async fn flush_awareness_backlog( &self, - sink: &DynRealtimeSink, + sink: &SharedRealtimeSink, frames: &[Vec], doc_id: &str, awareness_manager: &AwarenessService, @@ -186,7 +191,7 @@ impl RedisRealtimeEngine { fn spawn_forward_task( mut stream: UnboundedReceiverStream>, - sink: DynRealtimeSink, + sink: SharedRealtimeSink, doc_id: String, channel: &'static str, awareness_manager: Option, @@ -236,8 +241,9 @@ impl RealtimeEngineTrait for RedisRealtimeEngine { sink: DynRealtimeSink, stream: DynRealtimeStream, can_edit: bool, - ) -> anyhow::Result<()> { - let doc_uuid = Uuid::parse_str(doc_id)?; + ) -> PortResult<()> { + let sink: SharedRealtimeSink = Arc::new(Mutex::new(sink)); + let doc_uuid = Uuid::parse_str(doc_id).map_err(anyhow::Error::from)?; let hydrated = self .hydration_service .hydrate(&doc_uuid, HydrationOptions::default()) @@ -378,11 +384,11 @@ impl RealtimeEngineTrait for RedisRealtimeEngine { } ttl_handle.abort(); - result + result.map_err(Into::into) } - async fn get_content(&self, doc_id: &str) -> anyhow::Result> { - let uuid = Uuid::parse_str(doc_id)?; + async fn get_content(&self, doc_id: &str) -> PortResult> { + let uuid = Uuid::parse_str(doc_id).map_err(anyhow::Error::from)?; let hydrated = self .hydration_service .hydrate(&uuid, HydrationOptions::default()) @@ -392,8 +398,8 @@ impl RealtimeEngineTrait for RedisRealtimeEngine { Ok(Some(txt.get_string(&txn))) } - async fn force_persist(&self, doc_id: &str) -> anyhow::Result<()> { - let uuid = Uuid::parse_str(doc_id)?; + async fn force_persist(&self, doc_id: &str) -> PortResult<()> { + let uuid = Uuid::parse_str(doc_id).map_err(anyhow::Error::from)?; let hydrated = self .hydration_service .hydrate(&uuid, HydrationOptions::default()) @@ -414,9 +420,9 @@ impl RealtimeEngineTrait for RedisRealtimeEngine { Ok(()) } - async fn apply_snapshot(&self, doc_id: &str, snapshot: &[u8]) -> anyhow::Result<()> { + async fn apply_snapshot(&self, doc_id: &str, snapshot: &[u8]) -> PortResult<()> { let doc = doc_from_snapshot_bytes(snapshot)?; - let uuid = Uuid::parse_str(doc_id)?; + let uuid = Uuid::parse_str(doc_id).map_err(anyhow::Error::from)?; let hydrated = self .hydration_service .hydrate(&uuid, HydrationOptions::default()) @@ -450,7 +456,7 @@ impl RealtimeEngineTrait for RedisRealtimeEngine { Ok(()) } - async fn set_document_editable(&self, doc_id: &str, editable: bool) -> anyhow::Result<()> { + async fn set_document_editable(&self, doc_id: &str, editable: bool) -> PortResult<()> { let flag = self.ensure_edit_flag(doc_id).await; flag.store(editable, Ordering::SeqCst); Ok(()) @@ -647,7 +653,7 @@ fn spawn_persistence_worker( impl RedisRealtimeEngine { async fn send_protocol_start( - sink: DynRealtimeSink, + sink: 
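Both engines now wrap the subscriber's sink as `type SharedRealtimeSink = Arc<Mutex<DynRealtimeSink>>` before fanning it out, because several tasks (initial sync, update forwarding, awareness forwarding) must write to the same sink. A self-contained sketch of the sharing pattern with `tokio::sync::Mutex`; the `Vec<Vec<u8>>` stands in for the real sink:

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

// Stand-in for DynRealtimeSink: anything several tasks must write to.
type SharedSink = Arc<Mutex<Vec<Vec<u8>>>>;

async fn send(sink: &SharedSink, frame: Vec<u8>) {
    // Each writer holds the lock only for one frame, so the forward tasks
    // interleave cleanly instead of corrupting the byte stream.
    sink.lock().await.push(frame);
}

#[tokio::main]
async fn main() {
    let sink: SharedSink = Arc::new(Mutex::new(Vec::new()));
    let a = { let s = sink.clone(); tokio::spawn(async move { send(&s, b"sync-step-1".to_vec()).await }) };
    let b = { let s = sink.clone(); tokio::spawn(async move { send(&s, b"awareness".to_vec()).await }) };
    let _ = tokio::join!(a, b);
    assert_eq!(sink.lock().await.len(), 2);
}
```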
SharedRealtimeSink, awareness: Arc, writable: bool, ) -> anyhow::Result<()> { diff --git a/api/src/infrastructure/realtime/redis/mod.rs b/api/crates/infrastructure/src/documents/realtime/redis/mod.rs similarity index 100% rename from api/src/infrastructure/realtime/redis/mod.rs rename to api/crates/infrastructure/src/documents/realtime/redis/mod.rs diff --git a/api/src/infrastructure/realtime/utils.rs b/api/crates/infrastructure/src/documents/realtime/utils.rs similarity index 92% rename from api/src/infrastructure/realtime/utils.rs rename to api/crates/infrastructure/src/documents/realtime/utils.rs index 34928653..bd26e697 100644 --- a/api/src/infrastructure/realtime/utils.rs +++ b/api/crates/infrastructure/src/documents/realtime/utils.rs @@ -11,14 +11,14 @@ use yrs::encoding::read::Cursor; use yrs::sync::{Message, MessageReader, SyncMessage}; use yrs::updates::decoder::DecoderV1; -use crate::application::ports::realtime_port::RealtimeError; -use crate::application::ports::realtime_types::DynRealtimeStream; +use application::documents::ports::realtime::realtime_port::RealtimeError; +use application::documents::ports::realtime::realtime_types::DynRealtimeStream; pub fn analyse_frame(frame: &[u8]) -> Result { let mut decoder = DecoderV1::new(Cursor::new(frame)); - let mut reader = MessageReader::new(&mut decoder); + let reader = MessageReader::new(&mut decoder); let mut summary = FrameSummary::default(); - while let Some(message) = reader.next() { + for message in reader { match message? { Message::Sync(SyncMessage::Update(_)) | Message::Sync(SyncMessage::SyncStep2(_)) => { summary.has_update = true; diff --git a/api/crates/infrastructure/src/documents/tx_runner_sqlx.rs b/api/crates/infrastructure/src/documents/tx_runner_sqlx.rs new file mode 100644 index 00000000..5497ee2c --- /dev/null +++ b/api/crates/infrastructure/src/documents/tx_runner_sqlx.rs @@ -0,0 +1,322 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use sqlx::{Postgres, Transaction}; +use uuid::Uuid; + +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_projection_queue::{ + StorageProjectionJobKind, StorageProjectionQueueTx, +}; +use application::documents::ports::document_repository::{ + DocumentRepoResult, DocumentRepositoryError, DocumentRepositoryTx, SubtreeDocument, +}; +use application::documents::ports::files::files_repository::FilesRepositoryTx; +use application::documents::ports::tx_runner::{ + BoxedTxResult, DocumentsTx, DocumentsTxFn, DocumentsTxRunner, +}; +use domain::documents::doc_type::DocumentType; +use domain::documents::path as doc_path; +use domain::documents::title::Title; + +use crate::core::db::PgPool; +use crate::documents::db::repositories::document_repository_sqlx::SqlxDocumentRepository; +use crate::documents::db::repositories::files_repository_sqlx::SqlxFilesRepository; + +pub struct SqlxDocumentsTxRunner { + pool: PgPool, + documents_repo: Arc, + files_repo: Arc, +} + +impl SqlxDocumentsTxRunner { + pub fn new( + pool: PgPool, + documents_repo: Arc, + files_repo: Arc, + ) -> Self { + Self { + pool, + documents_repo, + files_repo, + } + } +} + +struct SqlxDocumentsTx<'repo, 'tx, 'c> { + documents_repo: &'repo SqlxDocumentRepository, + files_repo: &'repo SqlxFilesRepository, + tx: &'tx mut Transaction<'c, Postgres>, +} + +impl<'repo, 'tx, 'c> DocumentsTx for SqlxDocumentsTx<'repo, 'tx, 'c> { + fn documents(&mut self) -> &mut dyn DocumentRepositoryTx { + self + } + + fn files(&mut self) -> &mut dyn FilesRepositoryTx { + self + } + + fn 
storage_jobs(&mut self) -> &mut dyn StorageProjectionQueueTx { + self + } +} + +#[async_trait] +impl<'repo, 'tx, 'c> DocumentRepositoryTx for SqlxDocumentsTx<'repo, 'tx, 'c> { + async fn create_for_user( + &mut self, + workspace_id: Uuid, + created_by: Uuid, + title: &Title, + parent_id: Option, + doc_type: DocumentType, + created_by_plugin: Option<&str>, + slug: &doc_path::Slug, + desired_path: &doc_path::DesiredPath, + ) -> DocumentRepoResult { + self.documents_repo + .create_for_user_tx( + self.tx, + workspace_id, + created_by, + title, + parent_id, + doc_type, + created_by_plugin, + slug, + desired_path, + ) + .await + } + + async fn update_title_and_parent_for_user( + &mut self, + id: Uuid, + workspace_id: Uuid, + title: &Title, + parent_id: Option>, + slug: &doc_path::Slug, + desired_path: &doc_path::DesiredPath, + ) -> DocumentRepoResult> { + self.documents_repo + .update_title_and_parent_for_user_tx( + self.tx, + id, + workspace_id, + title, + parent_id, + slug, + desired_path, + ) + .await + } + + async fn delete_owned( + &mut self, + id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult> { + self.documents_repo + .delete_owned_tx(self.tx, id, workspace_id) + .await + .map_err(DocumentRepositoryError::from) + } + + async fn get_meta_for_owner( + &mut self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult> + { + self.documents_repo + .get_meta_for_owner_tx(self.tx, doc_id, workspace_id) + .await + .map_err(DocumentRepositoryError::from) + } + + async fn archive_subtree( + &mut self, + doc_id: Uuid, + workspace_id: Uuid, + archived_by: Uuid, + ) -> DocumentRepoResult> { + self.documents_repo + .archive_subtree_tx(self.tx, doc_id, workspace_id, archived_by) + .await + .map_err(DocumentRepositoryError::from) + } + + async fn unarchive_subtree( + &mut self, + doc_id: Uuid, + workspace_id: Uuid, + ) -> DocumentRepoResult> { + self.documents_repo + .unarchive_subtree_tx(self.tx, doc_id, workspace_id) + .await + .map_err(DocumentRepositoryError::from) + } + + async fn list_owned_subtree_documents( + &mut self, + workspace_id: Uuid, + root_id: Uuid, + ) -> DocumentRepoResult> { + self.documents_repo + .list_owned_subtree_documents_tx(self.tx, workspace_id, root_id) + .await + .map_err(DocumentRepositoryError::from) + } +} + +#[async_trait] +impl<'repo, 'tx, 'c> FilesRepositoryTx for SqlxDocumentsTx<'repo, 'tx, 'c> { + async fn list_storage_paths_for_document(&mut self, doc_id: Uuid) -> PortResult> { + self.files_repo + .list_storage_paths_for_document_tx(self.tx, doc_id) + .await + .map_err(Into::into) + } +} + +#[async_trait] +impl<'repo, 'tx, 'c> StorageProjectionQueueTx for SqlxDocumentsTx<'repo, 'tx, 'c> { + async fn enqueue_doc_job( + &mut self, + workspace_id: Uuid, + doc_id: Uuid, + kind: StorageProjectionJobKind, + reason: Option<&str>, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + match kind { + StorageProjectionJobKind::DocSync | StorageProjectionJobKind::DeleteDoc => {} + other => anyhow::bail!("job_kind {other:?} requires a folder_id"), + } + + let job_type = kind_to_str(kind); + sqlx::query( + r#" + INSERT INTO storage_projection_jobs (workspace_id, job_type, doc_id, reason, attempts, locked_at, last_error) + VALUES ($1, $2, $3, $4, 0, NULL, NULL) + ON CONFLICT (job_type, doc_id) WHERE doc_id IS NOT NULL + DO UPDATE SET reason = EXCLUDED.reason, + locked_at = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.locked_at + END, + attempts = CASE + WHEN storage_projection_jobs.locked_at IS NULL 
THEN 0 + ELSE storage_projection_jobs.attempts + END, + last_error = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.last_error + END, + workspace_id = EXCLUDED.workspace_id, + pending_retry = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN false + ELSE true + END, + updated_at = now() + "#, + ) + .bind(workspace_id) + .bind(job_type) + .bind(doc_id) + .bind(reason) + .execute(self.tx.as_mut()) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn enqueue_folder_job( + &mut self, + workspace_id: Uuid, + folder_id: Uuid, + kind: StorageProjectionJobKind, + reason: Option<&str>, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + match kind { + StorageProjectionJobKind::FolderSync | StorageProjectionJobKind::DeleteFolder => {} + other => anyhow::bail!("job_kind {other:?} requires a doc_id"), + } + + let job_type = kind_to_str(kind); + sqlx::query( + r#" + INSERT INTO storage_projection_jobs (workspace_id, job_type, folder_id, reason, attempts, locked_at, last_error) + VALUES ($1, $2, $3, $4, 0, NULL, NULL) + ON CONFLICT (job_type, folder_id) WHERE folder_id IS NOT NULL + DO UPDATE SET reason = EXCLUDED.reason, + locked_at = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.locked_at + END, + attempts = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN 0 + ELSE storage_projection_jobs.attempts + END, + last_error = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN NULL + ELSE storage_projection_jobs.last_error + END, + workspace_id = EXCLUDED.workspace_id, + pending_retry = CASE + WHEN storage_projection_jobs.locked_at IS NULL THEN false + ELSE true + END, + updated_at = now() + "#, + ) + .bind(workspace_id) + .bind(job_type) + .bind(folder_id) + .bind(reason) + .execute(self.tx.as_mut()) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } +} + +fn kind_to_str(kind: StorageProjectionJobKind) -> &'static str { + match kind { + StorageProjectionJobKind::DocSync => "doc_sync", + StorageProjectionJobKind::FolderSync => "folder_sync", + StorageProjectionJobKind::DeleteDoc => "delete_doc", + StorageProjectionJobKind::DeleteFolder => "delete_folder", + } +} + +#[async_trait] +impl DocumentsTxRunner for SqlxDocumentsTxRunner { + async fn run_boxed(&self, f: DocumentsTxFn) -> anyhow::Result { + let mut tx = self.pool.begin().await?; + let mut uow = SqlxDocumentsTx { + documents_repo: self.documents_repo.as_ref(), + files_repo: self.files_repo.as_ref(), + tx: &mut tx, + }; + + let result = f(&mut uow).await; + match result { + Ok(out) => { + tx.commit().await?; + Ok(out) + } + Err(err) => { + tx.rollback().await.ok(); + Err(err) + } + } + } +} diff --git a/api/crates/infrastructure/src/git/db/mod.rs b/api/crates/infrastructure/src/git/db/mod.rs new file mode 100644 index 00000000..21b552a0 --- /dev/null +++ b/api/crates/infrastructure/src/git/db/mod.rs @@ -0,0 +1 @@ +pub mod repositories; diff --git a/api/crates/infrastructure/src/git/db/repositories/git_pull_session_repository_sqlx/mod.rs b/api/crates/infrastructure/src/git/db/repositories/git_pull_session_repository_sqlx/mod.rs new file mode 100644 index 00000000..fb26a877 --- /dev/null +++ b/api/crates/infrastructure/src/git/db/repositories/git_pull_session_repository_sqlx/mod.rs @@ -0,0 +1,100 @@ +use async_trait::async_trait; +use sqlx::types::Json; +use sqlx::{PgPool, Row}; +use uuid::Uuid; + +use application::core::ports::errors::PortResult; +use 
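`run_boxed` above is the whole unit-of-work contract: open one transaction, hand the closure a `SqlxDocumentsTx` exposing documents, files, and storage-job access, then commit or roll back as a unit. A hedged sketch of a call site; the exact `DocumentsTxFn` alias is not shown in this diff, so the boxed-closure shape below is an assumption, and the identifiers are illustrative only:

```rust
// Hypothetical call site, assuming DocumentsTxFn is a boxed closure that
// receives `&mut dyn DocumentsTx` and returns a boxed future, which is
// what `run_boxed(f: DocumentsTxFn)` suggests.
async fn delete_with_projection(
    runner: &SqlxDocumentsTxRunner,
    ws: Uuid,
    doc_id: Uuid,
) -> anyhow::Result<()> {
    runner
        .run_boxed(Box::new(move |uow| {
            Box::pin(async move {
                // Both writes share one transaction: if the delete rolls
                // back, no orphan projection job is left behind.
                uow.documents().delete_owned(doc_id, ws).await?;
                uow.storage_jobs()
                    .enqueue_doc_job(ws, doc_id, StorageProjectionJobKind::DeleteDoc, Some("delete"))
                    .await?;
                Ok(())
            })
        }))
        .await
}
```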
application::git::dtos::{GitPullConflictItemDto, GitPullResolutionDto, GitPullSessionDto}; +use application::git::ports::git_pull_session_repository::GitPullSessionRepository; +use domain::git::pull_session::GitPullSessionStatus; + +pub struct GitPullSessionRepositorySqlx { + pool: PgPool, +} + +impl GitPullSessionRepositorySqlx { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl GitPullSessionRepository for GitPullSessionRepositorySqlx { + async fn upsert(&self, session: GitPullSessionDto) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let GitPullSessionDto { + id, + workspace_id, + status, + conflicts, + resolutions, + message, + base_commit, + remote_commit, + } = session; + sqlx::query( + r#"INSERT INTO git_pull_sessions (id, workspace_id, status, conflicts, resolutions, created_at, updated_at, message, base_commit, remote_commit) + VALUES ($1, $2, $3, $4, $5, now(), now(), $6, $7, $8) + ON CONFLICT (id) DO UPDATE SET + status = EXCLUDED.status, + conflicts = EXCLUDED.conflicts, + resolutions = EXCLUDED.resolutions, + message = EXCLUDED.message, + base_commit = EXCLUDED.base_commit, + remote_commit = EXCLUDED.remote_commit, + updated_at = now()"#, + ) + .bind(id) + .bind(workspace_id) + .bind(status.as_str()) + .bind(Json(conflicts)) + .bind(Json(resolutions)) + .bind(message.clone()) + .bind(base_commit.clone()) + .bind(remote_commit.clone()) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn get(&self, workspace_id: Uuid, id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#"SELECT id, workspace_id, status, conflicts, resolutions, message, base_commit, remote_commit FROM git_pull_sessions + WHERE id = $1 AND workspace_id = $2"#, + ) + .bind(id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + + let Some(row) = row else { + return Ok(None); + }; + let conflicts: Vec = row + .get::>, _>("conflicts") + .0; + let resolutions: Vec = row + .get::>, _>("resolutions") + .0; + let status_raw: String = row.get::("status"); + let status = GitPullSessionStatus::parse(&status_raw) + .ok_or_else(|| anyhow::anyhow!("invalid_git_pull_session_status"))?; + Ok(Some(GitPullSessionDto { + id, + workspace_id, + status, + conflicts, + resolutions, + message: row.try_get::, _>("message").unwrap_or(None), + base_commit: row.get::>, _>("base_commit"), + remote_commit: row.get::>, _>("remote_commit"), + })) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/git/db/repositories/git_repository_sqlx/mod.rs b/api/crates/infrastructure/src/git/db/repositories/git_repository_sqlx/mod.rs new file mode 100644 index 00000000..22a15cb1 --- /dev/null +++ b/api/crates/infrastructure/src/git/db/repositories/git_repository_sqlx/mod.rs @@ -0,0 +1,371 @@ +use async_trait::async_trait; +use sqlx::{Row, error::DatabaseError}; +use std::sync::atomic::{AtomicBool, Ordering}; +use tokio::sync::Mutex; +use tracing::warn; +use uuid::Uuid; + +use crate::core::crypto; +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::git::ports::git_repository::{ + GitConfigRecord, GitLastSyncLog, GitRepository, UserGitCfg, +}; +use domain::git::auth::GitAuthType; +use domain::git::sync_log::{GitSyncOperation, GitSyncStatus}; + +pub struct SqlxGitRepository { + pub pool: PgPool, + encryption_key: String, + workspace_constraint_checked: AtomicBool, + workspace_constraint_check_lock: Mutex<()>, +} + +impl 
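The pull-session repository above round-trips its `conflicts` and `resolutions` columns through `sqlx::types::Json`, which serializes on bind and deserializes on read via serde. A self-contained mirror of the idiom, assuming a jsonb column on a hypothetical `demo` table; `Conflict` stands in for `GitPullConflictItemDto`:

```rust
use serde::{Deserialize, Serialize};
use sqlx::types::Json;
use sqlx::{PgPool, Row};

// Stand-in for GitPullConflictItemDto; any Serialize + Deserialize type works.
#[derive(Serialize, Deserialize)]
struct Conflict {
    path: String,
}

async fn roundtrip(pool: &PgPool, conflicts: Vec<Conflict>) -> anyhow::Result<Vec<Conflict>> {
    // Binding Json<T> serializes the value into the jsonb parameter...
    sqlx::query("INSERT INTO demo (conflicts) VALUES ($1)")
        .bind(Json(&conflicts))
        .execute(pool)
        .await?;
    // ...and decoding Json<T> deserializes it back; `.0` unwraps the wrapper,
    // exactly as the repository does for its DTO vectors.
    let row = sqlx::query("SELECT conflicts FROM demo LIMIT 1")
        .fetch_one(pool)
        .await?;
    Ok(row.get::<Json<Vec<Conflict>>, _>("conflicts").0)
}
```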
SqlxGitRepository { + pub fn new(pool: PgPool, encryption_key: impl Into) -> Self { + Self { + pool, + encryption_key: encryption_key.into(), + workspace_constraint_checked: AtomicBool::new(false), + workspace_constraint_check_lock: Mutex::new(()), + } + } + + async fn ensure_workspace_unique_constraint_ready(&self) -> anyhow::Result<()> { + if self.workspace_constraint_checked.load(Ordering::Relaxed) { + return Ok(()); + } + + let _guard = self.workspace_constraint_check_lock.lock().await; + if self.workspace_constraint_checked.load(Ordering::Relaxed) { + return Ok(()); + } + + let constraint_exists: bool = sqlx::query_scalar( + "SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'git_configs_workspace_unique')", + ) + .fetch_one(&self.pool) + .await?; + + if !constraint_exists { + self.repair_workspace_unique_constraint().await?; + } + + self.workspace_constraint_checked + .store(true, Ordering::Relaxed); + Ok(()) + } + + async fn repair_workspace_unique_constraint(&self) -> anyhow::Result<()> { + let mut tx = self.pool.begin().await?; + sqlx::query("LOCK TABLE git_configs IN EXCLUSIVE MODE") + .execute(&mut *tx) + .await?; + + let constraint_exists: bool = sqlx::query_scalar( + "SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'git_configs_workspace_unique')", + ) + .fetch_one(&mut *tx) + .await?; + + if constraint_exists { + tx.commit().await?; + return Ok(()); + } + + let dedup = sqlx::query( + r#"WITH ranked AS ( + SELECT + id, + ROW_NUMBER() OVER ( + PARTITION BY workspace_id + ORDER BY updated_at DESC, created_at DESC, id DESC + ) AS rn + FROM git_configs + ) + DELETE FROM git_configs gc + USING ranked r + WHERE gc.id = r.id + AND r.rn > 1;"#, + ) + .execute(&mut *tx) + .await?; + if dedup.rows_affected() > 0 { + warn!( + rows = dedup.rows_affected(), + "git_configs_workspace_unique_repair_deduped" + ); + } + + if let Err(err) = sqlx::query( + "ALTER TABLE git_configs ADD CONSTRAINT git_configs_workspace_unique UNIQUE (workspace_id)", + ) + .execute(&mut *tx) + .await + { + match err { + sqlx::Error::Database(db_err) => { + let is_duplicate = db_err.code().map(|c| c == "42710").unwrap_or(false); + if !is_duplicate { + return Err(sqlx::Error::Database(db_err).into()); + } + } + other => return Err(other.into()), + } + } + + tx.commit().await?; + Ok(()) + } +} + +#[async_trait] +impl GitRepository for SqlxGitRepository { + async fn get_config(&self, workspace_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query("SELECT id, repository_url, branch_name, auth_type, auto_sync, created_at, updated_at FROM git_configs WHERE workspace_id = $1 LIMIT 1") + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + row.map(|r| { + let auth_type_raw: String = r.get("auth_type"); + let auth_type = GitAuthType::parse(&auth_type_raw) + .ok_or_else(|| anyhow::anyhow!("invalid_git_auth_type"))?; + Ok(GitConfigRecord { + id: r.get("id"), + repository_url: r.get("repository_url"), + branch_name: r.get("branch_name"), + auth_type, + auto_sync: r.get("auto_sync"), + created_at: r.get("created_at"), + updated_at: r.get("updated_at"), + }) + }) + .transpose() + } + .await; + out.map_err(Into::into) + } + + async fn upsert_config( + &self, + workspace_id: Uuid, + repository_url: &str, + branch_name: Option<&str>, + auth_type: GitAuthType, + auth_data: &serde_json::Value, + auto_sync: Option, + ) -> PortResult { + let out: anyhow::Result = async { + self.ensure_workspace_unique_constraint_ready().await?; + let enc_auth = 
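`ensure_workspace_unique_constraint_ready` above is a classic async double-checked guard: a cheap `AtomicBool` fast path, a `tokio::sync::Mutex` so only one task runs the repair, and a re-check under the lock so late arrivals skip it. The pattern in isolation, as a self-contained sketch:

```rust
use std::future::Future;
use std::sync::atomic::{AtomicBool, Ordering};
use tokio::sync::Mutex;

struct OnceRepair {
    done: AtomicBool,
    lock: Mutex<()>,
}

impl OnceRepair {
    async fn ensure<F, Fut>(&self, repair: F) -> anyhow::Result<()>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = anyhow::Result<()>>,
    {
        // Fast path: the repair already ran, no lock traffic at all.
        if self.done.load(Ordering::Relaxed) {
            return Ok(());
        }
        let _guard = self.lock.lock().await;
        // Re-check under the lock: another task may have finished meanwhile.
        if self.done.load(Ordering::Relaxed) {
            return Ok(());
        }
        repair().await?;
        self.done.store(true, Ordering::Relaxed);
        Ok(())
    }
}
```

Note the flag is only set after success, so a failed repair propagates its error and is retried by the next caller, matching the adapter above.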
crypto::encrypt_auth_data(&self.encryption_key, auth_data); + let mut repaired_constraint = false; + loop { + let query = sqlx::query( + r#"INSERT INTO git_configs (workspace_id, repository_url, branch_name, auth_type, auth_data, auto_sync) + VALUES ($1, $2, COALESCE($3, 'main'), $4, $5, COALESCE($6, true)) + ON CONFLICT ON CONSTRAINT git_configs_workspace_unique DO UPDATE SET + repository_url = EXCLUDED.repository_url, + branch_name = EXCLUDED.branch_name, + auth_type = EXCLUDED.auth_type, + auth_data = EXCLUDED.auth_data, + auto_sync = EXCLUDED.auto_sync, + updated_at = now() + RETURNING id, repository_url, branch_name, auth_type, auto_sync, created_at, updated_at"# + ) + .bind(workspace_id) + .bind(repository_url) + .bind(branch_name) + .bind(auth_type.as_str()) + .bind(&enc_auth) + .bind(auto_sync); + + match query.fetch_one(&self.pool).await { + Ok(row) => { + let auth_type_raw: String = row.get("auth_type"); + let parsed_auth_type = GitAuthType::parse(&auth_type_raw) + .ok_or_else(|| anyhow::anyhow!("invalid_git_auth_type"))?; + break Ok(GitConfigRecord { + id: row.get("id"), + repository_url: row.get("repository_url"), + branch_name: row.get("branch_name"), + auth_type: parsed_auth_type, + auto_sync: row.get("auto_sync"), + created_at: row.get("created_at"), + updated_at: row.get("updated_at"), + }); + } + Err(sqlx::Error::Database(db_err)) => { + if !repaired_constraint + && is_missing_workspace_unique_error(db_err.as_ref()) + { + warn!( + workspace_id = %workspace_id, + "git_configs_workspace_unique_missing_repair" + ); + self.repair_workspace_unique_constraint().await?; + repaired_constraint = true; + continue; + } + break Err(sqlx::Error::Database(db_err).into()); + } + Err(err) => break Err(err.into()), + } + } + } + .await; + out.map_err(Into::into) + } + + async fn delete_config(&self, workspace_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let res = sqlx::query("DELETE FROM git_configs WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&self.pool) + .await?; + Ok(res.rows_affected() > 0) + } + .await; + out.map_err(Into::into) + } + + async fn load_user_git_cfg(&self, workspace_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query("SELECT repository_url, branch_name, auth_type, auth_data, auto_sync FROM git_configs WHERE workspace_id = $1 LIMIT 1") + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + row.map(|r| { + let repository_url: String = r.get("repository_url"); + let branch_name: String = r.get("branch_name"); + let auth_type_raw: Option = r.try_get("auth_type").ok(); + let auth_type = match auth_type_raw.as_deref() { + None => None, + Some(value) => Some( + GitAuthType::parse(value) + .ok_or_else(|| anyhow::anyhow!("invalid_git_auth_type"))?, + ), + }; + let raw_auth: Option = r.try_get("auth_data").ok(); + let auth_data = + raw_auth.map(|v| crypto::decrypt_auth_data(&self.encryption_key, &v)); + let auto_sync: bool = r.try_get("auto_sync").unwrap_or(true); + Ok(UserGitCfg { + repository_url, + branch_name, + auth_type, + auth_data, + auto_sync, + }) + }) + .transpose() + } + .await; + out.map_err(Into::into) + } + + async fn get_last_sync_log(&self, workspace_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query("SELECT status, message, commit_hash, created_at FROM git_sync_logs WHERE workspace_id = $1 ORDER BY created_at DESC LIMIT 1") + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + row.map(|r| { + let status_raw: Option = 
r.try_get("status").ok(); + let status = match status_raw.as_deref() { + None => None, + Some(value) => Some( + GitSyncStatus::parse(value) + .ok_or_else(|| anyhow::anyhow!("invalid_git_sync_status"))?, + ), + }; + Ok(GitLastSyncLog { + created_at: r.try_get("created_at").ok(), + status, + message: r.try_get("message").ok(), + commit_hash: r.try_get("commit_hash").ok(), + }) + }) + .transpose() + } + .await; + out.map_err(Into::into) + } + + async fn log_sync_operation( + &self, + workspace_id: Uuid, + operation: GitSyncOperation, + status: GitSyncStatus, + message: Option<&str>, + commit_hash: Option<&str>, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let _ = sqlx::query("INSERT INTO git_sync_logs (workspace_id, operation, status, message, commit_hash) VALUES ($1, $2, $3, $4, $5)") + .bind(workspace_id) + .bind(operation.as_str()) + .bind(status.as_str()) + .bind(message) + .bind(commit_hash) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn delete_sync_logs(&self, workspace_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM git_sync_logs WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn delete_repository_state(&self, workspace_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query("DELETE FROM git_repository_state WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn list_auto_sync_workspaces(&self) -> PortResult> { + let out: anyhow::Result> = async { + let rows = sqlx::query( + "SELECT workspace_id FROM git_configs WHERE auto_sync IS DISTINCT FROM false", + ) + .fetch_all(&self.pool) + .await?; + Ok(rows + .into_iter() + .filter_map(|r| r.try_get("workspace_id").ok()) + .collect()) + } + .await; + out.map_err(Into::into) + } +} + +fn is_missing_workspace_unique_error(err: &dyn DatabaseError) -> bool { + let code_matches = err + .code() + .map(|c| c == "42P10" || c == "42704") + .unwrap_or(false); + code_matches + || err.message().contains( + "there is no unique or exclusion constraint matching the ON CONFLICT specification", + ) + || err.message().contains( + "constraint \"git_configs_workspace_unique\" for table \"git_configs\" does not exist", + ) +} diff --git a/api/crates/infrastructure/src/git/db/repositories/mod.rs b/api/crates/infrastructure/src/git/db/repositories/mod.rs new file mode 100644 index 00000000..116e9830 --- /dev/null +++ b/api/crates/infrastructure/src/git/db/repositories/mod.rs @@ -0,0 +1,2 @@ +pub mod git_pull_session_repository_sqlx; +pub mod git_repository_sqlx; diff --git a/api/src/infrastructure/git/mod.rs b/api/crates/infrastructure/src/git/mod.rs similarity index 89% rename from api/src/infrastructure/git/mod.rs rename to api/crates/infrastructure/src/git/mod.rs index 2595d7d5..9a7f1929 100644 --- a/api/src/infrastructure/git/mod.rs +++ b/api/crates/infrastructure/src/git/mod.rs @@ -1,3 +1,4 @@ +pub mod db; pub mod rebuild_queue; pub mod storage; pub mod workspace; diff --git a/api/src/infrastructure/git/rebuild_queue.rs b/api/crates/infrastructure/src/git/rebuild_queue.rs similarity index 56% rename from api/src/infrastructure/git/rebuild_queue.rs rename to api/crates/infrastructure/src/git/rebuild_queue.rs index 7f999231..13b5c634 100644 --- a/api/src/infrastructure/git/rebuild_queue.rs +++ 
b/api/crates/infrastructure/src/git/rebuild_queue.rs @@ -3,8 +3,9 @@ use sqlx::Row; use tracing::debug; use uuid::Uuid; -use crate::application::ports::git_rebuild_job_queue::{GitRebuildJob, GitRebuildJobQueue}; -use crate::infrastructure::db::PgPool; +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::git::ports::git_rebuild_job_queue::{GitRebuildJob, GitRebuildJobQueue}; pub struct PgGitRebuildJobQueue { pool: PgPool, @@ -23,9 +24,10 @@ impl GitRebuildJobQueue for PgGitRebuildJobQueue { workspace_id: Uuid, actor_id: Option, permission_snapshot: &[String], - ) -> anyhow::Result<()> { - sqlx::query( - r#" + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" INSERT INTO git_rebuild_jobs (workspace_id, actor_id, permission_snapshot, attempts, locked_at, last_error) VALUES ($1, $2, $3, 0, NULL, NULL) ON CONFLICT (workspace_id) @@ -46,19 +48,23 @@ impl GitRebuildJobQueue for PgGitRebuildJobQueue { END, updated_at = now() "#, - ) - .bind(workspace_id) - .bind(actor_id) - .bind(serde_json::json!(permission_snapshot)) - .execute(&self.pool) - .await?; - debug!(workspace_id = %workspace_id, "git_rebuild_job_queued"); - Ok(()) + ) + .bind(workspace_id) + .bind(actor_id) + .bind(serde_json::json!(permission_snapshot)) + .execute(&self.pool) + .await?; + debug!(workspace_id = %workspace_id, "git_rebuild_job_queued"); + Ok(()) + } + .await; + out.map_err(Into::into) } - async fn fetch_next(&self, lock_timeout_secs: i64) -> anyhow::Result> { - let row = sqlx::query( - r#" + async fn fetch_next(&self, lock_timeout_secs: i64) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#" WITH next_job AS ( SELECT id FROM git_rebuild_jobs @@ -75,23 +81,29 @@ impl GitRebuildJobQueue for PgGitRebuildJobQueue { WHERE j.id IN (SELECT id FROM next_job) RETURNING j.id, j.workspace_id, j.actor_id, j.permission_snapshot, j.attempts "#, - ) - .bind(lock_timeout_secs.max(1)) - .fetch_optional(&self.pool) - .await?; + ) + .bind(lock_timeout_secs.max(1)) + .fetch_optional(&self.pool) + .await?; - Ok(row.map(|r| GitRebuildJob { - id: r.get("id"), - workspace_id: r.get("workspace_id"), - actor_id: r.try_get("actor_id").ok(), - attempts: r.get("attempts"), - permission_snapshot: parse_permission_snapshot(r.try_get("permission_snapshot").ok()), - })) + Ok(row.map(|r| GitRebuildJob { + id: r.get("id"), + workspace_id: r.get("workspace_id"), + actor_id: r.try_get("actor_id").ok(), + attempts: r.get("attempts"), + permission_snapshot: parse_permission_snapshot( + r.try_get("permission_snapshot").ok(), + ), + })) + } + .await; + out.map_err(Into::into) } - async fn complete(&self, job_id: i64) -> anyhow::Result<()> { - let res = sqlx::query( - r#" + async fn complete(&self, job_id: i64) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let res = sqlx::query( + r#" UPDATE git_rebuild_jobs SET locked_at = NULL, attempts = 0, @@ -100,23 +112,27 @@ impl GitRebuildJobQueue for PgGitRebuildJobQueue { updated_at = now() WHERE id = $1 AND pending_retry = true "#, - ) - .bind(job_id) - .execute(&self.pool) - .await?; + ) + .bind(job_id) + .execute(&self.pool) + .await?; - if res.rows_affected() == 0 { - sqlx::query("DELETE FROM git_rebuild_jobs WHERE id = $1") - .bind(job_id) - .execute(&self.pool) - .await?; + if res.rows_affected() == 0 { + sqlx::query("DELETE FROM git_rebuild_jobs WHERE id = $1") + .bind(job_id) + .execute(&self.pool) + .await?; + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } - async fn 
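`fetch_next`, `complete`, and `fail` above form a lease-based queue: claiming a job stamps `locked_at`, and a lock older than `lock_timeout_secs` becomes claimable again. A hedged sketch of the consuming side; the worker itself, its poll interval, and the `rebuild` function are assumptions, since this diff only shows the queue adapter:

```rust
use std::time::Duration;

// Hypothetical worker; `queue` is any GitRebuildJobQueue impl and
// `rebuild` stands in for whatever performs the actual rebuild work.
async fn run_worker(queue: &dyn GitRebuildJobQueue) {
    loop {
        match queue.fetch_next(300).await {
            Ok(Some(job)) => match rebuild(&job).await {
                // Success clears the job (or resets it if a retry was
                // queued while we held the lease).
                Ok(()) => { let _ = queue.complete(job.id).await; }
                // Failure records the error and releases the lease so the
                // job can be picked up again.
                Err(err) => { let _ = queue.fail(job.id, &err.to_string()).await; }
            },
            // No claimable job, or a transient error: back off and poll again.
            _ => tokio::time::sleep(Duration::from_secs(5)).await,
        }
    }
}
```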
fail(&self, job_id: i64, error: &str) -> anyhow::Result<()> { - sqlx::query( - r#" + async fn fail(&self, job_id: i64, error: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#" UPDATE git_rebuild_jobs SET last_error = $2, locked_at = NULL, @@ -124,12 +140,15 @@ impl GitRebuildJobQueue for PgGitRebuildJobQueue { updated_at = now() WHERE id = $1 "#, - ) - .bind(job_id) - .bind(error) - .execute(&self.pool) - .await?; - Ok(()) + ) + .bind(job_id) + .bind(error) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) } } diff --git a/api/src/infrastructure/git/storage.rs b/api/crates/infrastructure/src/git/storage.rs similarity index 50% rename from api/src/infrastructure/git/storage.rs rename to api/crates/infrastructure/src/git/storage.rs index 28bb3e28..fc6e6540 100644 --- a/api/src/infrastructure/git/storage.rs +++ b/api/crates/infrastructure/src/git/storage.rs @@ -9,7 +9,8 @@ use tokio::io::AsyncReadExt; use tokio::sync::Mutex; use uuid::Uuid; -use crate::application::ports::git_storage::{ +use application::core::ports::errors::PortResult; +use application::git::ports::git_storage::{ BlobKey, CommitMeta, GitStorage, PackBlob, PackStream, encode_commit_id, }; @@ -142,155 +143,188 @@ impl FilesystemGitStorage { #[async_trait] impl GitStorage for FilesystemGitStorage { - async fn latest_commit(&self, user_id: Uuid) -> anyhow::Result> { - let path = self.latest_path(user_id); - self.read_meta(path.as_path()).await + async fn latest_commit(&self, user_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let path = self.latest_path(user_id); + self.read_meta(path.as_path()).await + } + .await; + out.map_err(Into::into) } - async fn store_pack( - &self, - user_id: Uuid, - pack: &[u8], - meta: &CommitMeta, - ) -> anyhow::Result<()> { - let commit_hex = encode_commit_id(&meta.commit_id); - let pack_path = self.pack_path(user_id, &commit_hex); - if let Some(parent) = pack_path.parent() { - fs::create_dir_all(parent).await?; + async fn store_pack(&self, user_id: Uuid, pack: &[u8], meta: &CommitMeta) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let commit_hex = encode_commit_id(&meta.commit_id); + let pack_path = self.pack_path(user_id, &commit_hex); + if let Some(parent) = pack_path.parent() { + fs::create_dir_all(parent).await?; + } + fs::write(&pack_path, pack).await?; + let meta_path = self.meta_path(user_id, &commit_hex); + self.write_meta(meta_path.as_path(), meta).await?; + Ok(()) } - fs::write(&pack_path, pack).await?; - let meta_path = self.meta_path(user_id, &commit_hex); - self.write_meta(meta_path.as_path(), meta).await?; - Ok(()) + .await; + out.map_err(Into::into) } - async fn load_pack_chain( - &self, - user_id: Uuid, - until: Option<&[u8]>, - ) -> anyhow::Result { - let metas = self.collect_meta_chain(user_id, until).await?; - if metas.is_empty() { - return Ok(Box::pin(stream::empty())); - } - let storage = self.clone(); - let storage_for_stream = storage.clone(); - let stream = stream::iter(metas.into_iter()).then(move |meta| { - let storage = storage_for_stream.clone(); - async move { - let commit_hex = encode_commit_id(&meta.commit_id); - let pack_path = storage.pack_path(user_id, &commit_hex); - if !fs::try_exists(&pack_path).await.unwrap_or(false) { - anyhow::bail!("pack not found for commit {}", commit_hex); - } - let bytes = fs::read(&pack_path).await?; - Ok(PackBlob { - commit_id: meta.commit_id.clone(), - bytes, - pack_key: meta.pack_key.clone(), - }) + async fn load_pack_chain(&self, 
user_id: Uuid, until: Option<&[u8]>) -> PortResult { + let out: anyhow::Result = async { + let metas = self.collect_meta_chain(user_id, until).await?; + if metas.is_empty() { + return Ok(Box::pin(stream::empty::>()) as PackStream); } - }); - Ok(Box::pin(stream)) + let storage = self.clone(); + let storage_for_stream = storage.clone(); + let stream = stream::iter(metas) + .then(move |meta| { + let storage = storage_for_stream.clone(); + async move { + let commit_hex = encode_commit_id(&meta.commit_id); + let pack_path = storage.pack_path(user_id, &commit_hex); + if !fs::try_exists(&pack_path).await.unwrap_or(false) { + anyhow::bail!("pack not found for commit {}", commit_hex); + } + let bytes = fs::read(&pack_path).await?; + Ok(PackBlob { + commit_id: meta.commit_id.clone(), + bytes, + pack_key: meta.pack_key.clone(), + }) + } + }) + .map(|r: anyhow::Result| r.map_err(Into::into)); + Ok(Box::pin(stream) as PackStream) + } + .await; + out.map_err(Into::into) } - async fn put_blob(&self, key: &BlobKey, data: &[u8]) -> anyhow::Result<()> { - let root = self.blobs_root(); - let path = sanitize_blob_path(root.as_path(), &key.path)?; - if let Some(parent) = path.parent() { - fs::create_dir_all(parent).await?; + async fn put_blob(&self, key: &BlobKey, data: &[u8]) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let root = self.blobs_root(); + let path = sanitize_blob_path(root.as_path(), &key.path)?; + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).await?; + } + fs::write(path, data).await?; + Ok(()) } - fs::write(path, data).await?; - Ok(()) + .await; + out.map_err(Into::into) } - async fn fetch_blob(&self, key: &BlobKey) -> anyhow::Result> { - let root = self.blobs_root(); - let path = sanitize_blob_path(root.as_path(), &key.path)?; - let bytes = fs::read(path).await?; - Ok(bytes) + async fn fetch_blob(&self, key: &BlobKey) -> PortResult> { + let out: anyhow::Result> = async { + let root = self.blobs_root(); + let path = sanitize_blob_path(root.as_path(), &key.path)?; + let bytes = fs::read(path).await?; + Ok(bytes) + } + .await; + out.map_err(Into::into) } - async fn commit_meta( - &self, - user_id: Uuid, - commit_id: &[u8], - ) -> anyhow::Result> { - let commit_hex = encode_commit_id(commit_id); - let meta_path = self.meta_path(user_id, &commit_hex); - self.read_meta(meta_path.as_path()).await + async fn commit_meta(&self, user_id: Uuid, commit_id: &[u8]) -> PortResult> { + let out: anyhow::Result> = async { + let commit_hex = encode_commit_id(commit_id); + let meta_path = self.meta_path(user_id, &commit_hex); + self.read_meta(meta_path.as_path()).await + } + .await; + out.map_err(Into::into) } - async fn restore_commit_meta(&self, user_id: Uuid, meta: &CommitMeta) -> anyhow::Result<()> { - let commit_hex = encode_commit_id(&meta.commit_id); - let meta_path = self.meta_path(user_id, &commit_hex); - self.write_meta(meta_path.as_path(), meta).await + async fn restore_commit_meta(&self, user_id: Uuid, meta: &CommitMeta) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let commit_hex = encode_commit_id(&meta.commit_id); + let meta_path = self.meta_path(user_id, &commit_hex); + self.write_meta(meta_path.as_path(), meta).await + } + .await; + out.map_err(Into::into) } async fn fetch_pack_for_commit( &self, user_id: Uuid, commit_id: &[u8], - ) -> anyhow::Result>> { - let commit_hex = encode_commit_id(commit_id); - let pack_path = self.pack_path(user_id, &commit_hex); - if !fs::try_exists(&pack_path).await.unwrap_or(false) { - return Ok(None); + ) -> 
PortResult>> { + let out: anyhow::Result>> = async { + let commit_hex = encode_commit_id(commit_id); + let pack_path = self.pack_path(user_id, &commit_hex); + if !fs::try_exists(&pack_path).await.unwrap_or(false) { + return Ok(None); + } + let bytes = fs::read(&pack_path).await?; + Ok(Some(bytes)) } - let bytes = fs::read(&pack_path).await?; - Ok(Some(bytes)) + .await; + out.map_err(Into::into) } - async fn delete_blob(&self, key: &BlobKey) -> anyhow::Result<()> { - let root = self.blobs_root(); - let path = sanitize_blob_path(root.as_path(), &key.path)?; - if fs::try_exists(&path).await.unwrap_or(false) { - fs::remove_file(path).await?; + async fn delete_blob(&self, key: &BlobKey) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let root = self.blobs_root(); + let path = sanitize_blob_path(root.as_path(), &key.path)?; + if fs::try_exists(&path).await.unwrap_or(false) { + fs::remove_file(path).await?; + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } - async fn delete_pack(&self, user_id: Uuid, commit_id: &[u8]) -> anyhow::Result<()> { - let commit_hex = encode_commit_id(commit_id); - let pack_path = self.pack_path(user_id, &commit_hex); - if fs::try_exists(&pack_path).await.unwrap_or(false) { - fs::remove_file(&pack_path).await?; - } - let meta_path = self.meta_path(user_id, &commit_hex); - if fs::try_exists(&meta_path).await.unwrap_or(false) { - fs::remove_file(&meta_path).await?; + async fn delete_pack(&self, user_id: Uuid, commit_id: &[u8]) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let commit_hex = encode_commit_id(commit_id); + let pack_path = self.pack_path(user_id, &commit_hex); + if fs::try_exists(&pack_path).await.unwrap_or(false) { + fs::remove_file(&pack_path).await?; + } + let meta_path = self.meta_path(user_id, &commit_hex); + if fs::try_exists(&meta_path).await.unwrap_or(false) { + fs::remove_file(&meta_path).await?; + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } - async fn set_latest_commit( - &self, - user_id: Uuid, - meta: Option<&CommitMeta>, - ) -> anyhow::Result<()> { - let latest_path = self.latest_path(user_id); - if let Some(meta) = meta { - self.write_meta(latest_path.as_path(), meta).await? - } else if fs::try_exists(&latest_path).await.unwrap_or(false) { - fs::remove_file(&latest_path).await?; + async fn set_latest_commit(&self, user_id: Uuid, meta: Option<&CommitMeta>) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let latest_path = self.latest_path(user_id); + if let Some(meta) = meta { + self.write_meta(latest_path.as_path(), meta).await? 
+ } else if fs::try_exists(&latest_path).await.unwrap_or(false) { + fs::remove_file(&latest_path).await?; + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } - async fn delete_all(&self, user_id: Uuid) -> anyhow::Result<()> { - let dir = self.user_dir(user_id); - if fs::try_exists(&dir).await.unwrap_or(false) { - fs::remove_dir_all(&dir).await?; - } - let blobs_root = self.blobs_root().join(user_id.to_string()); - if fs::try_exists(&blobs_root).await.unwrap_or(false) { - fs::remove_dir_all(&blobs_root).await?; - } - let latest_path = self.latest_path(user_id); - if fs::try_exists(&latest_path).await.unwrap_or(false) { - fs::remove_file(&latest_path).await?; + async fn delete_all(&self, user_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let dir = self.user_dir(user_id); + if fs::try_exists(&dir).await.unwrap_or(false) { + fs::remove_dir_all(&dir).await?; + } + let blobs_root = self.blobs_root().join(user_id.to_string()); + if fs::try_exists(&blobs_root).await.unwrap_or(false) { + fs::remove_dir_all(&blobs_root).await?; + } + let latest_path = self.latest_path(user_id); + if fs::try_exists(&latest_path).await.unwrap_or(false) { + fs::remove_file(&latest_path).await?; + } + Ok(()) } - Ok(()) + .await; + out.map_err(Into::into) } } @@ -325,9 +359,9 @@ impl StoredCommitMeta { fn into_meta(self) -> anyhow::Result { Ok(CommitMeta { - commit_id: crate::application::ports::git_storage::decode_commit_id(&self.commit_id)?, + commit_id: application::git::ports::git_storage::decode_commit_id(&self.commit_id)?, parent_commit_id: match self.parent_commit_id { - Some(hex) => Some(crate::application::ports::git_storage::decode_commit_id( + Some(hex) => Some(application::git::ports::git_storage::decode_commit_id( &hex, )?), None => None, @@ -548,162 +582,196 @@ impl S3GitStorage { #[async_trait] impl GitStorage for S3GitStorage { - async fn latest_commit(&self, user_id: Uuid) -> anyhow::Result> { - let key = self.key_for_latest(user_id); - self.fetch_meta(&key).await - } - - async fn store_pack( - &self, - user_id: Uuid, - pack: &[u8], - meta: &CommitMeta, - ) -> anyhow::Result<()> { - let commit_hex = encode_commit_id(&meta.commit_id); - let pack_key = self.key_for_pack(user_id, &commit_hex); - self.put_object(&pack_key, pack).await?; - let meta_key = self.key_for_meta(user_id, &commit_hex); - let stored = StoredCommitMeta::from_meta(meta); - let data = serde_json::to_vec_pretty(&stored)?; - self.put_object(&meta_key, &data).await + async fn latest_commit(&self, user_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let key = self.key_for_latest(user_id); + self.fetch_meta(&key).await + } + .await; + out.map_err(Into::into) + } + + async fn store_pack(&self, user_id: Uuid, pack: &[u8], meta: &CommitMeta) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let commit_hex = encode_commit_id(&meta.commit_id); + let pack_key = self.key_for_pack(user_id, &commit_hex); + self.put_object(&pack_key, pack).await?; + let meta_key = self.key_for_meta(user_id, &commit_hex); + let stored = StoredCommitMeta::from_meta(meta); + let data = serde_json::to_vec_pretty(&stored)?; + self.put_object(&meta_key, &data).await + } + .await; + out.map_err(Into::into) } - async fn load_pack_chain( - &self, - user_id: Uuid, - until: Option<&[u8]>, - ) -> anyhow::Result { - let metas = self.collect_meta_chain(user_id, until).await?; - if metas.is_empty() { - return Ok(Box::pin(stream::empty())); - } - let storage = self.clone(); - let storage_for_stream = storage.clone(); - 
let stream = stream::iter(metas.into_iter()).then(move |meta| { - let storage = storage_for_stream.clone(); - async move { - let commit_hex = encode_commit_id(&meta.commit_id); - let pack_key = storage.key_for_pack(user_id, &commit_hex); - let bytes = match storage.get_object(&pack_key).await? { - Some(b) => b, - None => anyhow::bail!("pack missing for commit {commit_hex}"), - }; - Ok(PackBlob { - commit_id: meta.commit_id.clone(), - bytes, - pack_key: meta.pack_key.clone(), - }) + async fn load_pack_chain(&self, user_id: Uuid, until: Option<&[u8]>) -> PortResult { + let out: anyhow::Result = async { + let metas = self.collect_meta_chain(user_id, until).await?; + if metas.is_empty() { + return Ok(Box::pin(stream::empty::>()) as PackStream); } - }); - Ok(Box::pin(stream)) + let storage = self.clone(); + let storage_for_stream = storage.clone(); + let stream = stream::iter(metas) + .then(move |meta| { + let storage = storage_for_stream.clone(); + async move { + let commit_hex = encode_commit_id(&meta.commit_id); + let pack_key = storage.key_for_pack(user_id, &commit_hex); + let bytes = match storage.get_object(&pack_key).await? { + Some(b) => b, + None => anyhow::bail!("pack missing for commit {commit_hex}"), + }; + Ok(PackBlob { + commit_id: meta.commit_id.clone(), + bytes, + pack_key: meta.pack_key.clone(), + }) + } + }) + .map(|r: anyhow::Result| r.map_err(Into::into)); + Ok(Box::pin(stream) as PackStream) + } + .await; + out.map_err(Into::into) } - async fn put_blob(&self, key: &BlobKey, data: &[u8]) -> anyhow::Result<()> { - let key = self.key_for_blob(&key.path); - self.put_object(&key, data).await + async fn put_blob(&self, key: &BlobKey, data: &[u8]) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let key = self.key_for_blob(&key.path); + self.put_object(&key, data).await + } + .await; + out.map_err(Into::into) } - async fn fetch_blob(&self, key: &BlobKey) -> anyhow::Result> { - let key = self.key_for_blob(&key.path); - match self.get_object(&key).await? { - Some(bytes) => Ok(bytes), - None => anyhow::bail!("blob not found"), + async fn fetch_blob(&self, key: &BlobKey) -> PortResult> { + let out: anyhow::Result> = async { + let key = self.key_for_blob(&key.path); + match self.get_object(&key).await? 
{ + Some(bytes) => Ok(bytes), + None => anyhow::bail!("blob not found"), + } } + .await; + out.map_err(Into::into) } - async fn commit_meta( - &self, - user_id: Uuid, - commit_id: &[u8], - ) -> anyhow::Result> { - let commit_hex = encode_commit_id(commit_id); - let meta_key = self.key_for_meta(user_id, &commit_hex); - self.fetch_meta(&meta_key).await + async fn commit_meta(&self, user_id: Uuid, commit_id: &[u8]) -> PortResult> { + let out: anyhow::Result> = async { + let commit_hex = encode_commit_id(commit_id); + let meta_key = self.key_for_meta(user_id, &commit_hex); + self.fetch_meta(&meta_key).await + } + .await; + out.map_err(Into::into) } - async fn restore_commit_meta(&self, user_id: Uuid, meta: &CommitMeta) -> anyhow::Result<()> { - let commit_hex = encode_commit_id(&meta.commit_id); - let meta_key = self.key_for_meta(user_id, &commit_hex); - let stored = StoredCommitMeta::from_meta(meta); - let data = serde_json::to_vec_pretty(&stored)?; - self.put_object(&meta_key, &data).await + async fn restore_commit_meta(&self, user_id: Uuid, meta: &CommitMeta) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let commit_hex = encode_commit_id(&meta.commit_id); + let meta_key = self.key_for_meta(user_id, &commit_hex); + let stored = StoredCommitMeta::from_meta(meta); + let data = serde_json::to_vec_pretty(&stored)?; + self.put_object(&meta_key, &data).await + } + .await; + out.map_err(Into::into) } async fn fetch_pack_for_commit( &self, user_id: Uuid, commit_id: &[u8], - ) -> anyhow::Result>> { - let commit_hex = encode_commit_id(commit_id); - let pack_key = self.key_for_pack(user_id, &commit_hex); - self.get_object(&pack_key).await - } - - async fn delete_blob(&self, key: &BlobKey) -> anyhow::Result<()> { - let key = self.key_for_blob(&key.path); - let _ = self - .client - .delete_object() - .bucket(&self.bucket) - .key(&key) - .send() - .await; - Ok(()) + ) -> PortResult>> { + let out: anyhow::Result>> = async { + let commit_hex = encode_commit_id(commit_id); + let pack_key = self.key_for_pack(user_id, &commit_hex); + self.get_object(&pack_key).await + } + .await; + out.map_err(Into::into) } - async fn delete_pack(&self, user_id: Uuid, commit_id: &[u8]) -> anyhow::Result<()> { - let commit_hex = encode_commit_id(commit_id); - let pack_key = self.key_for_pack(user_id, &commit_hex); - let meta_key = self.key_for_meta(user_id, &commit_hex); - let _ = self - .client - .delete_object() - .bucket(&self.bucket) - .key(&pack_key) - .send() - .await; - let _ = self - .client - .delete_object() - .bucket(&self.bucket) - .key(&meta_key) - .send() - .await; - Ok(()) + async fn delete_blob(&self, key: &BlobKey) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let key = self.key_for_blob(&key.path); + let _ = self + .client + .delete_object() + .bucket(&self.bucket) + .key(&key) + .send() + .await; + Ok(()) + } + .await; + out.map_err(Into::into) } - async fn set_latest_commit( - &self, - user_id: Uuid, - meta: Option<&CommitMeta>, - ) -> anyhow::Result<()> { - let latest_key = self.key_for_latest(user_id); - match meta { - Some(meta) => { - let stored = StoredCommitMeta::from_meta(meta); - let data = serde_json::to_vec_pretty(&stored)?; - let _guard = self.latest_lock.lock().await; - self.put_object(&latest_key, &data).await - } - None => { - let _guard = self.latest_lock.lock().await; - let _ = self - .client - .delete_object() - .bucket(&self.bucket) - .key(&latest_key) - .send() - .await; - Ok(()) + async fn delete_pack(&self, user_id: Uuid, commit_id: &[u8]) -> 
PortResult<()> { + let out: anyhow::Result<()> = async { + let commit_hex = encode_commit_id(commit_id); + let pack_key = self.key_for_pack(user_id, &commit_hex); + let meta_key = self.key_for_meta(user_id, &commit_hex); + let _ = self + .client + .delete_object() + .bucket(&self.bucket) + .key(&pack_key) + .send() + .await; + let _ = self + .client + .delete_object() + .bucket(&self.bucket) + .key(&meta_key) + .send() + .await; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn set_latest_commit(&self, user_id: Uuid, meta: Option<&CommitMeta>) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let latest_key = self.key_for_latest(user_id); + match meta { + Some(meta) => { + let stored = StoredCommitMeta::from_meta(meta); + let data = serde_json::to_vec_pretty(&stored)?; + let _guard = self.latest_lock.lock().await; + self.put_object(&latest_key, &data).await + } + None => { + let _guard = self.latest_lock.lock().await; + let _ = self + .client + .delete_object() + .bucket(&self.bucket) + .key(&latest_key) + .send() + .await; + Ok(()) + } } } - } - - async fn delete_all(&self, user_id: Uuid) -> anyhow::Result<()> { - let pack_prefix = format!("{}/git/packs/{}/", self.root_prefix, user_id); - self.delete_prefix(&pack_prefix).await?; - let blob_prefix = format!("{}/git/blobs/{}/", self.root_prefix, user_id); - self.delete_prefix(&blob_prefix).await?; - self.set_latest_commit(user_id, None).await + .await; + out.map_err(Into::into) + } + + async fn delete_all(&self, user_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let pack_prefix = format!("{}/git/packs/{}/", self.root_prefix, user_id); + self.delete_prefix(&pack_prefix).await?; + let blob_prefix = format!("{}/git/blobs/{}/", self.root_prefix, user_id); + self.delete_prefix(&blob_prefix).await?; + self.set_latest_commit(user_id, None).await?; + Ok(()) + } + .await; + out.map_err(Into::into) } } diff --git a/api/crates/infrastructure/src/git/workspace/helpers.rs b/api/crates/infrastructure/src/git/workspace/helpers.rs new file mode 100644 index 00000000..c798eb80 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers.rs @@ -0,0 +1,17 @@ +mod conflicts; +mod db; +mod errors; +mod front_matter; +mod pack; +mod remote; +mod snapshots; +mod tree; + +pub(super) use conflicts::*; +pub(super) use db::*; +pub(super) use errors::*; +pub(super) use front_matter::*; +pub(super) use pack::*; +pub(super) use remote::*; +pub(super) use snapshots::*; +pub(super) use tree::*; diff --git a/api/crates/infrastructure/src/git/workspace/helpers/conflicts.rs b/api/crates/infrastructure/src/git/workspace/helpers/conflicts.rs new file mode 100644 index 00000000..41438e63 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/conflicts.rs @@ -0,0 +1,103 @@ +use super::super::*; + +pub(in super::super) fn collect_conflicts( + repo: &Repository, + index: &git2::Index, +) -> anyhow::Result> { + let mut out = Vec::new(); + let conflicts = index.conflicts()?; + for conflict in conflicts { + let conflict = conflict?; + let path = conflict + .our + .as_ref() + .or(conflict.their.as_ref()) + .or(conflict.ancestor.as_ref()) + .and_then(|e| std::str::from_utf8(&e.path).ok()) + .unwrap_or("") + .to_string(); + + let to_bytes = |entry: Option<&git2::IndexEntry>| -> anyhow::Result>> { + if let Some(e) = entry { + let blob = repo.find_blob(e.id)?; + Ok(Some(blob.content().to_vec())) + } else { + Ok(None) + } + }; + + let ours_bytes = to_bytes(conflict.our.as_ref())?; + let theirs_bytes = 
to_bytes(conflict.their.as_ref())?; + let base_bytes = to_bytes(conflict.ancestor.as_ref())?; + + let (mut ours, ours_bin) = as_text_or_binary(path.as_str(), ours_bytes.as_ref()); + let (mut theirs, theirs_bin) = as_text_or_binary(path.as_str(), theirs_bytes.as_ref()); + let (mut base, base_bin) = as_text_or_binary(path.as_str(), base_bytes.as_ref()); + let is_binary = ours_bin || theirs_bin || base_bin; + if !is_binary { + ours = super::strip_front_matter_body(path.as_str(), ours); + theirs = super::strip_front_matter_body(path.as_str(), theirs); + base = super::strip_front_matter_body(path.as_str(), base); + } + + out.push(GitPullConflictItemDto { + path, + is_binary, + ours, + theirs, + base, + document_id: None, + }); + } + Ok(out) +} + +pub(in super::super) fn index_entry_path(entry: &git2::IndexEntry) -> anyhow::Result { + let raw = &entry.path; + if raw.is_empty() { + anyhow::bail!("empty index entry path"); + } + if let Ok(cstr) = std::ffi::CStr::from_bytes_with_nul(raw) { + Ok(cstr + .to_str() + .unwrap_or_default() + .trim_end_matches('\0') + .to_string()) + } else { + Ok(String::from_utf8_lossy(raw) + .trim_end_matches('\0') + .to_string()) + } +} + +pub(in super::super) fn index_entry_stage(entry: &git2::IndexEntry) -> i32 { + ((entry.flags as u32 >> 12) & 0b11) as i32 +} + +pub(in super::super) fn as_text_or_binary( + path: &str, + data: Option<&Vec>, +) -> (Option, bool) { + let Some(bytes) = data else { + return (None, false); + }; + match std::str::from_utf8(bytes) { + Ok(s) => (Some(s.to_string()), false), + Err(_) => { + let lower = path.to_ascii_lowercase(); + let looks_text = lower.ends_with(".md") + || lower.ends_with(".markdown") + || lower.ends_with(".txt") + || lower.ends_with(".json") + || lower.ends_with(".yaml") + || lower.ends_with(".yml") + || lower.ends_with(".toml") + || lower.ends_with(".ini"); + if looks_text { + let lossy = String::from_utf8_lossy(bytes).to_string(); + return (Some(lossy), false); + } + (None, true) + } + } +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/db.rs b/api/crates/infrastructure/src/git/workspace/helpers/db.rs new file mode 100644 index 00000000..a78057de --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/db.rs @@ -0,0 +1,25 @@ +use super::super::*; + +pub(in super::super) fn row_to_commit_meta( + row: sqlx::postgres::PgRow, +) -> anyhow::Result { + let commit_id: Vec = row.get("commit_id"); + let parent_commit_id: Option> = row.try_get("parent_commit_id").ok(); + let message: Option = row.try_get("message").ok(); + let author_name: Option = row.try_get("author_name").ok(); + let author_email: Option = row.try_get("author_email").ok(); + let committed_at: DateTime = row.get("committed_at"); + let pack_key: String = row.get("pack_key"); + let file_hash_index: Json> = row.get("file_hash_index"); + + Ok(CommitMeta { + commit_id, + parent_commit_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index: file_hash_index.0, + }) +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/errors.rs b/api/crates/infrastructure/src/git/workspace/helpers/errors.rs new file mode 100644 index 00000000..97222e47 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/errors.rs @@ -0,0 +1,18 @@ +pub(in super::super) fn missing_metadata_commit(err: &anyhow::Error) -> Option { + let needle = "metadata not found for commit "; + for cause in err.chain() { + let msg = cause.to_string(); + if let Some(idx) = msg.find(needle) { + let start = idx + 
needle.len(); + let rest = &msg[start..]; + let commit: String = rest + .chars() + .take_while(|ch| ch.is_ascii_hexdigit()) + .collect(); + if !commit.is_empty() { + return Some(commit); + } + } + } + None +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/front_matter.rs b/api/crates/infrastructure/src/git/workspace/helpers/front_matter.rs new file mode 100644 index 00000000..dd6fa94e --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/front_matter.rs @@ -0,0 +1,63 @@ +pub(in super::super) fn find_front_matter_end(s: &str) -> Option<(usize, usize)> { + let bytes = s.as_bytes(); + let mut idx = 0; + while idx < bytes.len() { + if bytes[idx] == b'\n' { + let after_newline = &s[idx + 1..]; + if after_newline.starts_with("---") { + let mut body_start = idx + 1 + 3; + let mut remainder = &s[body_start..]; + // Skip trailing newlines after the closing delimiter to mirror ingest. + while remainder.starts_with("\r\n") || remainder.starts_with('\n') { + if remainder.starts_with("\r\n") { + body_start += 2; + remainder = &s[body_start..]; + } else { + body_start += 1; + remainder = &s[body_start..]; + } + } + return Some((idx, body_start)); + } + } + idx += 1; + } + None +} + +pub(in super::super) fn split_front_matter(input: &str) -> Option<(&str, &str)> { + let after_open = input + .strip_prefix("---\r\n") + .or_else(|| input.strip_prefix("---\n"))?; + if let Some((front_len, body_start)) = find_front_matter_end(after_open) { + let front = &after_open[..front_len]; + let body = &after_open[body_start..]; + return Some((front, body)); + } + None +} + +pub(in super::super) fn strip_front_matter_body( + path: &str, + text: Option, +) -> Option { + let txt = text?; + let lower = path.to_ascii_lowercase(); + let is_markdown = lower.ends_with(".md") || lower.ends_with(".markdown"); + if !is_markdown { + return Some(txt); + } + if let Some((_, body)) = split_front_matter(txt.as_str()) { + return Some(body.to_string()); + } + Some(txt) +} + +pub(in super::super) fn extract_markdown_body(bytes: &[u8]) -> Option { + let text = std::str::from_utf8(bytes).ok()?; + let trimmed = text.trim_start_matches('\u{feff}'); + if let Some((_, body)) = split_front_matter(trimmed) { + return Some(body.to_string()); + } + Some(trimmed.to_string()) +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/pack.rs b/api/crates/infrastructure/src/git/workspace/helpers/pack.rs new file mode 100644 index 00000000..1f54ee5d --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/pack.rs @@ -0,0 +1,44 @@ +use super::super::*; + +pub(in super::super) fn apply_pack_to_repo(repo: &Repository, pack: &[u8]) -> anyhow::Result<()> { + let objects_dir = repo.path().join("objects").join("pack"); + fs::create_dir_all(&objects_dir)?; + let odb = repo.odb()?; + let mut indexer = Indexer::new(Some(&odb), objects_dir.as_path(), 0o644, true)?; + indexer.write_all(pack)?; + indexer.commit()?; + Ok(()) +} + +pub(in super::super) fn read_first_pack(repo_path: &Path) -> anyhow::Result>> { + let pack_dir = repo_path.join("objects").join("pack"); + if !pack_dir.exists() { + return Ok(None); + } + let mut entries: Vec<_> = std::fs::read_dir(&pack_dir)? 
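+        // Keep only *.pack files; the sort below orders entries by file name so
+        // the pick is deterministic when several packs exist.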
+ .filter_map(|e| e.ok()) + .filter(|e| { + e.path() + .extension() + .map(|ext| ext == "pack") + .unwrap_or(false) + }) + .collect(); + entries.sort_by_key(|e| e.file_name()); + if let Some(entry) = entries.first() { + let bytes = std::fs::read(entry.path())?; + return Ok(Some(bytes)); + } + Ok(None) +} + +pub(in super::super) fn apply_pack_files( + repo: &Repository, + pack_paths: &[PathBuf], +) -> anyhow::Result<()> { + for path in pack_paths { + let bytes = fs::read(path)?; + apply_pack_to_repo(repo, &bytes)?; + } + Ok(()) +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/remote.rs b/api/crates/infrastructure/src/git/workspace/helpers/remote.rs new file mode 100644 index 00000000..cdf1114b --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/remote.rs @@ -0,0 +1,165 @@ +use super::super::*; + +pub(in super::super) fn extract_host(url: &str) -> Option { + let s = url.trim(); + let s = s + .strip_prefix("https://") + .or_else(|| s.strip_prefix("http://")) + .unwrap_or(s); + let mut parts = s.split('/'); + let host_port = parts.next().unwrap_or(""); + let host = host_port.split(':').next().unwrap_or(""); + if host.is_empty() { + None + } else { + Some(host.to_string()) + } +} + +pub(in super::super) fn default_token_username_for(host: Option<&str>) -> &'static str { + match host { + Some(h) if h.contains("github") => "x-access-token", + Some(h) if h.contains("gitlab") => "oauth2", + Some(h) if h.contains("dev.azure.com") || h.contains("visualstudio.com") => "pat", + _ => "git", + } +} + +pub(in super::super) fn build_remote_callbacks(cfg: &UserGitCfg) -> RemoteCallbacks<'static> { + let auth_type = cfg.auth_type; + let auth_data = cfg.auth_data.clone(); + let host_hint = extract_host(&cfg.repository_url); + let mut callbacks = RemoteCallbacks::new(); + callbacks.credentials( + move |_url, username_from_url, _allowed| match auth_type { + Some(domain::git::auth::GitAuthType::Token) => { + if let Some(token) = auth_data + .as_ref() + .and_then(|v| v.get("token")) + .and_then(|v| v.as_str()) + { + let user = username_from_url + .unwrap_or(default_token_username_for(host_hint.as_deref())); + Cred::userpass_plaintext(user, token) + } else { + Cred::default() + } + } + Some(domain::git::auth::GitAuthType::Ssh) => { + if let Some(key) = auth_data + .as_ref() + .and_then(|v| v.get("private_key")) + .and_then(|v| v.as_str()) + { + let user = username_from_url.unwrap_or("git"); + let passphrase = auth_data + .as_ref() + .and_then(|v| v.get("passphrase")) + .and_then(|v| v.as_str()) + .filter(|s| !s.is_empty()); + let trimmed = key.trim(); + if trimmed.starts_with("v1:") { + return Err(GitError::from_str( + "failed to decrypt stored SSH key; check ENCRYPTION_KEY and re-save credentials", + )); + } + if trimmed.contains("BEGIN OPENSSH PRIVATE KEY") { + return Err(GitError::from_str( + "OpenSSH private key format is not supported; provide PEM (BEGIN RSA/EC PRIVATE KEY)", + )); + } + let needs_passphrase = trimmed.contains("ENCRYPTED"); + if needs_passphrase && passphrase.is_none() { + return Err(GitError::from_str( + "SSH private key is encrypted; passphrase is required", + )); + } + Cred::ssh_key_from_memory(user, None, trimmed, passphrase) + } else { + Cred::default() + } + } + None => Cred::default(), + }, + ); + callbacks.certificate_check(|_, _| Ok(CertificateCheckStatus::CertificateOk)); + callbacks +} + +pub(in super::super) fn prepare_remote<'repo>( + repo: &'repo Repository, + cfg: &UserGitCfg, +) -> anyhow::Result> { + let mut remote = match 
repo.find_remote("origin") { + Ok(remote) => remote, + Err(_) => repo.remote("origin", &cfg.repository_url)?, + }; + if remote.url() != Some(cfg.repository_url.as_str()) { + repo.remote_set_url("origin", &cfg.repository_url)?; + remote = repo.find_remote("origin")?; + } + Ok(remote) +} + +pub(in super::super) fn fetch_remote_head( + repo: &Repository, + cfg: &UserGitCfg, + branch: &str, +) -> anyhow::Result> { + let mut remote = prepare_remote(repo, cfg)?; + let callbacks = build_remote_callbacks(cfg); + let mut fetch_options = FetchOptions::new(); + fetch_options.remote_callbacks(callbacks); + let refspec = format!("refs/heads/{branch}:refs/remotes/origin/{branch}"); + remote + .fetch(&[&refspec], Some(&mut fetch_options), None) + .map_err(map_git_http_error)?; + let reference_name = format!("refs/remotes/origin/{branch}"); + match repo.find_reference(&reference_name) { + Ok(reference) => Ok(reference.target()), + Err(err) if err.code() == git2::ErrorCode::NotFound => Ok(None), + Err(err) => Err(err.into()), + } +} + +pub(in super::super) fn perform_push( + repo: &Repository, + cfg: &UserGitCfg, + branch: &str, + commit_oid: git2::Oid, + force: bool, +) -> anyhow::Result { + let ref_name = format!("refs/heads/{}", branch); + repo.reference(&ref_name, commit_oid, true, "update branch for sync")?; + + let mut remote = prepare_remote(repo, cfg)?; + let callbacks = build_remote_callbacks(cfg); + let mut push_options = PushOptions::new(); + push_options.remote_callbacks(callbacks); + let refspec = if force { + format!("+refs/heads/{0}:refs/heads/{0}", branch) + } else { + format!("refs/heads/{0}:refs/heads/{0}", branch) + }; + remote + .push(&[&refspec], Some(&mut push_options)) + .map_err(map_git_http_error)?; + Ok(true) +} + +pub(in super::super) fn map_git_http_error(err: git2::Error) -> anyhow::Error { + if err.class() == ErrorClass::Http { + let msg = err.to_string().to_lowercase(); + if msg.contains("status code: 401") + || msg.contains("status code: 407") + || msg.contains("redirect") + { + // Avoid leaking raw libgit2 error strings to the user; normalize to a short tag. 
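+            // 401/407 and credential-related redirects usually mean bad or missing
+            // credentials rather than a missing repository.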
+ return anyhow!("git_http_auth_redirect"); + } + if msg.contains("status code: 403") || msg.contains("status code: 404") { + return anyhow!("git_http_not_found"); + } + } + err.into() +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/snapshots.rs b/api/crates/infrastructure/src/git/workspace/helpers/snapshots.rs new file mode 100644 index 00000000..ef731816 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/snapshots.rs @@ -0,0 +1,172 @@ +use super::super::*; + +use super::tree::{DirEntry, DirNode, write_dir}; + +#[allow(dead_code)] +pub(in super::super) fn read_commit_files( + repo: &Repository, + commit_id: &[u8], +) -> anyhow::Result>> { + let oid = git2::Oid::from_bytes(commit_id)?; + let commit = repo.find_commit(oid)?; + let tree = commit.tree()?; + let mut files = HashMap::new(); + tree.walk(TreeWalkMode::PreOrder, |root, entry| { + if entry.kind() == Some(ObjectType::Blob) { + if let Some(name) = entry.name() { + if let Ok(blob) = repo.find_blob(entry.id()) { + let key = format!("{}{}", root, name); + files.insert(key, blob.content().to_vec()); + } + } + } + TreeWalkResult::Ok + })?; + Ok(files) +} + +pub(in super::super) enum FileSnapshotData { + Inline(Vec), + StoragePath(String), +} + +pub(in super::super) struct FileSnapshot { + pub(in super::super) hash: String, + pub(in super::super) data: FileSnapshotData, + pub(in super::super) is_text: bool, +} + +pub(in super::super) struct FileDeltaSummary { + pub(in super::super) added: Vec, + pub(in super::super) modified: Vec, + pub(in super::super) deleted: Vec, +} + +pub(in super::super) struct DirtyRow { + pub(in super::super) path: String, + pub(in super::super) is_text: bool, + pub(in super::super) op: String, + pub(in super::super) content_hash: Option, +} + +pub(in super::super) struct DirtyUpsert { + pub(in super::super) is_text: bool, + pub(in super::super) content_hash: Option, +} + +pub(in super::super) fn repo_relative_path(path: &str) -> anyhow::Result { + let trimmed = path.trim_start_matches('/'); + let mut parts = trimmed.splitn(2, '/'); + let leading = parts.next().unwrap_or(""); + if let Some(rest) = parts.next() { + Ok(rest.replace('\\', "/")) + } else if !leading.is_empty() { + Ok(leading.replace('\\', "/")) + } else { + Err(anyhow!("invalid storage path for repository: {path}")) + } +} + +pub(in super::super) fn normalize_repo_path(path: String) -> String { + let trimmed = path.trim_start_matches('/'); + if trimmed.is_empty() { + String::new() + } else { + trimmed + .replace('\\', "/") + .trim_start_matches("./") + .trim_start_matches('/') + .to_string() + } +} + +pub(in super::super) fn blob_key(workspace_id: Uuid, commit_id: &[u8], path: &str) -> BlobKey { + let encoded_path = urlencoding::encode(path); + let commit_hex = encode_commit_id(commit_id); + BlobKey { + path: format!("{}/{}/{}", workspace_id, commit_hex, encoded_path), + } +} + +pub(in super::super) enum FileSource { + Bytes(Vec), + Oid(git2::Oid), +} + +pub(in super::super) fn insert_source_into_dir( + dir: &mut DirNode, + parts: &[&str], + source: &FileSource, +) -> anyhow::Result<()> { + use std::collections::btree_map::Entry; + if parts.is_empty() { + return Ok(()); + } + if parts.len() == 1 { + match source { + FileSource::Bytes(data) => { + dir.entries + .insert(parts[0].to_string(), DirEntry::File(data.clone())); + } + FileSource::Oid(oid) => { + dir.entries + .insert(parts[0].to_string(), DirEntry::Oid(*oid)); + } + } + Ok(()) + } else { + match dir.entries.entry(parts[0].to_string()) { + 
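+            // Recurse one path component at a time; a file entry that collides
+            // with a directory prefix is replaced by a directory below.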
Entry::Occupied(mut occ) => match occ.get_mut() { + DirEntry::Dir(child) => insert_source_into_dir(child, &parts[1..], source), + DirEntry::File(_) | DirEntry::Oid(_) => { + let mut new_dir = DirNode::default(); + insert_source_into_dir(&mut new_dir, &parts[1..], source)?; + *occ.get_mut() = DirEntry::Dir(Box::new(new_dir)); + Ok(()) + } + }, + Entry::Vacant(vac) => { + let mut new_dir = DirNode::default(); + insert_source_into_dir(&mut new_dir, &parts[1..], source)?; + vac.insert(DirEntry::Dir(Box::new(new_dir))); + Ok(()) + } + } + } +} + +pub(in super::super) fn read_commit_blob_oids( + repo: &Repository, + commit_id: &[u8], +) -> anyhow::Result> { + let oid = git2::Oid::from_bytes(commit_id)?; + let commit = repo.find_commit(oid)?; + let tree = commit.tree()?; + let mut blobs = HashMap::new(); + tree.walk(TreeWalkMode::PreOrder, |root, entry| { + if entry.kind() == Some(ObjectType::Blob) { + if let Some(name) = entry.name() { + let key = format!("{}{}", root, name); + blobs.insert(key, entry.id()); + } + } + TreeWalkResult::Ok + })?; + Ok(blobs) +} + +pub(in super::super) fn build_tree_from_sources( + repo: &Repository, + entries: &BTreeMap, +) -> anyhow::Result { + // We'll reconstruct a DirNode and then write it, but we need to preserve existing blob OIDs for FileSource::Oid. + let mut root = DirNode::default(); + for (path, src) in entries.iter() { + let parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect(); + if parts.is_empty() { + continue; + } + insert_source_into_dir(&mut root, &parts, src)?; + } + write_dir(repo, &root) +} diff --git a/api/crates/infrastructure/src/git/workspace/helpers/tree.rs b/api/crates/infrastructure/src/git/workspace/helpers/tree.rs new file mode 100644 index 00000000..e2f8d48e --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/helpers/tree.rs @@ -0,0 +1,98 @@ +use super::super::*; + +pub(in super::super) fn build_tree_from_entries( + repo: &Repository, + entries: &BTreeMap>, +) -> anyhow::Result { + let mut root = DirNode::default(); + for (path, data) in entries.iter() { + let parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect(); + if parts.is_empty() { + continue; + } + insert_into_dir(&mut root, &parts, data.clone()); + } + write_dir(repo, &root) +} + +pub(in super::super) fn signature_from_parts( + name: &str, + email: &str, + at: DateTime, +) -> anyhow::Result> { + let git_time = Time::new(at.timestamp(), 0); + Signature::new(name, email, &git_time).map_err(anyhow::Error::from) +} + +pub(in super::super) fn git_time_to_datetime(time: Time) -> anyhow::Result> { + DateTime::::from_timestamp(time.seconds(), 0) + .ok_or_else(|| anyhow!("invalid git timestamp")) +} + +#[derive(Default)] +pub(in super::super) struct DirNode { + pub(in super::super) entries: BTreeMap, +} + +pub(in super::super) enum DirEntry { + File(Vec), + Oid(git2::Oid), + Dir(Box), +} + +pub(in super::super) fn insert_into_dir(dir: &mut DirNode, parts: &[&str], data: Vec) { + use std::collections::btree_map::Entry; + + if parts.is_empty() { + return; + } + + if parts.len() == 1 { + dir.entries + .insert(parts[0].to_string(), DirEntry::File(data)); + return; + } + + match dir.entries.entry(parts[0].to_string()) { + Entry::Occupied(mut occ) => { + let next = occ.get_mut(); + match next { + DirEntry::Dir(child) => insert_into_dir(child, &parts[1..], data), + DirEntry::File(_) | DirEntry::Oid(_) => { + let mut new_dir = DirNode::default(); + insert_into_dir(&mut new_dir, &parts[1..], data); + *next = DirEntry::Dir(Box::new(new_dir)); + } + } 
+ } + Entry::Vacant(vac) => { + if parts.len() == 1 { + vac.insert(DirEntry::File(data)); + } else { + let mut new_dir = DirNode::default(); + insert_into_dir(&mut new_dir, &parts[1..], data); + vac.insert(DirEntry::Dir(Box::new(new_dir))); + } + } + } +} + +pub(in super::super) fn write_dir(repo: &Repository, dir: &DirNode) -> anyhow::Result { + let mut builder = repo.treebuilder(None)?; + for (name, entry) in dir.entries.iter() { + match entry { + DirEntry::File(content) => { + let oid = repo.blob(content)?; + builder.insert(name, oid, FileMode::Blob.into())?; + } + DirEntry::Oid(oid) => { + builder.insert(name, *oid, FileMode::Blob.into())?; + } + DirEntry::Dir(child) => { + let oid = write_dir(repo, child)?; + builder.insert(name, oid, FileMode::Tree.into())?; + } + } + } + Ok(builder.write()?) +} diff --git a/api/crates/infrastructure/src/git/workspace/import.rs b/api/crates/infrastructure/src/git/workspace/import.rs new file mode 100644 index 00000000..b7312e62 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/import.rs @@ -0,0 +1,68 @@ +impl GitWorkspaceService { + async fn import_repository_inner( + &self, + workspace_id: Uuid, + actor_id: Uuid, + cfg: &UserGitCfg, + ) -> anyhow::Result { + // Suppress dirty tracking globally during import so filesystem watcher/ingest won't re-mark files. + let _global_dirty_guard = crate::core::storage::suppress_git_dirty_global(); + let branch = if cfg.branch_name.is_empty() { + "main".to_string() + } else { + cfg.branch_name.clone() + }; + self.ensure_repository(workspace_id, &branch).await?; + + let previous_index = self + .latest_commit_meta(workspace_id) + .await? + .map(|m| m.file_hash_index) + .unwrap_or_default(); + + // Populate storage and DB with remote history; surface errors so we don't proceed with missing packs. + self.bootstrap_remote_history(workspace_id, cfg, branch.as_str()) + .await?; + let latest = self.ensure_latest_meta(workspace_id).await?; + let Some(latest_meta) = latest else { + return Ok(GitImportOutcome { + files_changed: 0, + commit_hash: None, + docs_created: 0, + attachments_created: 0, + message: "remote has no commits".to_string(), + }); + }; + + let state = self + .state_from_commit_meta(workspace_id, &latest_meta) + .await?; + let files_changed = crate::core::storage::suppress_git_dirty(async { + self.apply_state_to_workspace(workspace_id, &state, &previous_index) + .await + }) + .await?; + + // Materialize documents and attachments from imported state; surface failures so Import can fail loudly. 
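+        // As with apply_state_to_workspace above, run under suppress_git_dirty so
+        // these writes are not re-marked as pending changes.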
+ let (docs_created, attachments_created) = + crate::core::storage::suppress_git_dirty(async { + self.materialize_documents_from_state(workspace_id, actor_id, &state) + .await + }) + .await?; + + self.apply_merged_to_documents(workspace_id, &state).await?; + self.clear_dirty(workspace_id).await.map_err(|err| { + error!(workspace_id = %workspace_id, error = %err, "git_import_clear_dirty_failed"); + err + })?; + + Ok(GitImportOutcome { + files_changed, + docs_created, + attachments_created, + commit_hash: Some(encode_commit_id(&latest_meta.commit_id)), + message: "import completed".to_string(), + }) + } +} diff --git a/api/crates/infrastructure/src/git/workspace/mod.rs b/api/crates/infrastructure/src/git/workspace/mod.rs new file mode 100644 index 00000000..3a7a3d87 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/mod.rs @@ -0,0 +1,62 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::fs; +use std::io::{self, ErrorKind, Write}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use anyhow::{Context, anyhow}; +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use futures_util::StreamExt; +use git2::{ + CertificateCheckStatus, Commit, Cred, Error as GitError, ErrorClass, FetchOptions, FileMode, + Indexer, ObjectType, PushOptions, RemoteCallbacks, Repository, Signature, Sort, Time, + TreeWalkMode, TreeWalkResult, +}; +use sqlx::{Row, types::Json}; +use tempfile::{Builder as TempDirBuilder, TempDir}; +use tracing::{error, info, warn}; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::dtos::TextDiffResult; +use application::core::ports::errors::PortResult; +use application::core::ports::storage::storage_port::StorageResolverPort; +use application::core::services::diff::text_diff::compute_text_diff; +use application::core::services::utils::hash::sha256_hex; +use application::documents::ports::document_path_repository::DocumentPathRepository; +use application::documents::ports::document_repository::DocumentRepository; +use application::documents::ports::realtime::realtime_port::RealtimeEngine; +use application::documents::services::realtime::snapshot::{ + SnapshotService, snapshot_from_markdown, +}; +use application::git::dtos::{ + GitChangeItem, GitCommitInfo, GitImportOutcome, GitPullConflictItemDto, GitPullRequestDto, + GitPullResultDto, GitRemoteCheckDto, GitSyncOutcome, GitSyncRequestDto, GitWorkspaceStatus, +}; +use application::git::ports::git_repository::UserGitCfg; +use application::git::ports::git_storage::{ + BlobKey, CommitMeta, GitStorage, decode_commit_id, encode_commit_id, +}; +use application::git::ports::git_workspace::GitWorkspacePort; +use tokio::fs as async_fs; + +mod helpers; +use helpers::*; + +pub struct GitWorkspaceService { + pool: PgPool, + git_storage: Arc, + storage: Arc, + snapshot: Arc, + realtime: Arc, + docs: Arc, + doc_paths: Arc, +} + +include!("workspace_service.rs"); +include!("sync.rs"); +include!("import.rs"); +include!("remote.rs"); +include!("port.rs"); +include!("pull.rs"); diff --git a/api/crates/infrastructure/src/git/workspace/port.rs b/api/crates/infrastructure/src/git/workspace/port.rs new file mode 100644 index 00000000..e9695eec --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/port.rs @@ -0,0 +1,408 @@ +#[async_trait] +impl GitWorkspacePort for GitWorkspaceService { + + async fn ensure_repository( + &self, + workspace_id: Uuid, + default_branch: &str, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#"INSERT INTO 
git_repository_state (workspace_id, initialized, default_branch, initialized_at, updated_at) + VALUES ($1, true, $2, now(), now()) + ON CONFLICT (workspace_id) DO UPDATE SET + initialized = true, + default_branch = EXCLUDED.default_branch, + initialized_at = COALESCE(git_repository_state.initialized_at, EXCLUDED.initialized_at), + updated_at = now()"#, + ) + .bind(workspace_id) + .bind(default_branch) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn remove_repository(&self, workspace_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + let mut tx = self.pool.begin().await?; + sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&mut *tx) + .await?; + sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&mut *tx) + .await?; + sqlx::query( + "UPDATE git_repository_state SET initialized = false, updated_at = now() WHERE workspace_id = $1", + ) + .bind(workspace_id) + .execute(&mut *tx) + .await?; + tx.commit().await?; + self.git_storage.delete_all(workspace_id).await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn status(&self, workspace_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let state = self.load_repository_state(workspace_id).await?; + let Some((initialized, branch)) = state else { + return Ok(GitWorkspaceStatus { + repository_initialized: false, + current_branch: None, + uncommitted_changes: 0, + untracked_files: 0, + }); + }; + if !initialized { + return Ok(GitWorkspaceStatus { + repository_initialized: false, + current_branch: Some(branch), + uncommitted_changes: 0, + untracked_files: 0, + }); + } + // Dirty-driven status: avoid full workspace scan + let latest = self.latest_commit_meta(workspace_id).await?; + let previous_index: HashMap = latest + .as_ref() + .map(|c| c.file_hash_index.clone()) + .unwrap_or_default(); + + let dirty = self.fetch_dirty(workspace_id).await?; + let mut added: u32 = 0; + let mut modified: u32 = 0; + let mut deleted: u32 = 0; + + for d in dirty.iter() { + match d.op.as_str() { + "upsert" => { + if let Some(prev_hash) = previous_index.get(&d.path) { + // Existing file: if hash unchanged and hash known, ignore; else modified + match d.content_hash.as_ref() { + Some(h) if h == prev_hash => {} + _ => modified += 1, + } + } else { + // New file + added += 1; + } + } + "delete" => { + // Treat as deleted (even if not present in previous index) + deleted += 1; + } + _ => {} + } + } + + Ok(GitWorkspaceStatus { + repository_initialized: true, + current_branch: Some(branch), + uncommitted_changes: modified + deleted, + untracked_files: added, + }) + } + .await; + out.map_err(Into::into) + } + + async fn list_changes(&self, workspace_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + // If repository isn't initialized, nothing to report + if let Some((initialized, _branch)) = self.load_repository_state(workspace_id).await? 
{ + if !initialized { + return Ok(Vec::new()); + } + } else { + return Ok(Vec::new()); + } + + // Use dirty set to derive changes without scanning storage + let latest = self.latest_commit_meta(workspace_id).await?; + let previous_index: HashMap = latest + .as_ref() + .map(|c| c.file_hash_index.clone()) + .unwrap_or_default(); + let dirty = self.fetch_dirty(workspace_id).await?; + + let mut change_map: BTreeMap = BTreeMap::new(); + for d in dirty.iter() { + match d.op.as_str() { + "upsert" => { + if let Some(prev_hash) = previous_index.get(&d.path) { + // If hash unchanged and we know the new hash, skip reporting + match d.content_hash.as_ref() { + Some(h) if h == prev_hash => { + change_map.remove(&d.path); + } + _ => { + change_map.insert(d.path.clone(), "modified".to_string()); + } + } + } else { + change_map.insert(d.path.clone(), "untracked".to_string()); + } + } + "delete" => { + change_map.insert(d.path.clone(), "deleted".to_string()); + } + _ => {} + } + } + + let changes = change_map + .into_iter() + .map(|(path, status)| GitChangeItem { path, status }) + .collect(); + Ok(changes) + } + .await; + out.map_err(Into::into) + } + + async fn working_diff(&self, workspace_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let latest = self.latest_commit_meta(workspace_id).await?; + let previous_index = latest + .as_ref() + .map(|c| c.file_hash_index.clone()) + .unwrap_or_default(); + let current = self.collect_current_state(workspace_id).await?; + let delta = self.compute_deltas(¤t, &previous_index); + let mut results = Vec::new(); + + let latest_commit_id = latest.as_ref().map(|c| c.commit_id.clone()); + + for path in delta.added.iter().chain(delta.modified.iter()) { + if let Some(snapshot) = current.get(path) { + if snapshot.is_text { + let new_bytes = self.snapshot_bytes(snapshot).await?; + let new_content = String::from_utf8_lossy(&new_bytes).to_string(); + let old_bytes = match (&latest_commit_id, previous_index.get(path)) { + (Some(commit_id), Some(_)) => { + self.load_file_snapshot(workspace_id, commit_id.as_slice(), path) + .await? + } + _ => None, + }; + let old_text = old_bytes.and_then(|b| String::from_utf8(b).ok()); + results.push(self.build_diff_result( + path, + old_text.as_deref(), + Some(&new_content), + )); + } else { + results.push(TextDiffResult { + file_path: path.clone(), + diff_lines: Vec::new(), + old_content: None, + new_content: None, + }); + } + } + } + + for path in delta.deleted { + let old_bytes = if let (Some(commit_id), Some(_)) = + (&latest_commit_id, previous_index.get(&path)) + { + self.load_file_snapshot(workspace_id, commit_id.as_slice(), &path) + .await? 
+ } else { + None + }; + let old_text = old_bytes.and_then(|b| String::from_utf8(b).ok()); + results.push(self.build_diff_result(&path, old_text.as_deref(), None)); + } + + Ok(results) + } + .await; + out.map_err(Into::into) + } + + async fn commit_diff( + &self, + workspace_id: Uuid, + from: &str, + to: &str, + ) -> PortResult> { + let out: anyhow::Result> = async { + let from_meta = self.load_commit_meta_ref(workspace_id, from).await?; + let to_meta = self.load_commit_meta_ref(workspace_id, to).await?; + + if let Some(to_meta_ref) = to_meta.as_ref() { + match self + .commit_diff_via_packs(workspace_id, from_meta.as_ref(), to_meta_ref) + .await + { + Ok(results) => return Ok(results), + Err(err) => { + warn!( + %err, + from = from_meta + .as_ref() + .map(|m| encode_commit_id(&m.commit_id)) + .unwrap_or_else(|| "(root)".to_string()), + to = encode_commit_id(&to_meta_ref.commit_id), + "failed to compute commit diff from pack data, using stored snapshots" + ); + } + } + } + + self.commit_diff_from_storage(workspace_id, from_meta.as_ref(), to_meta.as_ref()) + .await + } + .await; + out.map_err(Into::into) + } + + async fn history(&self, workspace_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let rows = sqlx::query( + r#"SELECT commit_id, message, author_name, author_email, committed_at + FROM git_commits + WHERE workspace_id = $1 + ORDER BY committed_at DESC + LIMIT 200"#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + + let history = rows + .into_iter() + .map(|row| { + let commit_id: Vec = row.get("commit_id"); + let message: Option = row.try_get("message").ok(); + let author_name: Option = row.try_get("author_name").ok(); + let author_email: Option = row.try_get("author_email").ok(); + let committed_at: DateTime = row.get("committed_at"); + GitCommitInfo { + hash: encode_commit_id(&commit_id), + message: message.unwrap_or_default(), + author_name: author_name.unwrap_or_default(), + author_email: author_email.unwrap_or_default(), + time: committed_at, + } + }) + .collect(); + Ok(history) + } + .await; + out.map_err(Into::into) + } + + async fn sync( + &self, + workspace_id: Uuid, + req: &GitSyncRequestDto, + cfg: Option<&UserGitCfg>, + ) -> PortResult { + self.sync_inner(workspace_id, req, cfg) + .await + .map_err(Into::into) + } + + async fn import_repository( + &self, + workspace_id: Uuid, + actor_id: Uuid, + cfg: &UserGitCfg, + ) -> PortResult { + self.import_repository_inner(workspace_id, actor_id, cfg) + .await + .map_err(Into::into) + } + + async fn pull( + &self, + workspace_id: Uuid, + actor_id: Uuid, + req: &GitPullRequestDto, + cfg: &UserGitCfg, + ) -> PortResult { + self.pull_with_recovery(workspace_id, actor_id, req, cfg) + .await + .map_err(Into::into) + } + + async fn head_commit(&self, workspace_id: Uuid) -> PortResult>> { + let out: anyhow::Result>> = async { + Ok(self + .latest_commit_meta(workspace_id) + .await? 
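+                // Head is simply the commit id of the most recently persisted meta.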
+ .map(|m| m.commit_id)) + } + .await; + out.map_err(Into::into) + } + + async fn remote_head( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> PortResult>> { + self.remote_head_inner(workspace_id, cfg) + .await + .map_err(Into::into) + } + + async fn has_pending_changes(&self, workspace_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let dirty_rows = self.fetch_dirty(workspace_id).await?; + Ok(!dirty_rows.is_empty()) + } + .await; + out.map_err(Into::into) + } + + async fn drift_since_commit( + &self, + workspace_id: Uuid, + base_commit: &[u8], + ) -> PortResult { + let out: anyhow::Result = async { + let Some(meta) = self.commit_meta_by_id(workspace_id, base_commit).await? else { + return Ok(true); + }; + let base_index = meta.file_hash_index; + let current_state = self.collect_current_state(workspace_id).await?; + if base_index.len() != current_state.len() { + return Ok(true); + } + for (path, snapshot) in current_state.into_iter() { + let Some(base_hash) = base_index.get(&path) else { + return Ok(true); + }; + if base_hash != &snapshot.hash { + return Ok(true); + } + } + Ok(false) + } + .await; + out.map_err(Into::into) + } + + async fn check_remote( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> PortResult { + self.check_remote_inner(workspace_id, cfg) + .await + .map_err(Into::into) + } + +} diff --git a/api/crates/infrastructure/src/git/workspace/pull.rs b/api/crates/infrastructure/src/git/workspace/pull.rs new file mode 100644 index 00000000..f4668a38 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull.rs @@ -0,0 +1,5 @@ +include!("pull/conflicts.rs"); +include!("pull/entry.rs"); +include!("pull/pull_once.rs"); +include!("pull/packs.rs"); +include!("pull/repair.rs"); diff --git a/api/crates/infrastructure/src/git/workspace/pull/conflicts.rs b/api/crates/infrastructure/src/git/workspace/pull/conflicts.rs new file mode 100644 index 00000000..40616462 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/conflicts.rs @@ -0,0 +1,46 @@ +impl GitWorkspaceService { + async fn build_conflict_item( + &self, + workspace_id: Uuid, + path: &str, + current_state: &HashMap, + remote_state: &HashMap, + local_meta: Option<&CommitMeta>, + ) -> anyhow::Result { + let ours_bytes = if let Some(snap) = current_state.get(path) { + Some(self.snapshot_bytes(snap).await?) + } else { + None + }; + let theirs_bytes = if let Some(snap) = remote_state.get(path) { + Some(self.snapshot_bytes(snap).await?) + } else { + Some(Vec::new()) + }; + let base_bytes = if let Some(meta) = local_meta.as_ref() { + self.load_file_snapshot(workspace_id, meta.commit_id.as_slice(), path) + .await? 
+ } else { + None + }; + + let (mut ours, ours_bin) = as_text_or_binary(path, ours_bytes.as_ref()); + let (mut theirs, theirs_bin) = as_text_or_binary(path, theirs_bytes.as_ref()); + let (mut base, base_bin) = as_text_or_binary(path, base_bytes.as_ref()); + let is_binary = ours_bin || theirs_bin || base_bin; + if !is_binary { + ours = strip_front_matter_body(path, ours); + theirs = strip_front_matter_body(path, theirs); + base = strip_front_matter_body(path, base); + } + + Ok(GitPullConflictItemDto { + path: path.to_string(), + is_binary, + ours, + theirs, + base, + document_id: None, + }) + } +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/entry.rs b/api/crates/infrastructure/src/git/workspace/pull/entry.rs new file mode 100644 index 00000000..dbcfa620 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/entry.rs @@ -0,0 +1,37 @@ +impl GitWorkspaceService { + async fn pull_with_recovery( + &self, + workspace_id: Uuid, + actor_id: Uuid, + req: &GitPullRequestDto, + cfg: &UserGitCfg, + ) -> anyhow::Result { + let mut recover_attempts: u8 = 0; + let mut skip_local_pack_restore = false; + loop { + match self + .pull_once(workspace_id, actor_id, req, cfg, skip_local_pack_restore) + .await + { + Ok(dto) => return Ok(dto), + Err(err) => { + if Self::is_missing_objects(&err) { + if recover_attempts < 2 { + recover_attempts += 1; + skip_local_pack_restore = true; + warn!( + workspace_id = %workspace_id, + attempt = %recover_attempts, + error = %err, + "git_pull_missing_objects_recovering" + ); + self.recover_missing_objects(workspace_id, cfg).await?; + continue; + } + } + return Err(err); + } + } + } + } +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/packs.rs b/api/crates/infrastructure/src/git/workspace/pull/packs.rs new file mode 100644 index 00000000..b8dac4de --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/packs.rs @@ -0,0 +1,205 @@ +impl GitWorkspaceService { + async fn persist_pack_chain( + &self, + workspace_id: Uuid, + until: Option<&[u8]>, + ) -> anyhow::Result)>> { + // Attempt to rebuild pack chain from stored snapshots if packs are missing or corrupted. + async fn rebuild_from_snapshots( + svc: &GitWorkspaceService, + workspace_id: Uuid, + until: Option<&[u8]>, + ) -> anyhow::Result)>> { + // Collect commit metas from oldest to newest + let mut chain: Vec = Vec::new(); + let mut cursor = match until { + Some(id) => svc.commit_meta_by_id(workspace_id, id).await?, + None => svc.latest_commit_meta(workspace_id).await?, + }; + while let Some(meta) = cursor { + chain.push(meta.clone()); + if let Some(parent) = meta.parent_commit_id.as_ref() { + cursor = svc.commit_meta_by_id(workspace_id, parent).await?; + } else { + break; + } + } + if chain.is_empty() { + return Ok(None); + } + chain.reverse(); + + type PreparedEntry = (String, Vec); + type PreparedCommit = (CommitMeta, Vec); + + // Preload snapshots async + let mut prepared: Vec = Vec::new(); + for meta in chain.iter() { + let mut entries: Vec<(String, Vec)> = Vec::new(); + for path in meta.file_hash_index.keys() { + let Some(bytes) = svc + .load_file_snapshot(workspace_id, meta.commit_id.as_slice(), path) + .await? 
+ else { + anyhow::bail!( + "missing snapshot blob for {} at commit {}", + path, + encode_commit_id(&meta.commit_id) + ); + }; + entries.push((path.clone(), bytes)); + } + prepared.push((meta.clone(), entries)); + } + + // Build packs synchronously to avoid Send issues with git2 types + let (temp_dir, pack_paths) = tokio::task::block_in_place(|| -> anyhow::Result<_> { + let temp_dir = tempfile::tempdir()?; + let repo = Repository::init_bare(temp_dir.path())?; + let mut built_commits: HashMap, git2::Oid> = HashMap::new(); + let mut pack_paths: Vec = Vec::new(); + + for (meta, entries) in prepared.into_iter() { + let mut builder = repo.treebuilder(None)?; + for (path, bytes) in entries.iter() { + let blob_oid = repo.blob(bytes)?; + builder.insert(path, blob_oid, FileMode::Blob.into())?; + } + let tree_oid = builder.write()?; + let tree = repo.find_tree(tree_oid)?; + + let sig = signature_from_parts( + meta.author_name.as_deref().unwrap_or("RefMD"), + meta.author_email.as_deref().unwrap_or("refmd@example.com"), + meta.committed_at, + )?; + let mut parents = Vec::new(); + if let Some(parent) = meta.parent_commit_id.as_ref() { + if let Some(existing) = built_commits.get(parent) { + parents.push(repo.find_commit(*existing)?); + } + } + let parent_refs: Vec<&Commit> = parents.iter().collect(); + let commit_oid = repo.commit( + None, + &sig, + &sig, + meta.message + .as_deref() + .unwrap_or("Recovered commit from snapshots"), + &tree, + &parent_refs, + )?; + if commit_oid.as_bytes() != meta.commit_id.as_slice() { + anyhow::bail!( + "reconstructed commit id mismatch for {}", + encode_commit_id(&meta.commit_id) + ); + } + built_commits.insert(meta.commit_id.clone(), commit_oid); + + let mut pack_builder = repo.packbuilder()?; + pack_builder.insert_commit(commit_oid)?; + for p in parents.iter() { + pack_builder.insert_commit(p.id())?; + } + let mut pack_buf = git2::Buf::new(); + pack_builder.write_buf(&mut pack_buf)?; + let pack_bytes = pack_buf.to_vec(); + + let pack_path = temp_dir + .path() + .join(format!("{:08}.pack", pack_paths.len())); + std::fs::write(&pack_path, &pack_bytes)?; + pack_paths.push(pack_path); + } + + Ok((temp_dir, pack_paths)) + })?; + + // Persist rebuilt packs and metas back to storage + for (idx, meta) in chain.iter().enumerate() { + let pack_bytes = std::fs::read(&pack_paths[idx])?; + svc.git_storage + .store_pack(workspace_id, &pack_bytes, meta) + .await?; + svc.upsert_commit_record(workspace_id, meta).await?; + let _ = svc + .git_storage + .set_latest_commit(workspace_id, Some(meta)) + .await; + } + + Ok(Some((temp_dir, pack_paths))) + } + + let mut attempts = 0; + loop { + match self.git_storage.load_pack_chain(workspace_id, until).await { + Ok(mut stream) => { + let temp_dir = tempfile::tempdir()?; + let mut pack_paths = Vec::new(); + let mut index: usize = 0; + while let Some(pack) = stream.next().await { + let pack = pack?; + let path = temp_dir.path().join(format!("{:08}.pack", index)); + tokio::fs::write(&path, &pack.bytes).await?; + pack_paths.push(path); + index += 1; + } + if pack_paths.is_empty() { + return Ok(None); + } else { + return Ok(Some((temp_dir, pack_paths))); + } + } + Err(err) => { + let err_str = err.to_string(); + let is_missing_objects = err_str.to_lowercase().contains("missing") + && err_str.to_lowercase().contains("object"); + if let Some(rebuilt) = rebuild_from_snapshots(self, workspace_id, until).await? 
+ { + return Ok(Some(rebuilt)); + } + if attempts == 0 { + if let Some(commit_hex) = missing_metadata_commit(&err) { + match self + .repair_missing_commit_metadata(workspace_id, &commit_hex) + .await + { + Ok(_) => { + attempts += 1; + continue; + } + Err(repair_err) => { + warn!( + workspace_id = %workspace_id, + commit = %commit_hex, + error = ?repair_err, + "git_commit_metadata_repair_failed" + ); + } + } + } + // If pack is missing objects, fall back by resetting git storage pointer and DB history. + if is_missing_objects { + warn!( + workspace_id = %workspace_id, + error = %err, + "git_pack_missing_objects_detected_resetting_history" + ); + // Drop storage latest pointer and DB commits for this workspace. + let _ = self.git_storage.set_latest_commit(workspace_id, None).await; + let _ = sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&self.pool) + .await; + return Ok(None); + } + } + return Err(err.into()); + } + } + } + } +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once.rs new file mode 100644 index 00000000..982a6aaf --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once.rs @@ -0,0 +1,7 @@ +include!("pull_once/main.rs"); +include!("pull_once/merge.rs"); +include!("pull_once/fallback_conflicts.rs"); +include!("pull_once/state.rs"); +include!("pull_once/pack.rs"); +include!("pull_once/persist.rs"); +include!("pull_once/response.rs"); diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/fallback_conflicts.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/fallback_conflicts.rs new file mode 100644 index 00000000..a3a6a9be --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/fallback_conflicts.rs @@ -0,0 +1,40 @@ +impl GitWorkspaceService { +async fn pull_build_fallback_diff_conflicts( + &self, + workspace_id: Uuid, + local_oid: Option, + remote_oid: git2::Oid, + current_state: &HashMap, + remote_state: &HashMap, + local_meta: Option<&CommitMeta>, +) -> anyhow::Result> { + let local_oid_val = local_oid.unwrap_or(remote_oid); + if remote_oid == local_oid_val { + return Ok(Vec::new()); + } + + let mut all_paths: HashSet = HashSet::new(); + for p in remote_state.keys() { + all_paths.insert(p.clone()); + } + for p in current_state.keys() { + all_paths.insert(p.clone()); + } + + let mut remote_conflicts: Vec = Vec::new(); + for path in all_paths { + let remote_hash = remote_state.get(&path).map(|s| &s.hash); + let local_hash = current_state.get(&path).map(|s| &s.hash); + if remote_hash == local_hash { + continue; + } + + let item = self + .build_conflict_item(workspace_id, &path, current_state, remote_state, local_meta) + .await?; + remote_conflicts.push(item); + } + + Ok(remote_conflicts) +} +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/main.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/main.rs new file mode 100644 index 00000000..4306e326 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/main.rs @@ -0,0 +1,302 @@ +impl GitWorkspaceService { +async fn pull_once( + &self, + workspace_id: Uuid, + actor_id: Uuid, + req: &GitPullRequestDto, + cfg: &UserGitCfg, + skip_local_pack_restore: bool, +) -> anyhow::Result { + let state = self.load_repository_state(workspace_id).await?; + let Some((initialized, branch_default)) = state else { + anyhow::bail!("repository not initialized"); + }; + if !initialized { + 
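The pack loader above and `is_missing_objects` in `service/history.rs` both classify storage failures by substring-matching the rendered error text, with slightly different needles at each site. A minimal sketch of that heuristic as a single shared predicate — the free-function name and the test are illustrative, not part of the patch:

```rust
/// Heuristic used by the recovery paths: treat an error as "missing objects"
/// when its rendered message mentions missing objects or a missing packfile.
/// Substring matching is brittle, but it is the only signal left once the
/// underlying git2/storage error has been flattened into `anyhow::Error`.
fn looks_like_missing_objects(err: &anyhow::Error) -> bool {
    let msg = err.to_string().to_lowercase();
    (msg.contains("missing") && msg.contains("object")) || msg.contains("packfile is missing")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn classifies_missing_object_errors() {
        assert!(looks_like_missing_objects(&anyhow::anyhow!(
            "odb: missing object 1234abcd"
        )));
        assert!(!looks_like_missing_objects(&anyhow::anyhow!("network timeout")));
    }
}
```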
anyhow::bail!("repository not initialized"); + } + if cfg.repository_url.is_empty() { + anyhow::bail!("remote not configured"); + } + + let branch = if cfg.branch_name.is_empty() { + branch_default + } else { + cfg.branch_name.clone() + }; + + // Capture current workspace head before touching remote history. + let mut local_meta = self.latest_commit_meta(workspace_id).await?; + // After a recovery we want to treat pull as a fresh fast-forward from remote. + if skip_local_pack_restore { + local_meta = None; + } + let mut local_history_reset = false; + let mut base_index: HashMap = local_meta + .as_ref() + .map(|m| m.file_hash_index.clone()) + .unwrap_or_default(); + let mut previous_index = base_index.clone(); + let mut base_commit = local_meta.as_ref().map(|m| m.commit_id.clone()); + + let temp_dir = TempDirBuilder::new() + .prefix("git-pull-") + .tempdir() + .map_err(|e| anyhow::anyhow!(e))?; + let repo = Repository::init_bare(temp_dir.path())?; + if !skip_local_pack_restore { + match self + .persist_pack_chain( + workspace_id, + local_meta.as_ref().map(|m| m.commit_id.as_slice()), + ) + .await? + { + Some((_, pack_paths)) => { + apply_pack_files(&repo, &pack_paths)?; + } + None => { + warn!( + workspace_id = %workspace_id, + "git_pull_pack_restore_missing_resetting_base" + ); + // Storage/DB history was reset; treat as fresh pull with no local history. + local_meta = None; + local_history_reset = true; + base_index.clear(); + previous_index.clear(); + base_commit = None; + } + } + } else { + info!(workspace_id = %workspace_id, "git_pull_skip_local_pack_restore"); + } + + let remote_oid = { + let Some(head) = fetch_remote_head(&repo, cfg, &branch)? else { + return Ok(GitPullResultDto { + success: false, + message: format!("branch '{branch}' not found on remote"), + files_changed: 0, + commit_hash: None, + conflicts: None, + base_commit: base_commit.clone(), + remote_commit: None, + }); + }; + head + }; + let remote_commit = Some(remote_oid.as_bytes().to_vec()); + + let mut local_oid = if local_history_reset { + None + } else { + local_meta + .as_ref() + .and_then(|m| git2::Oid::from_bytes(&m.commit_id).ok()) + }; + // If workspace has no local commit recorded (fresh pull), fall back to latest known meta after bootstrap. + if local_oid.is_none() && !skip_local_pack_restore && !local_history_reset { + if let Some(meta) = self.latest_commit_meta(workspace_id).await? { + base_index = meta.file_hash_index.clone(); + previous_index = base_index.clone(); + base_commit = Some(meta.commit_id.clone()); + local_oid = git2::Oid::from_bytes(&meta.commit_id).ok(); + local_meta = Some(meta); + } + } + // Detect drift between latest commit and current workspace using the same dirty set as Git Changes/Status. + let dirty_rows = self.fetch_dirty(workspace_id).await?; + let current_state = self.collect_current_state(workspace_id).await?; + info!(workspace_id = %workspace_id, dirty_count = dirty_rows.len(), skip_local_pack_restore = skip_local_pack_restore, "git_pull_dirty_state"); + + #[derive(Clone, Copy, PartialEq, Eq)] + enum CommitRelation { + NoLocal, + Same, + LocalAhead, + RemoteAhead, + Diverged, + } + + let commit_relation = if let Some(local_oid_val) = local_oid { + if local_oid_val == remote_oid { + CommitRelation::Same + } else if repo.graph_descendant_of(local_oid_val, remote_oid)? { + CommitRelation::LocalAhead + } else if repo.graph_descendant_of(remote_oid, local_oid_val)? 
{ + CommitRelation::RemoteAhead + } else { + CommitRelation::Diverged + } + } else { + CommitRelation::NoLocal + }; + + // Nothing to do when remote is identical to or behind the local head. + if matches!(commit_relation, CommitRelation::Same | CommitRelation::LocalAhead) { + let commit_hash = local_oid + .as_ref() + .map(|oid| encode_commit_id(oid.as_bytes())); + return Ok(GitPullResultDto { + success: true, + message: "no remote changes".to_string(), + files_changed: 0, + commit_hash, + conflicts: None, + base_commit: base_commit.clone(), + remote_commit: remote_commit.clone(), + }); + } + + let remote_state = Self::pull_collect_state_from_commit(&repo, remote_oid)?; + let remote_changed_paths_vec = Self::pull_remote_changed_paths(&base_index, &remote_state); + let mut remote_conflicts = self + .pull_build_conflicts_for_paths( + workspace_id, + &remote_changed_paths_vec, + ¤t_state, + &remote_state, + local_meta.as_ref(), + ) + .await?; + + // First-time pull with no local history and no dirty changes: allow fast-forward without forcing conflicts. + if local_meta.is_none() && dirty_rows.is_empty() { + remote_conflicts.clear(); + } + + // If commits differ but no conflict paths were detected above, fallback to diff of current vs remote trees. + if remote_conflicts.is_empty() { + remote_conflicts = self + .pull_build_fallback_diff_conflicts( + workspace_id, + local_oid, + remote_oid, + ¤t_state, + &remote_state, + local_meta.as_ref(), + ) + .await?; + } + let remote_changes = !remote_conflicts.is_empty(); + let remote_ahead_clean = matches!(commit_relation, CommitRelation::RemoteAhead) && dirty_rows.is_empty(); + let fast_forward_remote = matches!(commit_relation, CommitRelation::NoLocal) || remote_ahead_clean; + + // Detect overlap between remote-changed paths and dirty rows to avoid false conflicts. + let dirty_remote_overlap = Self::pull_dirty_remote_overlap(&dirty_rows, &remote_changed_paths_vec); + + info!( + workspace_id = %workspace_id, + dirty_count = dirty_rows.len(), + remote_conflict_count = remote_conflicts.len(), + remote_changes = remote_changes, + resolutions_count = req.resolutions.len(), + dirty_remote_overlap = dirty_remote_overlap, + "git_pull_debug_state" + ); + + // If workspace has dirty changes overlapping remote changes, require explicit resolutions. + if remote_changes && dirty_remote_overlap && req.resolutions.is_empty() { + let conflicts = if remote_conflicts.is_empty() { + vec![GitPullConflictItemDto { + path: "".to_string(), + is_binary: false, + ours: None, + theirs: None, + base: None, + document_id: None, + }] + } else { + remote_conflicts.clone() + }; + return Ok(Self::pull_conflicts_detected_response( + base_commit.clone(), + remote_commit.clone(), + conflicts, + )); + } + + // Ensure remote head commit metadata/pack exists locally for merge parent and future syncs. + let mut remote_pack: Option<(CommitMeta, Vec)> = None; + if self + .commit_meta_by_id(workspace_id, remote_oid.as_bytes()) + .await? + .is_none() + { + let remote_index: HashMap = remote_state + .iter() + .map(|(path, snap)| (path.clone(), snap.hash.clone())) + .collect(); + let (remote_meta, remote_pack_bytes) = + Self::pull_build_commit_meta_and_pack(&repo, workspace_id, remote_oid, remote_index)?; + remote_pack = Some((remote_meta, remote_pack_bytes)); + } + + // Fast-forward when there is no local history or the workspace head cleanly trails remote. + // For fresh workspaces with dirty changes, surface conflicts instead of overwriting. 
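The `CommitRelation` computed above drives every later branch: `Same` and `LocalAhead` return early, `RemoteAhead` with a clean worktree fast-forwards, and `Diverged` falls through to the merge path. The core probe is git2's `Repository::graph_descendant_of(commit, ancestor)`, which reports whether `ancestor` is reachable from `commit`. A standalone sketch of the same classification (type and function names are illustrative, not part of the patch):

```rust
use git2::{Oid, Repository};

#[derive(Debug, PartialEq, Eq)]
enum Relation {
    Same,
    LocalAhead,  // remote is an ancestor of local: nothing to pull
    RemoteAhead, // local is an ancestor of remote: fast-forward candidate
    Diverged,    // neither contains the other: merge required
}

fn classify(repo: &Repository, local: Oid, remote: Oid) -> Result<Relation, git2::Error> {
    if local == remote {
        Ok(Relation::Same)
    } else if repo.graph_descendant_of(local, remote)? {
        Ok(Relation::LocalAhead)
    } else if repo.graph_descendant_of(remote, local)? {
        Ok(Relation::RemoteAhead)
    } else {
        Ok(Relation::Diverged)
    }
}
```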
+    if fast_forward_remote {
+        if matches!(commit_relation, CommitRelation::NoLocal)
+            && (!dirty_rows.is_empty() || !remote_conflicts.is_empty())
+        {
+            return Ok(Self::pull_conflicts_detected_response(
+                base_commit.clone(),
+                remote_commit.clone(),
+                remote_conflicts.clone(),
+            ));
+        }
+        // Ensure we have pack data for the remote head regardless of existing metadata.
+        let (remote_meta, remote_pack_bytes) = if let Some((meta, pack)) = remote_pack.take() {
+            (meta, pack)
+        } else {
+            let remote_index: HashMap<String, String> = remote_state
+                .iter()
+                .map(|(p, snap)| (p.clone(), snap.hash.clone()))
+                .collect();
+            Self::pull_build_commit_meta_and_pack(&repo, workspace_id, remote_oid, remote_index)?
+        };
+        return self
+            .pull_fast_forward_to_remote(
+                workspace_id,
+                actor_id,
+                base_commit.clone(),
+                &previous_index,
+                &remote_state,
+                &remote_meta,
+                Some(remote_pack_bytes.as_slice()),
+            )
+            .await;
+    }
+
+    // Diverged: merge local into remote (linear history, parent = remote).
+    let Some(local_oid_val) = local_oid else {
+        anyhow::bail!("no local commit to merge");
+    };
+
+    let (meta, pack_bytes, merged_snapshots, commit_hex) = match self
+        .pull_build_diverged_merge_commit(
+            workspace_id,
+            &repo,
+            local_oid_val,
+            remote_oid,
+            req,
+            &base_commit,
+            &remote_commit,
+        )? {
+        Ok(out) => out,
+        Err(dto) => return Ok(dto),
+    };
+
+    self.pull_persist_merged_commit(
+        workspace_id,
+        actor_id,
+        &previous_index,
+        base_commit,
+        remote_commit,
+        remote_pack.take(),
+        meta,
+        pack_bytes,
+        merged_snapshots,
+        commit_hex,
+    )
+    .await
+}
+}
diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/merge.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/merge.rs
new file mode 100644
index 00000000..9b8d62f6
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/merge.rs
@@ -0,0 +1,206 @@
+type PullMergeOk = (CommitMeta, Vec<u8>, HashMap<String, FileSnapshot>, String);
+type PullMergeResult = Result<PullMergeOk, GitPullResultDto>;
+type ConflictEntry = (String, Option<Vec<u8>>, Option<Vec<u8>>, Option<Vec<u8>>);
+
+impl GitWorkspaceService {
+    fn pull_build_diverged_merge_commit(
+        &self,
+        workspace_id: Uuid,
+        repo: &Repository,
+        local_oid_val: git2::Oid,
+        remote_oid: git2::Oid,
+        req: &GitPullRequestDto,
+        base_commit: &Option<Vec<u8>>,
+        remote_commit: &Option<Vec<u8>>,
+    ) -> anyhow::Result<PullMergeResult> {
+        // Build a synthetic "ours" commit from the current workspace state anchored to the
+        // local head so dirty edits participate in the merge against remote changes.
+        let synthetic_ours = self.build_synthetic_commit(workspace_id, repo, local_oid_val)?;
+        let ours_commit = repo.find_commit(synthetic_ours)?;
+        let remote_commit_obj = repo.find_commit(remote_oid)?;
+        let index = repo.merge_commits(&ours_commit, &remote_commit_obj, None)?;
+
+        let conflict_items = collect_conflicts(repo, &index)?;
+        if !conflict_items.is_empty() && req.resolutions.is_empty() {
+            return Ok(Err(Self::pull_conflicts_detected_response(
+                base_commit.clone(),
+                remote_commit.clone(),
+                conflict_items,
+            )));
+        }
+
+        // Collect conflict entries for resolution application.
+        let mut conflict_entries: Vec<ConflictEntry> = Vec::new();
+        {
+            let conflicts_iter = index.conflicts()?;
+            for conflict in conflicts_iter {
+                let conflict = conflict?;
+                let path = conflict
+                    .our
+                    .as_ref()
+                    .or(conflict.their.as_ref())
+                    .or(conflict.ancestor.as_ref())
+                    .and_then(|e| std::str::from_utf8(&e.path).ok())
+                    .ok_or_else(|| anyhow!("missing conflict path"))?
+                    .to_string();
+
+                let to_bytes =
+                    |entry: Option<&git2::IndexEntry>| -> anyhow::Result<Option<Vec<u8>>> {
+                        if let Some(e) = entry {
+                            let blob = repo.find_blob(e.id)?;
+                            Ok(Some(blob.content().to_vec()))
+                        } else {
+                            Ok(None)
+                        }
+                    };
+
+                conflict_entries.push((
+                    path,
+                    to_bytes(conflict.our.as_ref())?,
+                    to_bytes(conflict.their.as_ref())?,
+                    to_bytes(conflict.ancestor.as_ref())?,
+                ));
+            }
+        }
+
+        let resolution_map: std::collections::HashMap<
+            String,
+            &application::git::dtos::GitPullResolutionDto,
+        > = req.resolutions.iter().map(|r| (r.path.clone(), r)).collect();
+
+        // Build the merged state from the resolved index (stage 0) plus user resolutions.
+        let mut merged_snapshots: HashMap<String, FileSnapshot> = HashMap::new();
+        for entry in index.iter() {
+            if index_entry_stage(&entry) != 0 {
+                continue;
+            }
+            let path = index_entry_path(&entry)?;
+            let blob = repo.find_blob(entry.id)?;
+            let bytes = blob.content().to_vec();
+            let hash = sha256_hex(&bytes);
+            let is_text = std::str::from_utf8(&bytes).is_ok();
+            merged_snapshots.insert(
+                path,
+                FileSnapshot {
+                    hash,
+                    data: FileSnapshotData::Inline(bytes),
+                    is_text,
+                },
+            );
+        }
+
+        let mut unresolved: Vec<GitPullConflictItemDto> = Vec::new();
+
+        for (path, ours_bytes, theirs_bytes, base_bytes) in conflict_entries {
+            let resolution = resolution_map.get(&path);
+            if resolution.is_none() {
+                let (mut ours_txt, ours_bin) =
+                    as_text_or_binary(path.as_str(), ours_bytes.as_ref());
+                let (mut theirs_txt, theirs_bin) =
+                    as_text_or_binary(path.as_str(), theirs_bytes.as_ref());
+                let (mut base_txt, base_bin) =
+                    as_text_or_binary(path.as_str(), base_bytes.as_ref());
+                let is_binary = ours_bin || theirs_bin || base_bin;
+                if !is_binary {
+                    ours_txt = strip_front_matter_body(path.as_str(), ours_txt);
+                    theirs_txt = strip_front_matter_body(path.as_str(), theirs_txt);
+                    base_txt = strip_front_matter_body(path.as_str(), base_txt);
+                }
+                unresolved.push(GitPullConflictItemDto {
+                    path: path.clone(),
+                    is_binary,
+                    ours: ours_txt,
+                    theirs: theirs_txt,
+                    base: base_txt,
+                    document_id: None,
+                });
+                continue;
+            }
+
+            let res = *resolution.unwrap();
+            let selected_bytes = match res.choice.as_str() {
+                "ours" => ours_bytes.clone(),
+                "theirs" => theirs_bytes.clone(),
+                "base" => base_bytes.clone(),
+                "custom_text" => {
+                    let content = res
+                        .content
+                        .as_ref()
+                        .ok_or_else(|| anyhow!("custom_text content required"))?;
+                    Some(content.as_bytes().to_vec())
+                }
+                other => anyhow::bail!("unsupported resolution choice {other}"),
+            }
+            .unwrap_or_default();
+            let hash = sha256_hex(&selected_bytes);
+            let is_text = std::str::from_utf8(&selected_bytes).is_ok();
+            merged_snapshots.insert(
+                path.clone(),
+                FileSnapshot {
+                    hash,
+                    data: FileSnapshotData::Inline(selected_bytes),
+                    is_text,
+                },
+            );
+        }
+
+        if !unresolved.is_empty() {
+            return Ok(Err(Self::pull_conflicts_detected_response(
+                base_commit.clone(),
+                remote_commit.clone(),
+                unresolved,
+            )));
+        }
+
+        // Build the tree from merged snapshots without async work.
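Turning the flat `path -> bytes` map into a git tree is the one step git2 does not provide in a single call: a `TreeBuilder` writes one directory level at a time, so nested paths need intermediate trees. The patch delegates this to `build_tree_from_entries`; a simplified sketch of the single-level case, assuming names contain no `/`:

```rust
use std::collections::BTreeMap;

use git2::{FileMode, Oid, Repository};

/// Simplified sketch: build a tree for flat (single-directory) entries.
/// Nested paths would require building subtrees leaf-first and inserting
/// their Oids with `FileMode::Tree`.
fn build_flat_tree(
    repo: &Repository,
    entries: &BTreeMap<String, Vec<u8>>,
) -> Result<Oid, git2::Error> {
    let mut builder = repo.treebuilder(None)?;
    for (name, bytes) in entries {
        debug_assert!(!name.contains('/'), "nested paths need intermediate trees");
        let blob = repo.blob(bytes)?;
        builder.insert(name, blob, FileMode::Blob.into())?;
    }
    builder.write()
}
```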
+ let mut entry_map: BTreeMap> = BTreeMap::new(); + for (path, snap) in merged_snapshots.iter() { + let bytes = match &snap.data { + FileSnapshotData::Inline(b) => b.clone(), + FileSnapshotData::StoragePath(_) => { + anyhow::bail!("unexpected storage-backed snapshot during pull merge") + } + }; + entry_map.insert(path.clone(), bytes); + } + let tree_oid = build_tree_from_entries(repo, &entry_map)?; + let tree = repo.find_tree(tree_oid)?; + let sig = signature_from_parts("RefMD", "refmd@example.com", chrono::Utc::now())?; + let base_parent = repo.find_commit(local_oid_val)?; + let remote_parent = repo.find_commit(remote_oid)?; + let parent_refs: [&git2::Commit; 2] = [&base_parent, &remote_parent]; + let commit_oid = repo.commit( + None, + &sig, + &sig, + "Merge remote changes", + &tree, + &parent_refs, + )?; + + let mut file_hash_index: HashMap = HashMap::new(); + for (path, snap) in merged_snapshots.iter() { + file_hash_index.insert(path.clone(), snap.hash.clone()); + } + + let mut pack_builder = repo.packbuilder()?; + pack_builder.insert_commit(commit_oid)?; + // Include both parents to avoid missing bases when applying packs later. + pack_builder.insert_commit(base_parent.id())?; + pack_builder.insert_commit(remote_parent.id())?; + let mut pack_buf = git2::Buf::new(); + pack_builder.write_buf(&mut pack_buf)?; + let pack_bytes = pack_buf.to_vec(); + + let commit_hex = encode_commit_id(commit_oid.as_bytes()); + let meta = CommitMeta { + commit_id: commit_oid.as_bytes().to_vec(), + // Keep workspace history linear: parent is previous workspace head. + parent_commit_id: base_commit.clone(), + message: Some("Merge remote changes".to_string()), + author_name: Some("RefMD".to_string()), + author_email: Some("refmd@example.com".to_string()), + committed_at: chrono::Utc::now(), + pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex), + file_hash_index, + }; + + Ok(Ok((meta, pack_bytes, merged_snapshots, commit_hex))) + } +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/pack.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/pack.rs new file mode 100644 index 00000000..da629605 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/pack.rs @@ -0,0 +1,48 @@ +impl GitWorkspaceService { +fn pull_build_commit_meta_and_pack( + repo: &Repository, + workspace_id: Uuid, + oid: git2::Oid, + file_hash_index: HashMap, +) -> anyhow::Result<(CommitMeta, Vec)> { + let commit = repo.find_commit(oid)?; + let committed_at = git_time_to_datetime(commit.time())?; + let message = commit + .message() + .map(|m| m.trim_end_matches('\n').to_string()) + .filter(|m| !m.trim().is_empty()); + let author = commit.author(); + let author_name = author.name().map(|s| s.to_string()); + let author_email = author.email().map(|s| s.to_string()); + let parent_commit_id = if commit.parent_count() > 0 { + Some(commit.parent_id(0)?.as_bytes().to_vec()) + } else { + None + }; + + let mut pack_builder = repo.packbuilder()?; + pack_builder.insert_commit(oid)?; + if let Some(parent_id) = parent_commit_id.as_ref() { + if let Ok(parent_oid) = git2::Oid::from_bytes(parent_id) { + let _ = pack_builder.insert_commit(parent_oid); + } + } + let mut pack_buf = git2::Buf::new(); + pack_builder.write_buf(&mut pack_buf)?; + let pack_bytes = pack_buf.to_vec(); + + let commit_hex = encode_commit_id(oid.as_bytes()); + let meta = CommitMeta { + commit_id: oid.as_bytes().to_vec(), + parent_commit_id, + message, + author_name, + author_email, + committed_at, + pack_key: 
format!("git/packs/{}/{}.pack", workspace_id, commit_hex), + file_hash_index, + }; + + Ok((meta, pack_bytes)) +} +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/persist.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/persist.rs new file mode 100644 index 00000000..f853cc6d --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/persist.rs @@ -0,0 +1,222 @@ +impl GitWorkspaceService { +async fn pull_fast_forward_to_remote( + &self, + workspace_id: Uuid, + actor_id: Uuid, + base_commit: Option>, + previous_index: &HashMap, + remote_state: &HashMap, + remote_meta: &CommitMeta, + remote_pack_bytes: Option<&[u8]>, +) -> anyhow::Result { + if let Some(pack_bytes) = remote_pack_bytes { + self.git_storage + .store_pack(workspace_id, pack_bytes, remote_meta) + .await?; + } + self.upsert_commit_record(workspace_id, remote_meta).await?; + + let snapshot_keys = self + .store_commit_snapshots(workspace_id, &remote_meta.commit_id, remote_state) + .await?; + + if let Err(err) = self + .git_storage + .set_latest_commit(workspace_id, Some(remote_meta)) + .await + { + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + return Err(err.into()); + } + + let mut tx = self.pool.begin().await?; + let repo_row = sqlx::query("SELECT initialized FROM git_repository_state WHERE workspace_id = $1") + .bind(workspace_id) + .fetch_optional(&mut *tx) + .await?; + let Some(repo_row) = repo_row else { + tx.rollback().await.ok(); + anyhow::bail!("repository not initialized") + }; + let initialized: bool = repo_row.get("initialized"); + if !initialized { + tx.rollback().await.ok(); + anyhow::bail!("repository not initialized") + } + + sqlx::query( + r#"INSERT INTO git_commits ( + commit_id, + parent_commit_id, + workspace_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index + ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) + ON CONFLICT (commit_id, workspace_id) DO NOTHING"#, + ) + .bind(remote_meta.commit_id.clone()) + .bind(remote_meta.parent_commit_id.clone()) + .bind(workspace_id) + .bind(remote_meta.message.clone()) + .bind(remote_meta.author_name.clone()) + .bind(remote_meta.author_email.clone()) + .bind(remote_meta.committed_at) + .bind(remote_meta.pack_key.clone()) + .bind(Json(&remote_meta.file_hash_index)) + .execute(&mut *tx) + .await?; + + sqlx::query("UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&mut *tx) + .await?; + tx.commit().await?; + + let files_changed = self + .apply_state_to_workspace(workspace_id, remote_state, previous_index) + .await?; + + self.materialize_documents_from_state(workspace_id, actor_id, remote_state) + .await?; + self.apply_merged_to_documents(workspace_id, remote_state) + .await?; + self.clear_dirty(workspace_id).await.map_err(|err| { + error!( + workspace_id = %workspace_id, + error = %err, + "git_pull_clear_dirty_failed" + ); + err + })?; + + info!( + workspace_id = %workspace_id, + commit = %encode_commit_id(&remote_meta.commit_id), + "git_pull_fast_forward_remote" + ); + + Ok(GitPullResultDto { + success: true, + message: "fast-forwarded to remote".to_string(), + files_changed, + commit_hash: Some(encode_commit_id(&remote_meta.commit_id)), + conflicts: None, + base_commit, + remote_commit: Some(remote_meta.commit_id.clone()), + }) +} + +async fn pull_persist_merged_commit( + &self, + workspace_id: Uuid, + actor_id: Uuid, + previous_index: &HashMap, + base_commit: Option>, + 
remote_commit: Option>, + remote_pack: Option<(CommitMeta, Vec)>, + meta: CommitMeta, + pack_bytes: Vec, + merged_snapshots: HashMap, + commit_hex: String, +) -> anyhow::Result { + if let Some((remote_meta, remote_pack_bytes)) = remote_pack { + self.git_storage + .store_pack(workspace_id, &remote_pack_bytes, &remote_meta) + .await?; + self.upsert_commit_record(workspace_id, &remote_meta).await?; + } + + let snapshot_keys = self + .store_commit_snapshots(workspace_id, &meta.commit_id, &merged_snapshots) + .await?; + + if let Err(err) = self + .git_storage + .store_pack(workspace_id, &pack_bytes, &meta) + .await + { + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + return Err(err.into()); + } + + if let Err(err) = self + .git_storage + .set_latest_commit(workspace_id, Some(&meta)) + .await + { + let _ = self.git_storage.delete_pack(workspace_id, &meta.commit_id).await; + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + return Err(err.into()); + } + + let mut tx = self.pool.begin().await?; + sqlx::query( + r#"INSERT INTO git_commits ( + commit_id, + parent_commit_id, + workspace_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index + ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9)"#, + ) + .bind(meta.commit_id.clone()) + .bind(meta.parent_commit_id.clone()) + .bind(workspace_id) + .bind(meta.message.clone()) + .bind(meta.author_name.clone()) + .bind(meta.author_email.clone()) + .bind(meta.committed_at) + .bind(meta.pack_key.clone()) + .bind(Json(&meta.file_hash_index)) + .execute(&mut *tx) + .await?; + + sqlx::query("UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&mut *tx) + .await?; + tx.commit().await?; + + let files_changed = self + .apply_state_to_workspace(workspace_id, &merged_snapshots, previous_index) + .await?; + + self.materialize_documents_from_state(workspace_id, actor_id, &merged_snapshots) + .await?; + self.apply_merged_to_documents(workspace_id, &merged_snapshots) + .await?; + + self.clear_dirty(workspace_id).await.map_err(|err| { + error!( + workspace_id = %workspace_id, + error = %err, + "git_pull_merge_clear_dirty_failed" + ); + err + })?; + + Ok(GitPullResultDto { + success: true, + message: "remote changes merged".to_string(), + files_changed, + commit_hash: Some(commit_hex), + conflicts: None, + base_commit, + remote_commit, + }) +} +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/response.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/response.rs new file mode 100644 index 00000000..7ff2b71b --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/response.rs @@ -0,0 +1,22 @@ +impl GitWorkspaceService { +fn pull_dirty_remote_overlap(dirty_rows: &[DirtyRow], remote_changed_paths: &[String]) -> bool { + let dirty_paths: HashSet = dirty_rows.iter().map(|r| r.path.clone()).collect(); + remote_changed_paths.iter().any(|p| dirty_paths.contains(p)) +} + +fn pull_conflicts_detected_response( + base_commit: Option>, + remote_commit: Option>, + conflicts: Vec, +) -> GitPullResultDto { + GitPullResultDto { + success: false, + message: "conflicts detected".to_string(), + files_changed: 0, + commit_hash: None, + conflicts: Some(conflicts), + base_commit, + remote_commit, + } +} +} diff --git a/api/crates/infrastructure/src/git/workspace/pull/pull_once/state.rs b/api/crates/infrastructure/src/git/workspace/pull/pull_once/state.rs new 
file mode 100644
index 00000000..b857208c
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/pull/pull_once/state.rs
@@ -0,0 +1,88 @@
+impl GitWorkspaceService {
+    fn pull_collect_state_from_commit(
+        repo: &Repository,
+        oid: git2::Oid,
+    ) -> anyhow::Result<HashMap<String, FileSnapshot>> {
+        let commit = repo.find_commit(oid)?;
+        let tree = commit.tree()?;
+        let mut out: HashMap<String, FileSnapshot> = HashMap::new();
+
+        fn walk(
+            repo: &Repository,
+            tree: &git2::Tree,
+            prefix: &str,
+            out: &mut HashMap<String, FileSnapshot>,
+        ) -> anyhow::Result<()> {
+            for entry in tree.iter() {
+                let name = entry.name().unwrap_or_default();
+                let path = if prefix.is_empty() {
+                    name.to_string()
+                } else {
+                    format!("{prefix}{name}")
+                };
+                match entry.kind() {
+                    Some(git2::ObjectType::Tree) => {
+                        if let Some(sub) = entry.to_object(repo)?.as_tree() {
+                            walk(repo, sub, &(path.clone() + "/"), out)?;
+                        }
+                    }
+                    Some(git2::ObjectType::Blob) => {
+                        let blob = repo.find_blob(entry.id())?;
+                        let bytes = blob.content().to_vec();
+                        let hash = sha256_hex(&bytes);
+                        let is_text = std::str::from_utf8(&bytes).is_ok();
+                        out.insert(
+                            path,
+                            FileSnapshot {
+                                hash,
+                                data: FileSnapshotData::Inline(bytes),
+                                is_text,
+                            },
+                        );
+                    }
+                    _ => {}
+                }
+            }
+            Ok(())
+        }
+
+        walk(repo, &tree, "", &mut out)?;
+        Ok(out)
+    }
+
+    fn pull_remote_changed_paths(
+        base_index: &HashMap<String, String>,
+        remote_state: &HashMap<String, FileSnapshot>,
+    ) -> Vec<String> {
+        let mut remote_changed_paths: HashSet<String> = HashSet::new();
+        for (path, snap) in remote_state.iter() {
+            if base_index.get(path) != Some(&snap.hash) {
+                remote_changed_paths.insert(path.clone());
+            }
+        }
+        for path in base_index.keys() {
+            if !remote_state.contains_key(path) {
+                remote_changed_paths.insert(path.clone());
+            }
+        }
+        remote_changed_paths.into_iter().collect()
+    }
+
+    async fn pull_build_conflicts_for_paths(
+        &self,
+        workspace_id: Uuid,
+        paths: &[String],
+        current_state: &HashMap<String, FileSnapshot>,
+        remote_state: &HashMap<String, FileSnapshot>,
+        local_meta: Option<&CommitMeta>,
+    ) -> anyhow::Result<Vec<GitPullConflictItemDto>> {
+        let mut remote_conflicts: Vec<GitPullConflictItemDto> = Vec::new();
+        for path in paths.iter() {
+            let item = self
+                .build_conflict_item(workspace_id, path, current_state, remote_state, local_meta)
+                .await?;
+            remote_conflicts.push(item);
+        }
+        Ok(remote_conflicts)
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/pull/repair.rs b/api/crates/infrastructure/src/git/workspace/pull/repair.rs
new file mode 100644
index 00000000..783bdbc1
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/pull/repair.rs
@@ -0,0 +1,129 @@
+impl GitWorkspaceService {
+    async fn repair_missing_commit_metadata(
+        &self,
+        workspace_id: Uuid,
+        start_hex: &str,
+    ) -> anyhow::Result<()> {
+        let mut current_hex = start_hex.to_string();
+        let mut visited = HashSet::new();
+        loop {
+            if !visited.insert(current_hex.clone()) {
+                break;
+            }
+            let meta =
+                if let Some(meta) = self.commit_meta_by_hex(workspace_id, &current_hex).await? {
+                    meta
+                } else if let Some(meta) = self
+                    .reconstruct_commit_meta_from_pack(workspace_id, &current_hex)
+                    .await?
+ { + meta + } else { + anyhow::bail!( + "commit {} not found in database or pack storage", + current_hex + ); + }; + self.git_storage + .restore_commit_meta(workspace_id, &meta) + .await?; + self.upsert_commit_record(workspace_id, &meta).await?; + if let Some(parent) = meta.parent_commit_id.as_ref() { + current_hex = encode_commit_id(parent); + } else { + break; + } + } + Ok(()) + } + + async fn upsert_commit_record( + &self, + workspace_id: Uuid, + meta: &CommitMeta, + ) -> anyhow::Result<()> { + sqlx::query( + r#"INSERT INTO git_commits ( + commit_id, + parent_commit_id, + workspace_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index + ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) + ON CONFLICT (workspace_id, commit_id) DO UPDATE SET + parent_commit_id = EXCLUDED.parent_commit_id, + message = EXCLUDED.message, + author_name = EXCLUDED.author_name, + author_email = EXCLUDED.author_email, + committed_at = EXCLUDED.committed_at, + pack_key = EXCLUDED.pack_key, + file_hash_index = EXCLUDED.file_hash_index"#, + ) + .bind(meta.commit_id.clone()) + .bind(meta.parent_commit_id.clone()) + .bind(workspace_id) + .bind(meta.message.clone()) + .bind(meta.author_name.clone()) + .bind(meta.author_email.clone()) + .bind(meta.committed_at) + .bind(meta.pack_key.clone()) + .bind(Json(&meta.file_hash_index)) + .execute(&self.pool) + .await?; + Ok(()) + } + + async fn reconstruct_commit_meta_from_pack( + &self, + workspace_id: Uuid, + commit_hex: &str, + ) -> anyhow::Result> { + let commit_id = decode_commit_id(commit_hex)?; + let Some(pack_bytes) = self + .git_storage + .fetch_pack_for_commit(workspace_id, &commit_id) + .await? + else { + return Ok(None); + }; + let temp_dir = tempfile::tempdir()?; + let repo = Repository::init_bare(temp_dir.path())?; + apply_pack_to_repo(&repo, &pack_bytes)?; + let oid = git2::Oid::from_bytes(&commit_id)?; + let commit = repo.find_commit(oid)?; + let committed_at = git_time_to_datetime(commit.time())?; + let message = commit + .message() + .map(|m| m.trim_end_matches('\n').to_string()) + .filter(|m| !m.trim().is_empty()); + let author = commit.author(); + let author_name = author.name().map(|s| s.to_string()); + let author_email = author.email().map(|s| s.to_string()); + let parent_commit_id = if commit.parent_count() > 0 { + let parent = commit.parent_id(0)?; + Some(parent.as_bytes().to_vec()) + } else { + None + }; + let files = read_commit_files(&repo, commit_id.as_slice())?; + let mut file_hash_index: HashMap = HashMap::new(); + for (path, bytes) in files.into_iter() { + file_hash_index.insert(path, sha256_hex(&bytes)); + } + let meta = CommitMeta { + commit_id, + parent_commit_id, + message, + author_name, + author_email, + committed_at, + pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex), + file_hash_index, + }; + Ok(Some(meta)) + } +} diff --git a/api/crates/infrastructure/src/git/workspace/remote.rs b/api/crates/infrastructure/src/git/workspace/remote.rs new file mode 100644 index 00000000..2f29de97 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/remote.rs @@ -0,0 +1,88 @@ +impl GitWorkspaceService { + async fn remote_head_inner( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> anyhow::Result>> { + let state = self.load_repository_state(workspace_id).await?; + let Some((initialized, branch_default)) = state else { + anyhow::bail!("repository not initialized"); + }; + if !initialized { + anyhow::bail!("repository not initialized"); + } + if cfg.repository_url.is_empty() { + 
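The upsert above names the conflict target `(workspace_id, commit_id)` while `persist.rs` writes `(commit_id, workspace_id)`; both work because Postgres infers the arbiter unique index from the column set, not from the order inside the parentheses. A reduced sketch of the pattern with sqlx, assuming a `git_commits` table with a composite unique key on those two columns:

```rust
// Reduced upsert sketch (sqlx + Postgres). The conflict target must match a
// unique constraint by column *set*; the order written here is free.
async fn upsert_commit(
    pool: &sqlx::PgPool,
    workspace_id: uuid::Uuid,
    commit_id: &[u8],
    message: &str,
) -> sqlx::Result<()> {
    sqlx::query(
        r#"INSERT INTO git_commits (workspace_id, commit_id, message)
           VALUES ($1, $2, $3)
           ON CONFLICT (workspace_id, commit_id)
           DO UPDATE SET message = EXCLUDED.message"#,
    )
    .bind(workspace_id)
    .bind(commit_id)
    .bind(message)
    .execute(pool)
    .await?;
    Ok(())
}
```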
anyhow::bail!("remote not configured"); + } + let branch = if cfg.branch_name.is_empty() { + branch_default + } else { + cfg.branch_name.clone() + }; + let temp_dir = TempDirBuilder::new() + .prefix("git-remote-head-") + .tempdir() + .map_err(|e| anyhow!(e))?; + let repo = Repository::init_bare(temp_dir.path())?; + let head = fetch_remote_head(&repo, cfg, &branch)?; + Ok(head.map(|oid| oid.as_bytes().to_vec())) + } + + async fn check_remote_inner( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> anyhow::Result { + if cfg.repository_url.is_empty() { + return Ok(GitRemoteCheckDto { + ok: true, + message: "remote not configured".to_string(), + reason: Some("no_remote".to_string()), + }); + } + let branch = cfg.branch_name.clone(); + let temp_dir = TempDirBuilder::new() + .prefix("git-check-") + .tempdir() + .map_err(|e| anyhow!(e))?; + let repo = Repository::init_bare(temp_dir.path())?; + let result = match fetch_remote_head(&repo, cfg, &branch) { + Ok(Some(_)) => GitRemoteCheckDto { + ok: true, + message: "remote reachable".to_string(), + reason: None, + }, + Ok(None) => GitRemoteCheckDto { + ok: false, + message: format!("branch '{branch}' not found on remote"), + reason: Some("branch_missing".to_string()), + }, + Err(err) => { + let lower = err.to_string().to_lowercase(); + let (reason, msg) = if lower.contains("git_http_auth_redirect") { + ( + Some("auth_required".to_string()), + "remote requires authentication or SSO approval".to_string(), + ) + } else if lower.contains("git_http_not_found") || lower.contains("status code: 404") + { + ( + Some("repo_not_found".to_string()), + "repository URL or branch not found".to_string(), + ) + } else { + (None, err.to_string()) + }; + GitRemoteCheckDto { + ok: false, + message: msg, + reason, + } + } + }; + drop(repo); + let _ = temp_dir.close(); + info!(workspace_id = %workspace_id, ok = %result.ok, reason = ?result.reason, "git_remote_check_completed"); + Ok(result) + } +} diff --git a/api/crates/infrastructure/src/git/workspace/service/history.rs b/api/crates/infrastructure/src/git/workspace/service/history.rs new file mode 100644 index 00000000..8818cd8c --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/service/history.rs @@ -0,0 +1,671 @@ +impl GitWorkspaceService { + pub fn new( + pool: PgPool, + git_storage: Arc, + storage: Arc, + snapshot: Arc, + realtime: Arc, + docs: Arc, + doc_paths: Arc, + ) -> anyhow::Result { + Ok(Self { + pool, + git_storage, + storage, + snapshot, + realtime, + docs, + doc_paths, + }) + } + + fn is_missing_objects(err: &anyhow::Error) -> bool { + let msg = err.to_string().to_lowercase(); + msg.contains("missing objects") || msg.contains("packfile is missing") + } + + async fn recover_missing_objects( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + ) -> anyhow::Result<()> { + // Pick branch from cfg or fallback to repository state default. + let branch = if cfg.branch_name.is_empty() { + self.load_repository_state(workspace_id) + .await? 
+ .map(|(_, default_branch)| default_branch) + .unwrap_or_else(|| "main".to_string()) + } else { + cfg.branch_name.clone() + }; + + let mut tx = self.pool.begin().await?; + sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&mut *tx) + .await?; + sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1") + .bind(workspace_id) + .execute(&mut *tx) + .await?; + sqlx::query( + "UPDATE git_repository_state SET initialized = true, default_branch = $2, updated_at = now() WHERE workspace_id = $1", + ) + .bind(workspace_id) + .bind(&branch) + .execute(&mut *tx) + .await?; + tx.commit().await?; + + let _ = self.git_storage.delete_all(workspace_id).await; + let _ = self.git_storage.set_latest_commit(workspace_id, None).await; + + // Re-bootstrap remote history (best effort). + let _ = self + .bootstrap_remote_history(workspace_id, cfg, branch.as_str()) + .await; + Ok(()) + } + + async fn load_repository_state( + &self, + workspace_id: Uuid, + ) -> anyhow::Result> { + let row = sqlx::query( + "SELECT initialized, default_branch FROM git_repository_state WHERE workspace_id = $1", + ) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| (r.get("initialized"), r.get("default_branch")))) + } + + async fn latest_commit_meta(&self, workspace_id: Uuid) -> anyhow::Result> { + let row = sqlx::query( + r#"SELECT commit_id, parent_commit_id, message, author_name, author_email, + committed_at, pack_key, file_hash_index + FROM git_commits + WHERE workspace_id = $1 + ORDER BY committed_at DESC + LIMIT 1"#, + ) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + + row.map(row_to_commit_meta).transpose() + } + + async fn load_commit_meta_ref( + &self, + workspace_id: Uuid, + rev: &str, + ) -> anyhow::Result> { + if let Some(base) = rev.strip_suffix('^') { + let Some(meta) = self.commit_meta_by_hex(workspace_id, base).await? else { + return Ok(None); + }; + if let Some(parent_id) = meta.parent_commit_id.clone() { + return self + .commit_meta_by_id(workspace_id, parent_id.as_slice()) + .await; + } + return Ok(None); + } + self.commit_meta_by_hex(workspace_id, rev).await + } + + async fn commit_meta_by_id( + &self, + workspace_id: Uuid, + commit_id: &[u8], + ) -> anyhow::Result> { + let row = sqlx::query( + r#"SELECT commit_id, parent_commit_id, message, author_name, author_email, + committed_at, pack_key, file_hash_index + FROM git_commits + WHERE workspace_id = $1 AND commit_id = $2 + LIMIT 1"#, + ) + .bind(workspace_id) + .bind(commit_id) + .fetch_optional(&self.pool) + .await?; + row.map(row_to_commit_meta).transpose() + } + + async fn commit_meta_by_hex( + &self, + workspace_id: Uuid, + hex: &str, + ) -> anyhow::Result> { + let bytes = application::git::ports::git_storage::decode_commit_id(hex)?; + let row = sqlx::query( + r#"SELECT commit_id, parent_commit_id, message, author_name, author_email, + committed_at, pack_key, file_hash_index + FROM git_commits + WHERE workspace_id = $1 AND commit_id = $2 + LIMIT 1"#, + ) + .bind(workspace_id) + .bind(bytes) + .fetch_optional(&self.pool) + .await?; + row.map(row_to_commit_meta).transpose() + } + + async fn ensure_latest_meta(&self, workspace_id: Uuid) -> anyhow::Result> { + if let Some(meta) = self.latest_commit_meta(workspace_id).await? { + return Ok(Some(meta)); + } + let Some(storage_latest) = self.git_storage.latest_commit(workspace_id).await? 
else { + return Ok(None); + }; + info!(workspace_id = %workspace_id, commit = %encode_commit_id(&storage_latest.commit_id), "git_backfill_latest_from_storage"); + self.backfill_commits_from_storage(workspace_id, &storage_latest) + .await?; + Ok(Some(storage_latest)) + } + + async fn bootstrap_remote_history( + &self, + workspace_id: Uuid, + cfg: &UserGitCfg, + branch: &str, + ) -> anyhow::Result> { + let temp_dir = TempDirBuilder::new() + .prefix("git-bootstrap-") + .tempdir() + .map_err(|e| anyhow!(e))?; + let repo = Repository::init_bare(temp_dir.path())?; + + let Some(remote_head) = fetch_remote_head(&repo, cfg, branch)? else { + return Ok(None); + }; + + let ordered = { + let mut revwalk = repo.revwalk()?; + revwalk.push(remote_head)?; + revwalk.set_sorting(Sort::TOPOLOGICAL | Sort::REVERSE)?; + + let mut collected = Vec::new(); + for oid_result in revwalk { + collected.push(oid_result?); + } + collected + }; + + if ordered.is_empty() { + return Ok(None); + } + + let pack_bytes_master = read_first_pack(repo.path())?.ok_or_else(|| { + anyhow!( + "remote fetch produced no pack files for workspace {}", + workspace_id + ) + })?; + + let mut latest_meta = self.git_storage.latest_commit(workspace_id).await?; + + for oid in ordered { + let existing_meta = self.commit_meta_by_id(workspace_id, oid.as_bytes()).await?; + let existing_pack = self + .git_storage + .fetch_pack_for_commit(workspace_id, oid.as_bytes()) + .await?; + // Skip only when both DB row and pack already exist. + if existing_meta.is_some() && existing_pack.is_some() { + latest_meta = existing_meta; + continue; + } + + let (meta, snapshots, pack_bytes) = { + let commit = repo.find_commit(oid)?; + let committed_at = git_time_to_datetime(commit.time())?; + let message = commit + .message() + .map(|m| m.trim_end_matches('\n').to_string()) + .filter(|m| !m.trim().is_empty()); + let author = commit.author(); + let author_name = author.name().map(|s| s.to_string()); + let author_email = author.email().map(|s| s.to_string()); + let parent_commit_id = if commit.parent_count() > 0 { + let parent = commit.parent_id(0)?; + Some(parent.as_bytes().to_vec()) + } else { + None + }; + + let files = read_commit_files(&repo, oid.as_bytes())?; + let mut snapshots: HashMap = HashMap::new(); + let mut file_hash_index: HashMap = HashMap::new(); + for (path, bytes) in files.into_iter() { + let hash = sha256_hex(&bytes); + let is_text = std::str::from_utf8(&bytes).is_ok(); + file_hash_index.insert(path.clone(), hash.clone()); + snapshots.insert( + path, + FileSnapshot { + hash, + data: FileSnapshotData::Inline(bytes), + is_text, + }, + ); + } + + let pack_builder = repo.packbuilder()?; + // Use the full remote pack for every commit to avoid thin-pack corruption. 
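A pack that delta-compresses against base objects it does not itself contain is a "thin pack" and can only be indexed inside a repository that already holds those bases; reusing the full fetched pack for every commit trades storage for the guarantee that each stored pack is self-contained. The per-commit alternative, which the merge path in `merge.rs` uses, is to insert the commit and its parents into one `PackBuilder` so the objects a later apply step needs travel together. A sketch:

```rust
use git2::{Buf, Oid, Repository};

/// Build a self-contained pack for `commit`. Including the parents keeps the
/// trees and blobs a later `apply_pack_files` step may need inside the pack,
/// so applying it never depends on objects that only exist elsewhere.
fn pack_with_parents(repo: &Repository, commit: Oid) -> Result<Vec<u8>, git2::Error> {
    let mut builder = repo.packbuilder()?;
    builder.insert_commit(commit)?;
    for parent in repo.find_commit(commit)?.parent_ids() {
        builder.insert_commit(parent)?;
    }
    let mut buf = Buf::new();
    builder.write_buf(&mut buf)?;
    Ok(buf.to_vec())
}
```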
+ let pack_bytes = pack_bytes_master.clone(); + drop(pack_builder); + + let commit_id = oid.as_bytes().to_vec(); + let pack_key = format!( + "git/packs/{}/{}.pack", + workspace_id, + encode_commit_id(&commit_id) + ); + + let meta = CommitMeta { + commit_id, + parent_commit_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index, + }; + + (meta, snapshots, pack_bytes) + }; + + let prev_latest = latest_meta.clone(); + let snapshot_keys = match self + .store_commit_snapshots(workspace_id, &meta.commit_id, &snapshots) + .await + { + Ok(keys) => keys, + Err(err) => { + return Err(err); + } + }; + + if let Err(err) = self + .git_storage + .store_pack(workspace_id, &pack_bytes, &meta) + .await + { + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + return Err(err.into()); + } + + if let Err(err) = self + .git_storage + .set_latest_commit(workspace_id, Some(&meta)) + .await + { + let _ = self + .git_storage + .delete_pack(workspace_id, &meta.commit_id) + .await; + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + let _ = self + .git_storage + .set_latest_commit(workspace_id, prev_latest.as_ref()) + .await; + return Err(err.into()); + } + + let mut tx = self.pool.begin().await?; + let upsert_res = sqlx::query( + r#"INSERT INTO git_commits ( + commit_id, + parent_commit_id, + workspace_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index + ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) + ON CONFLICT (workspace_id, commit_id) DO UPDATE SET + parent_commit_id = EXCLUDED.parent_commit_id, + message = EXCLUDED.message, + author_name = EXCLUDED.author_name, + author_email = EXCLUDED.author_email, + committed_at = EXCLUDED.committed_at, + pack_key = EXCLUDED.pack_key, + file_hash_index = EXCLUDED.file_hash_index"#, + ) + .bind(meta.commit_id.clone()) + .bind(meta.parent_commit_id.clone()) + .bind(workspace_id) + .bind(meta.message.clone()) + .bind(meta.author_name.clone()) + .bind(meta.author_email.clone()) + .bind(meta.committed_at) + .bind(meta.pack_key.clone()) + .bind(Json(&meta.file_hash_index)) + .execute(&mut *tx) + .await; + + if let Err(err) = upsert_res { + tx.rollback().await.ok(); + let _ = self + .git_storage + .delete_pack(workspace_id, &meta.commit_id) + .await; + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + let _ = self + .git_storage + .set_latest_commit(workspace_id, prev_latest.as_ref()) + .await; + return Err(err.into()); + } + + if let Err(err) = sqlx::query( + "UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1", + ) + .bind(workspace_id) + .execute(&mut *tx) + .await + { + tx.rollback().await.ok(); + let _ = self + .git_storage + .delete_pack(workspace_id, &meta.commit_id) + .await; + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + let _ = self + .git_storage + .set_latest_commit(workspace_id, prev_latest.as_ref()) + .await; + return Err(err.into()); + } + + if let Err(err) = tx.commit().await { + let _ = self + .git_storage + .delete_pack(workspace_id, &meta.commit_id) + .await; + for key in snapshot_keys.iter().rev() { + let _ = self.git_storage.delete_blob(key).await; + } + let _ = self + .git_storage + .set_latest_commit(workspace_id, prev_latest.as_ref()) + .await; + return Err(err.into()); + } + + latest_meta = Some(meta); + } + + drop(repo); + let _ = temp_dir.close(); + self.git_storage + 
.latest_commit(workspace_id) + .await + .map_err(Into::into) + } + + async fn backfill_commits_from_storage( + &self, + workspace_id: Uuid, + latest: &CommitMeta, + ) -> anyhow::Result<()> { + let mut pending = Vec::new(); + let mut cursor = Some(latest.clone()); + while let Some(meta) = cursor { + if self + .commit_meta_by_id(workspace_id, meta.commit_id.as_slice()) + .await? + .is_some() + { + break; + } + pending.push(meta.clone()); + cursor = match meta.parent_commit_id.clone() { + Some(parent) => { + self.git_storage + .commit_meta(workspace_id, parent.as_slice()) + .await? + } + None => None, + }; + } + if pending.is_empty() { + return Ok(()); + } + pending.reverse(); + let mut tx = self.pool.begin().await?; + for meta in pending.into_iter() { + sqlx::query( + r#"INSERT INTO git_commits ( + commit_id, + parent_commit_id, + workspace_id, + message, + author_name, + author_email, + committed_at, + pack_key, + file_hash_index + ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) + ON CONFLICT (workspace_id, commit_id) DO NOTHING"#, + ) + .bind(meta.commit_id.clone()) + .bind(meta.parent_commit_id.clone()) + .bind(workspace_id) + .bind(meta.message.clone()) + .bind(meta.author_name.clone()) + .bind(meta.author_email.clone()) + .bind(meta.committed_at) + .bind(meta.pack_key.clone()) + .bind(Json(&meta.file_hash_index)) + .execute(&mut *tx) + .await?; + } + tx.commit().await?; + Ok(()) + } + + async fn collect_commit_chain( + &self, + workspace_id: Uuid, + start: CommitMeta, + ) -> anyhow::Result> { + let mut chain = Vec::new(); + let mut cursor = Some(start); + while let Some(meta) = cursor { + chain.push(meta.clone()); + cursor = match meta.parent_commit_id.clone() { + Some(parent) => { + self.commit_meta_by_id(workspace_id, parent.as_slice()) + .await? + } + None => None, + }; + } + Ok(chain) + } + + async fn remove_commits( + &self, + workspace_id: Uuid, + commits: &[CommitMeta], + ) -> anyhow::Result<()> { + for meta in commits { + let commit_hex = encode_commit_id(&meta.commit_id); + if let Err(error) = self + .git_storage + .delete_pack(workspace_id, &meta.commit_id) + .await + { + warn!( + workspace_id = %workspace_id, + commit = %commit_hex, + error = ?error, + "git_commit_cleanup_pack_failed" + ); + } + for path in meta.file_hash_index.keys() { + let key = blob_key(workspace_id, &meta.commit_id, path); + if let Err(error) = self.git_storage.delete_blob(&key).await { + warn!( + workspace_id = %workspace_id, + commit = %commit_hex, + path = %path, + error = ?error, + "git_commit_cleanup_blob_failed" + ); + } + } + sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1 AND commit_id = $2") + .bind(workspace_id) + .bind(meta.commit_id.clone()) + .execute(&self.pool) + .await?; + } + Ok(()) + } + + async fn realign_commit_history( + &self, + workspace_id: Uuid, + storage_latest: Option, + db_latest: Option, + ) -> anyhow::Result<()> { + match (storage_latest, db_latest) { + (Some(storage), Some(db)) => { + if storage.commit_id == db.commit_id { + return Ok(()); + } + let storage_id = storage.commit_id.clone(); + let mut cursor = Some(db.clone()); + let mut reached_storage = false; + let mut to_prune: Vec = Vec::new(); + while let Some(meta) = cursor.clone() { + if meta.commit_id == storage_id { + reached_storage = true; + break; + } + to_prune.push(meta.clone()); + cursor = match meta.parent_commit_id.clone() { + Some(parent) => { + self.commit_meta_by_id(workspace_id, parent.as_slice()) + .await? 
+ } + None => None, + }; + } + if !reached_storage { + let all = self.collect_commit_chain(workspace_id, db.clone()).await?; + if !all.is_empty() { + info!( + workspace_id = %workspace_id, + removed = all.len(), + "git_commit_pointer_reset_db_chain" + ); + self.remove_commits(workspace_id, &all).await?; + } + } else if !to_prune.is_empty() { + info!( + workspace_id = %workspace_id, + removed = to_prune.len(), + "git_commit_pointer_pruned_db_commits" + ); + self.remove_commits(workspace_id, &to_prune).await?; + } + self.backfill_commits_from_storage(workspace_id, &storage) + .await?; + } + (Some(storage), None) => { + self.backfill_commits_from_storage(workspace_id, &storage) + .await?; + } + (None, Some(db)) => { + let all = self.collect_commit_chain(workspace_id, db).await?; + if !all.is_empty() { + info!( + workspace_id = %workspace_id, + removed = all.len(), + "git_commit_pointer_dropped_db_history" + ); + self.remove_commits(workspace_id, &all).await?; + } + } + (None, None) => {} + } + Ok(()) + } + + async fn prune_commits_from_head( + &self, + workspace_id: Uuid, + commits: &[CommitMeta], + ) -> anyhow::Result<()> { + if commits.is_empty() { + return Ok(()); + } + self.remove_commits(workspace_id, commits).await?; + let new_latest = self.latest_commit_meta(workspace_id).await?; + self.git_storage + .set_latest_commit(workspace_id, new_latest.as_ref()) + .await?; + Ok(()) + } + + async fn ensure_storage_commit_integrity(&self, workspace_id: Uuid) -> anyhow::Result<()> { + loop { + let Some(latest) = self.latest_commit_meta(workspace_id).await? else { + self.git_storage + .set_latest_commit(workspace_id, None) + .await?; + return Ok(()); + }; + let chain = self + .collect_commit_chain(workspace_id, latest.clone()) + .await?; + let mut missing_idx: Option = None; + for (idx, meta) in chain.iter().enumerate() { + match self + .git_storage + .commit_meta(workspace_id, meta.commit_id.as_slice()) + .await? 
+ { + Some(_) => continue, + None => { + missing_idx = Some(idx); + break; + } + } + } + if let Some(idx) = missing_idx { + let to_remove: Vec = chain[..=idx].to_vec(); + info!( + workspace_id = %workspace_id, + removed = to_remove.len(), + missing_commit = %encode_commit_id(&chain[idx].commit_id), + "git_commit_pointer_pruned_missing_storage_meta" + ); + self.prune_commits_from_head(workspace_id, &to_remove) + .await?; + continue; + } + break; + } + Ok(()) + } + +} diff --git a/api/crates/infrastructure/src/git/workspace/service/state.rs b/api/crates/infrastructure/src/git/workspace/service/state.rs new file mode 100644 index 00000000..ea70c8e6 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/service/state.rs @@ -0,0 +1,10 @@ +use domain::documents::doc_type::DocumentType; +use domain::documents::title::Title; + +include!("state/collect.rs"); +include!("state/dirty.rs"); +include!("state/export.rs"); +include!("state/deltas.rs"); +include!("state/snapshots.rs"); +include!("state/apply.rs"); +include!("state/diff.rs"); diff --git a/api/crates/infrastructure/src/git/workspace/service/state/apply.rs b/api/crates/infrastructure/src/git/workspace/service/state/apply.rs new file mode 100644 index 00000000..521f5b63 --- /dev/null +++ b/api/crates/infrastructure/src/git/workspace/service/state/apply.rs @@ -0,0 +1,364 @@ +impl GitWorkspaceService { + async fn apply_state_to_workspace( + &self, + workspace_id: Uuid, + state: &HashMap, + previous_index: &HashMap, + ) -> anyhow::Result { + let mut changed: u32 = 0; + // write/update files + for (path, snapshot) in state.iter() { + let rel = format!("{}/{}", workspace_id, path.trim_start_matches('/')); + let abs = self.storage.absolute_from_relative(&rel); + if let Some(parent) = abs.parent() { + async_fs::create_dir_all(parent).await?; + } + let bytes = self.snapshot_bytes(snapshot).await?; + self.storage.write_bytes(abs.as_path(), &bytes).await?; + changed += 1; + } + // remove files missing in next state + for path in previous_index.keys() { + if state.contains_key(path) { + continue; + } + let rel = format!("{}/{}", workspace_id, path.trim_start_matches('/')); + let abs = self.storage.absolute_from_relative(&rel); + if async_fs::remove_file(&abs).await.is_ok() { + changed += 1; + } + } + Ok(changed) + } + + async fn ensure_folder( + &self, + workspace_id: Uuid, + actor_id: Uuid, + folder_path: &str, + cache: &mut HashMap, + ) -> anyhow::Result> { + let trimmed = folder_path.trim_matches('/'); + if trimmed.is_empty() { + return Ok(None); + } + + let mut current_parent: Option = None; + let mut accumulated = String::new(); + for segment in trimmed.split('/') { + if !accumulated.is_empty() { + accumulated.push('/'); + } + accumulated.push_str(segment); + + if let Some(id) = cache.get(&accumulated) { + current_parent = Some(*id); + continue; + } + + let lookup_path = format!("{}/{}", workspace_id, accumulated); + if let Some(existing) = self + .doc_paths + .get_by_owner_and_path(workspace_id, &lookup_path) + .await? + { + if existing.doc_type() != DocumentType::Folder { + anyhow::bail!("path_conflict_not_folder"); + } + cache.insert(accumulated.clone(), existing.id()); + current_parent = Some(existing.id()); + continue; + } + + let title = if segment.trim().is_empty() { + "folder" + } else { + segment + }; + let parent_desired_path = match current_parent { + Some(parent_id) => self + .docs + .get_meta_for_owner(parent_id, workspace_id) + .await? 
+ .map(|m| m.desired_path), + None => None, + }; + let title = Title::from_user_input(title); + let mut repo = self.docs.as_ref(); + let folder = application::documents::use_cases::create_document::CreateDocument { + repo: &mut repo, + } + .execute( + workspace_id, + actor_id, + &title, + current_parent, + parent_desired_path.as_ref(), + DocumentType::Folder, + None, + ) + .await?; + self.doc_paths + .update_repo_path(folder.id(), workspace_id, &accumulated) + .await?; + + cache.insert(accumulated.clone(), folder.id()); + current_parent = Some(folder.id()); + } + + Ok(current_parent) + } + + async fn materialize_documents_from_state( + &self, + workspace_id: Uuid, + actor_id: Uuid, + state: &HashMap, + ) -> anyhow::Result<(u32, u32)> { + fn folder_key(path: &str) -> String { + path.rsplit_once('/') + .map(|(parent, _)| parent.trim().trim_end_matches('/')) + .filter(|s| !s.is_empty()) + .unwrap_or_default() + .to_string() + } + + fn attachment_owner_folder(path: &str) -> String { + if let Some(idx) = path.find("/attachments/") { + let prefix = &path[..idx]; + if prefix.is_empty() { + String::new() + } else { + prefix.trim_end_matches('/').to_string() + } + } else if path.starts_with("attachments/") { + String::new() + } else { + folder_key(path) + } + } + + fn is_markdown_path(path: &str) -> bool { + let lower = path.to_ascii_lowercase(); + lower.ends_with(".md") || lower.ends_with(".markdown") + } + + let mut folder_cache: HashMap = HashMap::new(); + let mut docs_created: u32 = 0; + let mut attachments_created: u32 = 0; + + let mut existing_by_desired: HashMap = HashMap::new(); + let mut folder_docs: HashMap> = HashMap::new(); + + for doc in self.docs.list_workspace_documents(workspace_id).await? { + let normalized = normalize_repo_path(doc.desired_path().as_str().to_string()); + existing_by_desired.insert(normalized.clone(), doc.id()); + if doc.doc_type() != DocumentType::Folder { + let key = folder_key(&normalized); + folder_docs.entry(key.clone()).or_default().push(doc.id()); + if doc.archived_at().is_some() { + let archived_key = if key.is_empty() { + "Archives".to_string() + } else { + format!("Archives/{}", key) + }; + folder_docs.entry(archived_key).or_default().push(doc.id()); + } + } + } + + let mut paths: Vec = state.keys().cloned().collect(); + paths.sort(); + + // First pass: create documents only for markdown files + for path in paths.iter() { + let snapshot = match state.get(path) { + Some(s) => s, + None => continue, + }; + if !snapshot.is_text { + continue; + } + let normalized = normalize_repo_path(path.clone()); + if !is_markdown_path(&normalized) { + continue; + } + + // Skip if document already exists at desired_path (including folders that would conflict) + if existing_by_desired.contains_key(&normalized) { + continue; + } + + let parent_path = folder_key(&normalized); + let parent_id = if parent_path.is_empty() { + None + } else { + self.ensure_folder(workspace_id, actor_id, &parent_path, &mut folder_cache) + .await? + }; + + let filename = normalized + .rsplit('/') + .next() + .unwrap_or(&normalized) + .to_string(); + let title = filename + .trim_end_matches(".md") + .trim_end_matches(".markdown") + .trim_end_matches(".txt"); + + let parent_desired_path = match parent_id { + Some(parent_id) => self + .docs + .get_meta_for_owner(parent_id, workspace_id) + .await? 
+                    .map(|m| m.desired_path),
+                None => None,
+            };
+            let title = Title::from_user_input(if title.is_empty() { "Document" } else { title });
+            let mut repo = self.docs.as_ref();
+            let doc = application::documents::use_cases::create_document::CreateDocument {
+                repo: &mut repo,
+            }
+            .execute(
+                workspace_id,
+                actor_id,
+                &title,
+                parent_id,
+                parent_desired_path.as_ref(),
+                DocumentType::Document,
+                None,
+            )
+            .await?;
+            self.doc_paths
+                .update_repo_path(doc.id(), workspace_id, &normalized)
+                .await?;
+            docs_created += 1;
+            existing_by_desired.insert(normalized.clone(), doc.id());
+
+            folder_docs.entry(parent_path).or_default().push(doc.id());
+
+            let bytes = self.snapshot_bytes(snapshot).await.unwrap_or_default();
+            let body = extract_markdown_body(&bytes)
+                .unwrap_or_else(|| std::str::from_utf8(&bytes).unwrap_or_default().to_string());
+            let snap_bytes = snapshot_from_markdown(&body);
+            let _ = self
+                .realtime
+                .apply_snapshot(&doc.id().to_string(), snap_bytes.as_slice())
+                .await;
+            let _ = self.realtime.force_persist(&doc.id().to_string()).await;
+        }
+
+        for docs in folder_docs.values_mut() {
+            docs.sort();
+        }
+
+        // Second pass: attach binaries without creating documents
+        for path in paths {
+            let snapshot = match state.get(&path) {
+                Some(s) => s,
+                None => continue,
+            };
+            if snapshot.is_text {
+                continue;
+            }
+            let normalized = normalize_repo_path(path.clone());
+            if !normalized.contains("/attachments/") && !normalized.starts_with("attachments/") {
+                continue;
+            }
+            let filename = normalized
+                .rsplit('/')
+                .next()
+                .unwrap_or(&normalized)
+                .to_string();
+            let folder = attachment_owner_folder(&normalized);
+            let doc_id = folder_docs.get(&folder).and_then(|v| v.first().copied());
+            let Some(doc_id) = doc_id else {
+                warn!(
+                    workspace_id = %workspace_id,
+                    repo_path = normalized.as_str(),
+                    "git_materialize_attachment_no_owner"
+                );
+                continue;
+            };
+
+            let storage_path = format!("{}/{}", workspace_id, normalized);
+            let existing: Option<Uuid> =
+                sqlx::query_scalar("SELECT id FROM files WHERE storage_path = $1 LIMIT 1")
+                    .bind(&storage_path)
+                    .fetch_optional(&self.pool)
+                    .await?;
+            if existing.is_some() {
+                continue;
+            }
+
+            let bytes = self.snapshot_bytes(snapshot).await.unwrap_or_default();
+            let size = bytes.len() as i64;
+            let _ = sqlx::query(
+                r#"INSERT INTO files (document_id, filename, content_type, size, storage_path, content_hash)
+                   VALUES ($1,$2,$3,$4,$5,$6)"#,
+            )
+            .bind(doc_id)
+            .bind(&filename)
+            .bind::<Option<String>>(None)
+            .bind(size)
+            .bind(&storage_path)
+            .bind(&snapshot.hash)
+            .execute(&self.pool)
+            .await?;
+            attachments_created += 1;
+        }
+        Ok((docs_created, attachments_created))
+    }
+
+    /// Apply merged markdown files directly to realtime/persistence so documents reflect Pull results.
+    async fn apply_merged_to_documents(
+        &self,
+        workspace_id: Uuid,
+        next_state: &HashMap<String, FileSnapshot>,
+    ) -> anyhow::Result<()> {
+        let doc_rows = self
+            .docs
+            .list_workspace_documents(workspace_id)
+            .await?
+            .into_iter()
+            .filter(|d| d.doc_type() != DocumentType::Folder);
+
+        for doc in doc_rows {
+            let doc_id = doc.id();
+            let normalized = normalize_repo_path(doc.desired_path().as_str().to_string());
+            let Some(snapshot) = next_state.get(&normalized) else {
+                continue;
+            };
+
+            if !snapshot.is_text {
+                continue;
+            }
+            let bytes = match self.snapshot_bytes(snapshot).await {
+                Ok(b) => b,
+                Err(err) => {
+                    warn!(document_id = %doc_id, error = ?err, "git_pull_snapshot_bytes_failed");
+                    continue;
+                }
+            };
+            let body = match extract_markdown_body(&bytes) {
+                Some(b) => b,
+                None => continue,
+            };
+            let snap_bytes =
+                application::documents::services::realtime::snapshot::snapshot_from_markdown(&body);
+            if let Err(err) = crate::core::storage::suppress_git_dirty(async {
+                self.realtime
+                    .apply_snapshot(&doc_id.to_string(), snap_bytes.as_slice())
+                    .await?;
+                self.realtime.force_persist(&doc_id.to_string()).await
+            })
+            .await
+            {
+                warn!(document_id = %doc_id, error = ?err, "git_pull_apply_snapshot_failed");
+                continue;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/service/state/collect.rs b/api/crates/infrastructure/src/git/workspace/service/state/collect.rs
new file mode 100644
index 00000000..f4b64c63
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/state/collect.rs
@@ -0,0 +1,121 @@
+impl GitWorkspaceService {
+    async fn collect_current_state(
+        &self,
+        workspace_id: Uuid,
+    ) -> anyhow::Result<HashMap<String, FileSnapshot>> {
+        let mut state: HashMap<String, FileSnapshot> = HashMap::new();
+
+        let doc_rows = self
+            .docs
+            .list_workspace_documents(workspace_id)
+            .await?
+            .into_iter()
+            .filter(|d| d.doc_type() != DocumentType::Folder);
+
+        for doc in doc_rows {
+            let doc_id = doc.id();
+            let export = match self.snapshot.export_current_markdown(&doc_id).await?
+            {
+                Some(export) => export,
+                None => continue,
+            };
+            let repo_path = export
+                .repo_path
+                .or_else(|| Some(doc.desired_path().as_str().to_string()))
+                .map(normalize_repo_path)
+                .ok_or_else(|| anyhow!("missing_repo_path_for_doc {}", doc_id))?;
+            state.insert(
+                repo_path,
+                FileSnapshot {
+                    hash: export.content_hash,
+                    data: FileSnapshotData::Inline(export.bytes),
+                    is_text: true,
+                },
+            );
+        }
+
+        let attachment_rows = sqlx::query(
+            r#"SELECT f.id AS file_id, f.storage_path, f.content_hash
+               FROM files f
+               JOIN documents d ON d.id = f.document_id
+               WHERE d.owner_id = $1"#,
+        )
+        .bind(workspace_id)
+        .fetch_all(&self.pool)
+        .await?;
+
+        for row in attachment_rows {
+            let file_id: Uuid = row.get("file_id");
+            let storage_path: String = row.get("storage_path");
+            let stored_hash: Option<String> = row
+                .try_get("content_hash")
+                .ok()
+                .and_then(|h: String| if h.is_empty() { None } else { Some(h) });
+            let (hash, needs_persist) = match stored_hash {
+                Some(existing) => (existing, false),
+                None => {
+                    let computed = self
+                        .compute_attachment_hash(&storage_path)
+                        .await
+                        .with_context(|| {
+                            format!("failed to compute attachment hash for {}", storage_path)
+                        })?;
+                    match computed {
+                        Some(value) => (value, true),
+                        None => continue,
+                    }
+                }
+            };
+            if needs_persist {
+                if let Err(err) = self.persist_attachment_hash(file_id, &hash).await {
+                    warn!(
+                        file_id = %file_id,
+                        path = storage_path.as_str(),
+                        error = ?err,
+                        "git_workspace_attachment_hash_persist_failed"
+                    );
+                }
+            }
+            let repo_path = repo_relative_path(&storage_path)?;
+            state.insert(
+                repo_path,
+                FileSnapshot {
+                    hash,
+                    data: FileSnapshotData::StoragePath(storage_path),
+                    is_text: false,
+                },
+            );
+        }
+
+        Ok(state)
+    }
+
+    async fn compute_attachment_hash(&self, storage_path: &str) -> anyhow::Result<Option<String>> {
+        let abs = self.storage.absolute_from_relative(storage_path);
+        match self.storage.read_bytes(abs.as_path()).await {
+            Ok(bytes) => Ok(Some(sha256_hex(&bytes))),
+            Err(err) => {
+                if let Some(io_err) = err.downcast_ref::<io::Error>() {
+                    if io_err.kind() == io::ErrorKind::NotFound {
+                        return Ok(None);
+                    }
+                }
+                if err.to_string().to_lowercase().contains("not found") {
+                    return Ok(None);
+                }
+                Err(err.into())
+            }
+        }
+    }
+
+    async fn persist_attachment_hash(&self, file_id: Uuid, hash: &str) -> anyhow::Result<()> {
+        sqlx::query(
+            r#"UPDATE files SET content_hash = $2, updated_at = now()
+               WHERE id = $1"#,
+        )
+        .bind(file_id)
+        .bind(hash)
+        .execute(&self.pool)
+        .await?;
+        Ok(())
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/service/state/deltas.rs b/api/crates/infrastructure/src/git/workspace/service/state/deltas.rs
new file mode 100644
index 00000000..ff1367b5
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/state/deltas.rs
@@ -0,0 +1,31 @@
+impl GitWorkspaceService {
+    fn compute_deltas(
+        &self,
+        current: &HashMap<String, FileSnapshot>,
+        previous: &HashMap<String, String>,
+    ) -> FileDeltaSummary {
+        let mut added = Vec::new();
+        let mut modified = Vec::new();
+        let mut deleted = Vec::new();
+
+        for (path, snapshot) in current.iter() {
+            match previous.get(path) {
+                None => added.push(path.clone()),
+                Some(prev_hash) if prev_hash != &snapshot.hash => modified.push(path.clone()),
+                _ => {}
+            }
+        }
+
+        for path in previous.keys() {
+            if !current.contains_key(path) {
+                deleted.push(path.clone());
+            }
+        }
+
+        FileDeltaSummary {
+            added,
+            modified,
+            deleted,
+        }
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/service/state/diff.rs b/api/crates/infrastructure/src/git/workspace/service/state/diff.rs
new file mode
100644
index 00000000..352ea0ba
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/state/diff.rs
@@ -0,0 +1,173 @@
+impl GitWorkspaceService {
+    fn build_diff_result(
+        &self,
+        path: &str,
+        old_content: Option<&str>,
+        new_content: Option<&str>,
+    ) -> TextDiffResult {
+        match (old_content, new_content) {
+            (Some(old), Some(new)) => compute_text_diff(old, new, path),
+            _ => TextDiffResult {
+                file_path: path.to_string(),
+                diff_lines: Vec::new(),
+                old_content: old_content.map(|s| s.to_string()),
+                new_content: new_content.map(|s| s.to_string()),
+            },
+        }
+    }
+
+    async fn commit_diff_via_packs(
+        &self,
+        workspace_id: Uuid,
+        from_meta: Option<&CommitMeta>,
+        to_meta: &CommitMeta,
+    ) -> anyhow::Result<Vec<TextDiffResult>> {
+        let (to_pack_dir, to_pack_paths) = self
+            .persist_pack_chain(workspace_id, Some(to_meta.commit_id.as_slice()))
+            .await?
+            .ok_or_else(|| {
+                anyhow!(
+                    "missing pack data for commit {}",
+                    encode_commit_id(&to_meta.commit_id)
+                )
+            })?;
+
+        let from_pack = if let Some(from_meta) = from_meta {
+            if from_meta.commit_id != to_meta.commit_id {
+                Some(
+                    self.persist_pack_chain(workspace_id, Some(from_meta.commit_id.as_slice()))
+                        .await?
+                        .ok_or_else(|| {
+                            anyhow!(
+                                "missing pack data for commit {}",
+                                encode_commit_id(&from_meta.commit_id)
+                            )
+                        })?,
+                )
+            } else {
+                None
+            }
+        } else {
+            None
+        };
+
+        let temp_dir = TempDirBuilder::new()
+            .prefix("git-diff-")
+            .tempdir()
+            .map_err(|e| anyhow::anyhow!(e))?;
+        let repo = Repository::init_bare(temp_dir.path())?;
+
+        apply_pack_files(&repo, &to_pack_paths)?;
+        if let Some((_, ref paths)) = from_pack {
+            apply_pack_files(&repo, paths)?;
+        }
+
+        let from_files = if let Some(from_meta) = from_meta {
+            read_commit_files(&repo, from_meta.commit_id.as_slice())?
+        } else {
+            HashMap::new()
+        };
+        let to_files = read_commit_files(&repo, to_meta.commit_id.as_slice())?;
+
+        drop(repo);
+        let _ = temp_dir.close();
+        drop(to_pack_dir);
+        if let Some((dir, _)) = from_pack {
+            drop(dir);
+        }
+
+        let mut paths: BTreeSet<String> = BTreeSet::new();
+        paths.extend(from_files.keys().cloned());
+        paths.extend(to_files.keys().cloned());
+
+        let mut results = Vec::new();
+        for path in paths {
+            let old_bytes = from_files.get(&path);
+            let new_bytes = to_files.get(&path);
+            let old_content = old_bytes
+                .and_then(|b| std::str::from_utf8(b).ok())
+                .map(|s| s.to_string());
+            let new_content = new_bytes
+                .and_then(|b| std::str::from_utf8(b).ok())
+                .map(|s| s.to_string());
+            if old_content.is_none() && new_content.is_none() {
+                if old_bytes.is_some() || new_bytes.is_some() {
+                    results.push(self.build_diff_result(&path, None, None));
+                }
+                continue;
+            }
+            results.push(self.build_diff_result(
+                &path,
+                old_content.as_deref(),
+                new_content.as_deref(),
+            ));
+        }
+        Ok(results)
+    }
+
+    async fn commit_diff_from_storage(
+        &self,
+        workspace_id: Uuid,
+        from_meta: Option<&CommitMeta>,
+        to_meta: Option<&CommitMeta>,
+    ) -> anyhow::Result<Vec<TextDiffResult>> {
+        let Some(to_meta) = to_meta else {
+            return Ok(Vec::new());
+        };
+
+        let mut paths: BTreeSet<String> = BTreeSet::new();
+        if let Some(meta) = from_meta {
+            paths.extend(meta.file_hash_index.keys().cloned());
+        }
+        paths.extend(to_meta.file_hash_index.keys().cloned());
+
+        let mut results = Vec::new();
+        for path in paths {
+            let old_hash = from_meta.and_then(|meta| meta.file_hash_index.get(&path));
+            let new_hash = to_meta.file_hash_index.get(&path);
+            if let (Some(old), Some(new)) = (old_hash, new_hash) {
+                if old == new {
+                    continue;
+                }
+            }
+
+            let old_bytes = match (from_meta, old_hash) {
+                (Some(meta), Some(_))
+                => {
+                    self.load_file_snapshot(workspace_id, meta.commit_id.as_slice(), &path)
+                        .await?
+                }
+                _ => None,
+            };
+            let new_bytes = match new_hash {
+                Some(_) => {
+                    self.load_file_snapshot(workspace_id, to_meta.commit_id.as_slice(), &path)
+                        .await?
+                }
+                None => None,
+            };
+
+            let old_text = old_bytes
+                .as_ref()
+                .and_then(|bytes| std::str::from_utf8(bytes).ok())
+                .map(|s| s.to_string());
+            let new_text = new_bytes
+                .as_ref()
+                .and_then(|bytes| std::str::from_utf8(bytes).ok())
+                .map(|s| s.to_string());
+
+            if old_text.is_none() && new_text.is_none() {
+                if old_bytes.is_some() || new_bytes.is_some() {
+                    results.push(self.build_diff_result(&path, None, None));
+                }
+            } else {
+                results.push(self.build_diff_result(
+                    &path,
+                    old_text.as_deref(),
+                    new_text.as_deref(),
+                ));
+            }
+        }
+
+        Ok(results)
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/service/state/dirty.rs b/api/crates/infrastructure/src/git/workspace/service/state/dirty.rs
new file mode 100644
index 00000000..07ccf746
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/state/dirty.rs
@@ -0,0 +1,36 @@
+impl GitWorkspaceService {
+    async fn fetch_dirty(&self, workspace_id: Uuid) -> anyhow::Result<Vec<DirtyRow>> {
+        let rows = sqlx::query(
+            r#"SELECT path, is_text, op, content_hash
+               FROM git_dirty_files
+               WHERE workspace_id = $1
+               ORDER BY created_at ASC"#,
+        )
+        .bind(workspace_id)
+        .fetch_all(&self.pool)
+        .await?;
+
+        let mut out = Vec::new();
+        for r in rows {
+            let path: String = r.get("path");
+            let is_text: bool = r.get("is_text");
+            let op: String = r.get("op");
+            let content_hash: Option<String> = r.try_get("content_hash").ok();
+            out.push(DirtyRow {
+                path,
+                is_text,
+                op,
+                content_hash,
+            });
+        }
+        Ok(out)
+    }
+
+    async fn clear_dirty(&self, workspace_id: Uuid) -> anyhow::Result<u64> {
+        let res = sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1")
+            .bind(workspace_id)
+            .execute(&self.pool)
+            .await?;
+        Ok(res.rows_affected())
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/service/state/export.rs b/api/crates/infrastructure/src/git/workspace/service/state/export.rs
new file mode 100644
index 00000000..ac358227
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/state/export.rs
@@ -0,0 +1,52 @@
+impl GitWorkspaceService {
+    async fn export_markdown_for_repo_path(
+        &self,
+        workspace_id: Uuid,
+        repo_path: &str,
+    ) -> anyhow::Result<Option<(Vec<u8>, String)>> {
+        let trimmed = repo_path.trim_start_matches('/');
+        let mut candidates: Vec<(&str, bool)> = vec![(trimmed, false)];
+        if let Some(stripped) = trimmed.strip_prefix("Archives/") {
+            if !stripped.is_empty() {
+                candidates.push((stripped, true));
+            }
+        }
+
+        // First try by normalized repo path (documents.path). Fall back to desired_path for older records.
+        let all_docs = self.docs.list_workspace_documents(workspace_id).await?;
+
+        for (candidate, archived_only) in candidates {
+            let lookup_path = format!("{}/{}", workspace_id, candidate);
+            let from_path = self
+                .doc_paths
+                .get_by_owner_and_path(workspace_id, &lookup_path)
+                .await?;
+
+            let doc = if let Some(doc) = from_path {
+                Some(doc)
+            } else {
+                all_docs
+                    .iter()
+                    .find(|d| {
+                        normalize_repo_path(d.desired_path().as_str().to_string()) == candidate
+                    })
+                    .cloned()
+            };
+
+            if let Some(doc) = doc {
+                if doc.doc_type() == DocumentType::Folder {
+                    continue;
+                }
+                if archived_only && doc.archived_at().is_none() {
+                    continue;
+                }
+                let doc_id = doc.id();
+                if let Some(export) = self.snapshot.export_current_markdown(&doc_id).await?
+                {
+                    return Ok(Some((export.bytes, export.content_hash)));
+                }
+            }
+        }
+
+        Ok(None)
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/service/state/snapshots.rs b/api/crates/infrastructure/src/git/workspace/service/state/snapshots.rs
new file mode 100644
index 00000000..eea8949d
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/state/snapshots.rs
@@ -0,0 +1,84 @@
+impl GitWorkspaceService {
+    async fn store_commit_snapshots(
+        &self,
+        workspace_id: Uuid,
+        commit_id: &[u8],
+        state: &HashMap<String, FileSnapshot>,
+    ) -> anyhow::Result<Vec<String>> {
+        let mut stored = Vec::new();
+        for (path, snapshot) in state.iter() {
+            let key = blob_key(workspace_id, commit_id, path);
+            let bytes = self.snapshot_bytes(snapshot).await?;
+            if let Err(err) = self.git_storage.put_blob(&key, &bytes).await {
+                for key in stored.iter().rev() {
+                    let _ = self.git_storage.delete_blob(key).await;
+                }
+                return Err(err.into());
+            }
+            stored.push(key);
+        }
+        Ok(stored)
+    }
+
+    async fn snapshot_bytes(&self, snapshot: &FileSnapshot) -> anyhow::Result<Vec<u8>> {
+        match &snapshot.data {
+            FileSnapshotData::Inline(bytes) => Ok(bytes.clone()),
+            FileSnapshotData::StoragePath(path) => {
+                let abs = self.storage.absolute_from_relative(path);
+                self.storage.read_bytes(abs.as_path()).await.map_err(Into::into)
+            }
+        }
+    }
+
+    async fn load_file_snapshot(
+        &self,
+        workspace_id: Uuid,
+        commit_id: &[u8],
+        path: &str,
+    ) -> anyhow::Result<Option<Vec<u8>>> {
+        let key = blob_key(workspace_id, commit_id, path);
+        match self.git_storage.fetch_blob(&key).await {
+            Ok(bytes) => Ok(Some(bytes)),
+            Err(err) => {
+                // Treat missing blob as absence (e.g., binary or not stored).
+                if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
+                    if io_err.kind() == std::io::ErrorKind::NotFound {
+                        return Ok(None);
+                    }
+                }
+                if err.to_string().contains("not found") {
+                    return Ok(None);
+                }
+                Err(err.into())
+            }
+        }
+    }
+
+    #[allow(dead_code)]
+    async fn state_from_commit_meta(
+        &self,
+        workspace_id: Uuid,
+        meta: &CommitMeta,
+    ) -> anyhow::Result<HashMap<String, FileSnapshot>> {
+        let mut state: HashMap<String, FileSnapshot> = HashMap::new();
+        for path in meta.file_hash_index.keys() {
+            let Some(bytes) = self
+                .load_file_snapshot(workspace_id, &meta.commit_id, path)
+                .await?
+            else {
+                continue;
+            };
+            let hash = sha256_hex(&bytes);
+            let is_text = std::str::from_utf8(&bytes).is_ok();
+            state.insert(
+                path.clone(),
+                FileSnapshot {
+                    hash,
+                    data: FileSnapshotData::Inline(bytes),
+                    is_text,
+                },
+            );
+        }
+        Ok(state)
+    }
+}
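// Illustrative sketch (not part of this diff): the synthetic.rs file introduced below
// is synchronous because libgit2 types are not Send, yet it needs data from async
// helpers. The bridge it uses is `tokio::task::block_in_place` plus `Handle::block_on`,
// shown here in isolation. `load()` is a hypothetical stand-in for
// `self.collect_current_state(..)` / `self.snapshot_bytes(..)`. Note that
// `block_in_place` panics on a current-thread runtime, so the pattern assumes the
// multi-thread Tokio runtime.
fn bridge_sketch() -> anyhow::Result<Vec<u8>> {
    // Hypothetical async data source.
    async fn load() -> anyhow::Result<Vec<u8>> {
        Ok(b"demo".to_vec())
    }
    // Step off the cooperative scheduler, then drive the future to completion.
    tokio::task::block_in_place(|| {
        let handle = tokio::runtime::Handle::current();
        handle.block_on(load())
    })
}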
diff --git a/api/crates/infrastructure/src/git/workspace/service/synthetic.rs b/api/crates/infrastructure/src/git/workspace/service/synthetic.rs
new file mode 100644
index 00000000..9a58c9b6
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/service/synthetic.rs
@@ -0,0 +1,58 @@
+impl GitWorkspaceService {
+    // Build a synthetic commit from the current workspace state so dirty edits participate in merges.
+    fn build_synthetic_commit(
+        &self,
+        workspace_id: Uuid,
+        repo: &Repository,
+        base_oid: git2::Oid,
+    ) -> anyhow::Result<git2::Oid> {
+        // Collect current workspace state into blobs and index entries (supports nested paths).
+        let current_state = tokio::task::block_in_place(|| {
+            let handle = tokio::runtime::Handle::current();
+            handle.block_on(self.collect_current_state(workspace_id))
+        })?;
+
+        let mut index = repo.index()?;
+        index.clear()?;
+
+        for (path, snapshot) in current_state.iter() {
+            let bytes = tokio::task::block_in_place(|| {
+                let handle = tokio::runtime::Handle::current();
+                handle.block_on(self.snapshot_bytes(snapshot))
+            })?;
+            let blob_oid = repo.blob(&bytes)?;
+
+            let entry = git2::IndexEntry {
+                ctime: git2::IndexTime::new(0, 0),
+                mtime: git2::IndexTime::new(0, 0),
+                dev: 0,
+                ino: 0,
+                mode: 0o100644,
+                uid: 0,
+                gid: 0,
+                file_size: bytes.len() as u32,
+                id: blob_oid,
+                flags: std::cmp::min(path.len(), 0x0fff) as u16,
+                flags_extended: 0,
+                path: path.as_bytes().to_vec(),
+            };
+            index.add(&entry)?;
+        }
+
+        let tree_oid = index.write_tree_to(repo)?;
+        let tree = repo.find_tree(tree_oid)?;
+
+        // Create a synthetic commit with remote as parent to anchor the merge base.
+        // Use an explicit signature so we don't rely on local git config being present.
+        let sig = signature_from_parts("RefMD", "refmd@example.com", Utc::now())?;
+        let commit_oid = repo.commit(
+            Some("refs/heads/synthetic-workspace"),
+            &sig,
+            &sig,
+            "workspace-state",
+            &tree,
+            &[&repo.find_commit(base_oid)?],
+        )?;
+        Ok(commit_oid)
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/sync.rs b/api/crates/infrastructure/src/git/workspace/sync.rs
new file mode 100644
index 00000000..c0ddd069
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync.rs
@@ -0,0 +1,6 @@
+include!("sync/main.rs");
+include!("sync/commit_pack.rs");
+include!("sync/pack_chain.rs");
+include!("sync/changes.rs");
+include!("sync/precompute.rs");
+include!("sync/persist.rs");
diff --git a/api/crates/infrastructure/src/git/workspace/sync/changes.rs b/api/crates/infrastructure/src/git/workspace/sync/changes.rs
new file mode 100644
index 00000000..1ebe0e2c
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync/changes.rs
@@ -0,0 +1,43 @@
+impl GitWorkspaceService {
+    fn sync_build_change_sets(
+        use_full_scan: bool,
+        dirty_rows: &[DirtyRow],
+        previous_index: &HashMap<String, String>,
+    ) -> (BTreeMap<String, DirtyUpsert>, BTreeSet<String>) {
+        if use_full_scan {
+            return (BTreeMap::new(), BTreeSet::new());
+        }
+
+        let mut upserts: BTreeMap<String, DirtyUpsert> = BTreeMap::new();
+        let mut deletes: BTreeSet<String> = BTreeSet::new();
+
+        for row in dirty_rows {
+            match row.op.as_str() {
+                "upsert" => {
+                    upserts.insert(
+                        row.path.clone(),
+                        DirtyUpsert {
+                            is_text: row.is_text,
+                            content_hash: row.content_hash.clone(),
+                        },
+                    );
+                    deletes.remove(&row.path);
+                }
+                "delete" => {
+                    upserts.remove(&row.path);
+                    deletes.insert(row.path.clone());
+                }
+                _ => {}
+            }
+        }
+
+        upserts.retain(|path, u| {
+            !matches!(
+                (&u.content_hash, previous_index.get(path)),
+                (Some(hnew), Some(hprev)) if hnew == hprev
+            )
+        });
+
+        (upserts, deletes)
+    }
+}
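// Illustrative sketch (not part of this diff): how `sync_build_change_sets` above
// collapses the ordered `git_dirty_files` log into last-write-wins change sets. A
// later delete cancels an earlier upsert for the same path, and vice versa.
fn collapse_sketch() {
    use std::collections::BTreeSet;
    // (path, op) pairs in created_at order, standing in for DirtyRow records.
    let log = [("a.md", "upsert"), ("a.md", "delete"), ("b.md", "upsert")];
    let mut upserts: BTreeSet<&str> = BTreeSet::new();
    let mut deletes: BTreeSet<&str> = BTreeSet::new();
    for (path, op) in log {
        match op {
            "upsert" => {
                upserts.insert(path);
                deletes.remove(path);
            }
            "delete" => {
                upserts.remove(path);
                deletes.insert(path);
            }
            _ => {}
        }
    }
    assert!(deletes.contains("a.md") && upserts.contains("b.md") && !upserts.contains("a.md"));
}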
diff --git a/api/crates/infrastructure/src/git/workspace/sync/commit_pack.rs b/api/crates/infrastructure/src/git/workspace/sync/commit_pack.rs
new file mode 100644
index 00000000..18f98bfb
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync/commit_pack.rs
@@ -0,0 +1,101 @@
+impl GitWorkspaceService {
+    #[allow(clippy::too_many_arguments)]
+    fn sync_build_commit_pack(
+        workspace_id: Uuid,
+        repo: &Repository,
+        latest_meta: Option<&CommitMeta>,
+        branch_name: &str,
+        author_name: &str,
+        author_email: &str,
+        committed_at: DateTime<Utc>,
+        message: &str,
+        use_full_scan: bool,
+        full_entries: Option<&BTreeMap<String, Vec<u8>>>,
+        deletes: &BTreeSet<String>,
+        precomputed_upsert_bytes: &BTreeMap<String, Vec<u8>>,
+        next_file_hash_index: HashMap<String, String>,
+        cfg: Option<&UserGitCfg>,
+        skip_push: bool,
+        force_push: bool,
+    ) -> anyhow::Result<(CommitMeta, Vec<u8>, String, bool)> {
+        // Skip pre-fetch/verify to avoid remote redirect/auth loops; rely on push outcome.
+        // Build sources from either full scan or dirty set (no awaits here).
+        let tree_oid = if use_full_scan {
+            let entries = full_entries.ok_or_else(|| anyhow!("full-scan entries missing"))?;
+            build_tree_from_entries(repo, entries)?
+        } else {
+            // Incremental: reuse previous blobs for unchanged paths.
+            let mut sources: BTreeMap<String, FileSource> = BTreeMap::new();
+            if let Some(prev_meta) = latest_meta {
+                let prev_oids = read_commit_blob_oids(repo, prev_meta.commit_id.as_slice())?;
+                for (path, oid) in prev_oids {
+                    sources.insert(path, FileSource::Oid(oid));
+                }
+            }
+            for d in deletes.iter() {
+                sources.remove(d);
+            }
+            for (path, bytes) in precomputed_upsert_bytes.iter() {
+                sources.insert(path.clone(), FileSource::Bytes(bytes.clone()));
+            }
+            build_tree_from_sources(repo, &sources)?
+        };
+        let tree = repo.find_tree(tree_oid)?;
+
+        let mut parent_commits = Vec::new();
+        if let Some(prev_meta) = latest_meta {
+            let parent_oid = git2::Oid::from_bytes(&prev_meta.commit_id)?;
+            parent_commits.push(repo.find_commit(parent_oid)?);
+        }
+        let parent_refs: Vec<&Commit> = parent_commits.iter().collect();
+
+        let branch_ref = format!("refs/heads/{}", branch_name);
+        let author_sig = signature_from_parts(author_name, author_email, committed_at)?;
+        let commit_oid = repo.commit(
+            Some(&branch_ref),
+            &author_sig,
+            &author_sig,
+            message,
+            &tree,
+            &parent_refs,
+        )?;
+        let commit_hex = encode_commit_id(commit_oid.as_bytes());
+
+        let mut pack_builder = repo.packbuilder()?;
+        pack_builder.insert_commit(commit_oid)?;
+        // Include parent commit objects to avoid missing bases when applying packs later.
+        for parent in parent_commits.iter() {
+            pack_builder.insert_commit(parent.id())?;
+        }
+        let mut pack_buf = git2::Buf::new();
+        pack_builder.write_buf(&mut pack_buf)?;
+        let pack_bytes = pack_buf.to_vec();
+
+        let message_opt = if message.trim().is_empty() {
+            None
+        } else {
+            Some(message.to_string())
+        };
+
+        let meta = CommitMeta {
+            commit_id: commit_oid.as_bytes().to_vec(),
+            parent_commit_id: latest_meta.map(|c| c.commit_id.clone()),
+            message: message_opt,
+            author_name: Some(author_name.to_string()),
+            author_email: Some(author_email.to_string()),
+            committed_at,
+            pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex.clone()),
+            file_hash_index: next_file_hash_index,
+        };
+
+        let mut pushed = false;
+        if let Some(cfg) = cfg {
+            if !cfg.repository_url.is_empty() && !skip_push {
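// Illustrative sketch (not part of this diff): the commit-then-pack shape used in
// this function. `PackBuilder::insert_commit` adds the commit object plus its tree
// and blobs, but not its ancestors; that is why the parent commits were inserted
// explicitly above, so later `apply_pack_files` calls can find their delta bases.
fn pack_sketch(repo: &git2::Repository, commit: git2::Oid) -> anyhow::Result<Vec<u8>> {
    let mut builder = repo.packbuilder()?;
    builder.insert_commit(commit)?;
    let mut buf = git2::Buf::new();
    builder.write_buf(&mut buf)?;
    Ok(buf.to_vec())
}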
+                // Propagate push errors so the caller can retry with force.
+                pushed = perform_push(repo, cfg, branch_name, commit_oid, force_push)?;
+            }
+        }
+
+        Ok((meta, pack_bytes, commit_hex, pushed))
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/sync/main.rs b/api/crates/infrastructure/src/git/workspace/sync/main.rs
new file mode 100644
index 00000000..3e1f121e
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync/main.rs
@@ -0,0 +1,420 @@
+struct SyncPrecompute {
+    precomputed_full_entries: Option<BTreeMap<String, Vec<u8>>>,
+    precomputed_upsert_bytes: BTreeMap<String, Vec<u8>>,
+    changed_text_snapshots: HashMap<String, FileSnapshot>,
+    next_file_hash_index: HashMap<String, String>,
+    files_changed_for_response: u32,
+}
+
+impl GitWorkspaceService {
+    async fn sync_inner(
+        &self,
+        workspace_id: Uuid,
+        req: &GitSyncRequestDto,
+        cfg: Option<&UserGitCfg>,
+    ) -> anyhow::Result<GitSyncOutcome> {
+        let state = self.load_repository_state(workspace_id).await?;
+        let Some((state_initialized, state_default_branch)) = state else {
+            anyhow::bail!("repository not initialized")
+        };
+        if !state_initialized {
+            anyhow::bail!("repository not initialized")
+        }
+
+        let branch_hint = cfg
+            .map(|c| c.branch_name.clone())
+            .unwrap_or(state_default_branch.clone());
+
+        let mut latest_meta = self.ensure_latest_meta(workspace_id).await?;
+        if latest_meta.is_none() {
+            if let Some(cfg) = cfg {
+                if !cfg.repository_url.is_empty() {
+                    // Bootstrap remote history; propagate errors to avoid proceeding without packs.
+                    self.bootstrap_remote_history(workspace_id, cfg, branch_hint.as_str())
+                        .await?;
+                    latest_meta = self.ensure_latest_meta(workspace_id).await?;
+                }
+            }
+        }
+
+        // Resolve branch without holding a DB lock for long.
+        let branch_name = cfg
+            .map(|c| c.branch_name.clone())
+            .unwrap_or(state_default_branch.clone());
+        let force_push = req.force.unwrap_or(false);
+        let force_full_scan = req.full_scan.unwrap_or(false);
+        let skip_push = req.skip_push.unwrap_or(false);
+        let push_required = cfg
+            .as_ref()
+            .map(|c| !c.repository_url.is_empty())
+            .unwrap_or(false)
+            && !skip_push;
+
+        // Ensure latest commit pack exists; if missing, attempt to rebuild from storage/remote or fail early.
+        if let Some(latest) = latest_meta.as_ref() {
+            if self
+                .git_storage
+                .fetch_pack_for_commit(workspace_id, latest.commit_id.as_slice())
+                .await?
+                .is_none()
+            {
+                // Try to restore metadata and pack from storage (if pointer mismatch), else try remote bootstrap.
+                warn!(
+                    workspace_id = %workspace_id,
+                    commit = %encode_commit_id(&latest.commit_id),
+                    "git_sync_missing_latest_pack_detected"
+                );
+                // Attempt backfill from storage; ensure_latest_meta will also update latest pointer.
+                self.ensure_storage_commit_integrity(workspace_id).await?;
+                latest_meta = self.ensure_latest_meta(workspace_id).await?;
+                if let Some(latest2) = latest_meta.as_ref() {
+                    if self
+                        .git_storage
+                        .fetch_pack_for_commit(workspace_id, latest2.commit_id.as_slice())
+                        .await?
+                        .is_none()
+                    {
+                        if let Some(cfg) = cfg {
+                            if !cfg.repository_url.is_empty() {
+                                info!(
+                                    workspace_id = %workspace_id,
+                                    commit = %encode_commit_id(&latest2.commit_id),
+                                    "git_sync_missing_latest_pack_bootstrap_remote"
+                                );
+                                self.bootstrap_remote_history(
+                                    workspace_id,
+                                    cfg,
+                                    branch_hint.as_str(),
+                                )
+                                .await?;
+                                latest_meta = self.ensure_latest_meta(workspace_id).await?;
+                            }
+                        }
+                    }
+                }
+                if let Some(latest3) = latest_meta.as_ref() {
+                    if self
+                        .git_storage
+                        .fetch_pack_for_commit(workspace_id, latest3.commit_id.as_slice())
+                        .await?
+                        .is_none()
+                    {
+                        anyhow::bail!(
+                            "missing pack data for latest commit {}; pull and retry",
+                            encode_commit_id(&latest3.commit_id)
+                        );
+                    }
+                }
+            }
+        }
+
+        let mut storage_latest = self.git_storage.latest_commit(workspace_id).await?;
+        let mut storage_commit_hex = storage_latest
+            .as_ref()
+            .map(|m| encode_commit_id(&m.commit_id));
+        let mut db_commit_hex = latest_meta.as_ref().map(|m| encode_commit_id(&m.commit_id));
+        if storage_commit_hex != db_commit_hex {
+            warn!(
+                workspace_id = %workspace_id,
+                db_commit = ?db_commit_hex,
+                storage_commit = ?storage_commit_hex,
+                "git_commit_pointer_mismatch_detected"
+            );
+            if let Some(storage_meta) = storage_latest.as_ref() {
+                self.backfill_commits_from_storage(workspace_id, storage_meta)
+                    .await?;
+                latest_meta = self.latest_commit_meta(workspace_id).await?;
+            }
+            storage_latest = self.git_storage.latest_commit(workspace_id).await?;
+            storage_commit_hex = storage_latest
+                .as_ref()
+                .map(|m| encode_commit_id(&m.commit_id));
+            db_commit_hex = latest_meta.as_ref().map(|m| encode_commit_id(&m.commit_id));
+            if storage_commit_hex == db_commit_hex {
+                info!(
+                    workspace_id = %workspace_id,
+                    commit = ?storage_commit_hex,
+                    "git_commit_pointer_repaired_from_storage"
+                );
+            } else {
+                warn!(
+                    workspace_id = %workspace_id,
+                    db_commit = ?db_commit_hex,
+                    storage_commit = ?storage_commit_hex,
+                    "git_commit_pointer_attempting_realign"
+                );
+                self.realign_commit_history(
+                    workspace_id,
+                    storage_latest.clone(),
+                    latest_meta.clone(),
+                )
+                .await?;
+                latest_meta = self.ensure_latest_meta(workspace_id).await?;
+                storage_latest = self.git_storage.latest_commit(workspace_id).await?;
+                storage_commit_hex = storage_latest
+                    .as_ref()
+                    .map(|m| encode_commit_id(&m.commit_id));
+                db_commit_hex = latest_meta.as_ref().map(|m| encode_commit_id(&m.commit_id));
+                if storage_commit_hex == db_commit_hex {
+                    info!(
+                        workspace_id = %workspace_id,
+                        commit = ?db_commit_hex,
+                        "git_commit_pointer_repaired_by_prune"
+                    );
+                } else {
+                    error!(
+                        workspace_id = %workspace_id,
+                        db_commit = ?db_commit_hex,
+                        storage_commit = ?storage_commit_hex,
+                        "git_commit_pointer_irreparable"
+                    );
+                    anyhow::bail!(
+                        "repository latest commit mismatch between database ({db_commit_hex:?}) and storage ({storage_commit_hex:?})"
+                    );
+                }
+            }
+        }
+
+        self.ensure_storage_commit_integrity(workspace_id).await?;
+        latest_meta = self.latest_commit_meta(workspace_id).await?;
+
+        let use_full_scan = force_full_scan || latest_meta.is_none();
+
+        let previous_index = latest_meta
+            .as_ref()
+            .map(|c| c.file_hash_index.clone())
+            .unwrap_or_default();
+        let dirty_rows = self.fetch_dirty(workspace_id).await?;
+
+        let (upserts, deletes) =
+            Self::sync_build_change_sets(use_full_scan, &dirty_rows, &previous_index);
+
+        // If still nothing to do, optionally push existing head when a remote is configured.
+        if !use_full_scan && upserts.is_empty() && deletes.is_empty() {
+            if push_required {
+                if let Some(latest) = latest_meta.as_ref() {
+                    // Ensure pack chain exists to materialize the commit for push.
+                    let pack_chain = self
+                        .persist_pack_chain(workspace_id, Some(latest.commit_id.as_slice()))
+                        .await?;
+                    if let Some((temp_dir, pack_paths)) = pack_chain {
+                        let repo = Repository::init_bare(temp_dir.path())?;
+                        apply_pack_files(&repo, &pack_paths)?;
+                        let oid = git2::Oid::from_bytes(&latest.commit_id)?;
+                        let pushed =
+                            perform_push(&repo, cfg.unwrap(), &branch_name, oid, force_push)?;
+                        drop(repo);
+                        drop(temp_dir);
+                        let _ = self.clear_dirty(workspace_id).await;
+                        return Ok(GitSyncOutcome {
+                            files_changed: 0,
+                            commit_hash: Some(encode_commit_id(&latest.commit_id)),
+                            pushed,
+                            message: if pushed {
+                                "push completed".to_string()
+                            } else {
+                                "nothing to push".to_string()
+                            },
+                        });
+                    }
+                }
+            }
+            // Nothing to commit/push: clear any leftover dirty and exit.
+            let _ = self.clear_dirty(workspace_id).await;
+            return Ok(GitSyncOutcome {
+                files_changed: 0,
+                commit_hash: latest_meta.map(|c| encode_commit_id(&c.commit_id)),
+                pushed: false,
+                message: "nothing to commit".to_string(),
+            });
+        }
+
+        let committed_at = Utc::now();
+        let author_name = "RefMD".to_string();
+        let author_email = "refmd@example.com".to_string();
+        let message = req
+            .message
+            .clone()
+            .unwrap_or_else(|| "RefMD sync".to_string());
+
+        let precompute = self
+            .sync_precompute_tree_inputs(
+                workspace_id,
+                use_full_scan,
+                previous_index.clone(),
+                &upserts,
+                &deletes,
+            )
+            .await?;
+        let mut precomputed_full_entries = precompute.precomputed_full_entries;
+        let precomputed_upsert_bytes = precompute.precomputed_upsert_bytes;
+        let changed_text_snapshots = precompute.changed_text_snapshots;
+        let mut next_file_hash_index = precompute.next_file_hash_index;
+        let mut files_changed_for_response = precompute.files_changed_for_response;
+
+        // Ensure full-scan entries are available before we touch libgit2 types.
+        if use_full_scan && precomputed_full_entries.is_none() {
+            next_file_hash_index.clear();
+            let current = self.collect_current_state(workspace_id).await?;
+            let mut entries: BTreeMap<String, Vec<u8>> = BTreeMap::new();
+            for (path, snapshot) in current.iter() {
+                let bytes = self.snapshot_bytes(snapshot).await?;
+                entries.insert(path.clone(), bytes);
+                next_file_hash_index.insert(path.clone(), snapshot.hash.clone());
+            }
+            files_changed_for_response = next_file_hash_index.len() as u32;
+            precomputed_full_entries = Some(entries);
+        }
+
+        let mut previous_pack = self
+            .sync_load_previous_pack_chain(workspace_id, cfg, &mut latest_meta)
+            .await?;
+
+        let (meta, pack_bytes, commit_hex, pushed) = {
+            let temp_dir = TempDirBuilder::new()
+                .prefix("git-sync-")
+                .tempdir()
+                .map_err(|e| anyhow::anyhow!(e))?;
+            let repo = Repository::init_bare(temp_dir.path())?;
+
+            if let Some((_, ref pack_paths)) = previous_pack {
+                // Apply full chain to ensure delta bases are present.
+                if let Err(err) = apply_pack_files(&repo, pack_paths) {
+                    let lower = err.to_string().to_lowercase();
+                    let missing_obj = lower.contains("missing") && lower.contains("object");
+                    if !missing_obj {
+                        return Err(err);
+                    }
+
+                    // Try to repair packs by re-bootstrap from remote, then retry apply once more.
+                    warn!(
+                        workspace_id = %workspace_id,
+                        error = %err,
+                        "git_sync_pack_missing_objects_retry_bootstrap"
+                    );
+                    match cfg {
+                        Some(cfg) if !cfg.repository_url.is_empty() => {
+                            previous_pack = self
+                                .sync_rebuild_pack_chain_from_remote(
+                                    workspace_id,
+                                    cfg,
+                                    &branch_name,
+                                    latest_meta.as_ref(),
+                                )
+                                .await?;
+                            if let Some((_, ref pack_paths_retry)) = previous_pack {
+                                if apply_pack_files(&repo, pack_paths_retry).is_err() {
+                                    // Last resort: recover objects and retry once more.
+                                    warn!(
+                                        workspace_id = %workspace_id,
+                                        "git_sync_pack_retry_still_missing_recovering_objects"
+                                    );
+                                    previous_pack = self
+                                        .sync_recover_objects_and_reload_pack_chain(
+                                            workspace_id,
+                                            cfg,
+                                            &mut latest_meta,
+                                        )
+                                        .await?;
+                                    if let Some((_, ref pack_paths_retry2)) = previous_pack {
+                                        apply_pack_files(&repo, pack_paths_retry2)?;
+                                    } else {
+                                        anyhow::bail!(
+                                            "missing pack objects after recovery; pull/import required before sync"
+                                        );
+                                    }
+                                }
+                            } else {
+                                anyhow::bail!(
+                                    "missing pack objects after bootstrap; pull/import required before sync"
+                                );
+                            }
+                        }
+                        // Without a configured remote there is nothing to repair from, so fail
+                        // instead of committing on top of a broken base.
+                        _ => anyhow::bail!(
+                            "missing pack objects for {}; pull/import to repair history",
+                            latest_meta
+                                .as_ref()
+                                .map(|m| encode_commit_id(&m.commit_id))
+                                .unwrap_or_else(|| "unknown".to_string())
+                        ),
+                    }
+                }
+            }
+
+            let full_entries = if use_full_scan {
+                Some(
+                    precomputed_full_entries
+                        .as_ref()
+                        .ok_or_else(|| anyhow!("full-scan entries missing"))?,
+                )
+            } else {
+                None
+            };
+            let (meta, pack_bytes, commit_hex, pushed) = Self::sync_build_commit_pack(
+                workspace_id,
+                &repo,
+                latest_meta.as_ref(),
+                branch_name.as_str(),
+                author_name.as_str(),
+                author_email.as_str(),
+                committed_at,
+                message.as_str(),
+                use_full_scan,
+                full_entries,
+                &deletes,
+                &precomputed_upsert_bytes,
+                next_file_hash_index,
+                cfg,
+                skip_push,
+                force_push,
+            )?;
+
+            drop(repo);
+            let _ = temp_dir.close();
+
+            // files_changed_for_response computed earlier
+
+            (meta, pack_bytes, commit_hex, pushed)
+        };
+
+        if let Some((dir, _)) = previous_pack {
+            drop(dir);
+        }
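// Illustrative sketch (not part of this diff): what the GitSyncOutcome contract below
// means for a caller. A `None` commit_hash with `pushed == false` is the "remote
// rejected the push" case; retrying with the request's `force` flag is the escape
// hatch the code above anticipates. Field names are taken from GitSyncOutcome as
// used in this file.
fn outcome_sketch(outcome: &GitSyncOutcome) -> String {
    match (&outcome.commit_hash, outcome.pushed) {
        (Some(hash), true) => format!("synced {} ({} files)", hash, outcome.files_changed),
        (Some(hash), false) => format!("committed {} locally: {}", hash, outcome.message),
        (None, _) => format!("push failed, will retry: {}", outcome.message),
    }
}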
+        // If push to a configured remote failed, do not advance local commit pointers or clear dirty state.
+        // Leave files as-is so the next sync attempt will retry the push instead of treating the workspace as clean.
+        if push_required && !pushed {
+            return Ok(GitSyncOutcome {
+                files_changed: files_changed_for_response,
+                commit_hash: None,
+                pushed: false,
+                message: "commit created (push failed)".to_string(),
+            });
+        }
+
+        self.sync_persist_commit(
+            workspace_id,
+            use_full_scan,
+            &meta,
+            &pack_bytes,
+            &changed_text_snapshots,
+            latest_meta.as_ref(),
+        )
+        .await?;
+        let outcome_message = if pushed {
+            "sync completed".to_string()
+        } else if skip_push {
+            "sync completed (push skipped)".to_string()
+        } else {
+            "commit created (push failed)".to_string()
+        };
+
+        Ok(GitSyncOutcome {
+            files_changed: files_changed_for_response,
+            commit_hash: Some(commit_hex),
+            pushed,
+            message: outcome_message,
+        })
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/sync/pack_chain.rs b/api/crates/infrastructure/src/git/workspace/sync/pack_chain.rs
new file mode 100644
index 00000000..86a4f431
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync/pack_chain.rs
@@ -0,0 +1,80 @@
+impl GitWorkspaceService {
+    async fn sync_load_previous_pack_chain(
+        &self,
+        workspace_id: Uuid,
+        cfg: Option<&UserGitCfg>,
+        latest_meta: &mut Option<CommitMeta>,
+    ) -> anyhow::Result<Option<(TempDir, Vec<PathBuf>)>> {
+        let Some(prev_meta) = latest_meta.as_ref() else {
+            return Ok(None);
+        };
+        let prev_commit_hex = encode_commit_id(&prev_meta.commit_id);
+        match self
+            .persist_pack_chain(workspace_id, Some(prev_meta.commit_id.as_slice()))
+            .await?
+        {
+            Some(chain) => Ok(Some(chain)),
+            None => {
+                // Attempt to repair from remote and retry once.
+                if let Some(cfg) = cfg {
+                    if !cfg.repository_url.is_empty() {
+                        warn!(
+                            workspace_id = %workspace_id,
+                            commit = %prev_commit_hex,
+                            "git_sync_missing_pack_chain_recover"
+                        );
+                        self.recover_missing_objects(workspace_id, cfg).await?;
+                        *latest_meta = self.ensure_latest_meta(workspace_id).await?;
+                        if let Some(latest) = latest_meta.as_ref() {
+                            let chain = self
+                                .persist_pack_chain(
+                                    workspace_id,
+                                    Some(latest.commit_id.as_slice()),
+                                )
+                                .await?;
+                            if chain.is_some() {
+                                return Ok(chain);
+                            }
+                        }
+                    }
+                }
+                warn!(workspace_id = %workspace_id, "git_sync_missing_pack_chain_abort");
+                anyhow::bail!(
+                    "missing pack data for current head {}; pull/import required before sync",
+                    prev_commit_hex
+                );
+            }
+        }
+    }
+
+    async fn sync_rebuild_pack_chain_from_remote(
+        &self,
+        workspace_id: Uuid,
+        cfg: &UserGitCfg,
+        branch_name: &str,
+        latest_meta: Option<&CommitMeta>,
+    ) -> anyhow::Result<Option<(TempDir, Vec<PathBuf>)>> {
+        self.bootstrap_remote_history(workspace_id, cfg, branch_name)
+            .await?;
+        self.persist_pack_chain(
+            workspace_id,
+            latest_meta.map(|m| m.commit_id.as_slice()),
+        )
+        .await
+    }
+
+    async fn sync_recover_objects_and_reload_pack_chain(
+        &self,
+        workspace_id: Uuid,
+        cfg: &UserGitCfg,
+        latest_meta: &mut Option<CommitMeta>,
+    ) -> anyhow::Result<Option<(TempDir, Vec<PathBuf>)>> {
+        self.recover_missing_objects(workspace_id, cfg).await?;
+        *latest_meta = self.ensure_latest_meta(workspace_id).await?;
+        self.persist_pack_chain(
+            workspace_id,
+            latest_meta.as_ref().map(|m| m.commit_id.as_slice()),
+        )
+        .await
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/sync/persist.rs b/api/crates/infrastructure/src/git/workspace/sync/persist.rs
new file mode 100644
index 00000000..8774cc3a
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync/persist.rs
@@ -0,0 +1,131 @@
+impl GitWorkspaceService {
+    async fn sync_persist_commit(
+        &self,
+        workspace_id: Uuid,
+        use_full_scan: bool,
+        meta: &CommitMeta,
+        pack_bytes: &[u8],
+        changed_text_snapshots: &HashMap<String, FileSnapshot>,
+        latest_meta_for_rollback: Option<&CommitMeta>,
+    ) ->
anyhow::Result<()> {
+        let mut tx = self.pool.begin().await?;
+        let repo_row =
+            sqlx::query("SELECT initialized FROM git_repository_state WHERE workspace_id = $1")
+                .bind(workspace_id)
+                .fetch_optional(&mut *tx)
+                .await?;
+        let Some(repo_row) = repo_row else {
+            tx.rollback().await.ok();
+            anyhow::bail!("repository not initialized")
+        };
+        let initialized: bool = repo_row.get("initialized");
+        if !initialized {
+            tx.rollback().await.ok();
+            anyhow::bail!("repository not initialized")
+        }
+
+        sqlx::query(
+            r#"INSERT INTO git_commits (
+                commit_id,
+                parent_commit_id,
+                workspace_id,
+                message,
+                author_name,
+                author_email,
+                committed_at,
+                pack_key,
+                file_hash_index
+            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)"#,
+        )
+        .bind(meta.commit_id.clone())
+        .bind(meta.parent_commit_id.clone())
+        .bind(workspace_id)
+        .bind(meta.message.clone())
+        .bind(meta.author_name.clone())
+        .bind(meta.author_email.clone())
+        .bind(meta.committed_at)
+        .bind(meta.pack_key.clone())
+        .bind(Json(&meta.file_hash_index))
+        .execute(&mut *tx)
+        .await?;
+
+        sqlx::query("UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1")
+            .bind(workspace_id)
+            .execute(&mut *tx)
+            .await?;
+
+        let snapshot_keys = if use_full_scan {
+            let current = self.collect_current_state(workspace_id).await?;
+            match self
+                .store_commit_snapshots(workspace_id, &meta.commit_id, &current)
+                .await
+            {
+                Ok(keys) => keys,
+                Err(err) => {
+                    tx.rollback().await.ok();
+                    return Err(err);
+                }
+            }
+        } else {
+            match self
+                .store_commit_snapshots(workspace_id, &meta.commit_id, changed_text_snapshots)
+                .await
+            {
+                Ok(keys) => keys,
+                Err(err) => {
+                    tx.rollback().await.ok();
+                    return Err(err);
+                }
+            }
+        };
+
+        if let Err(err) = self
+            .git_storage
+            .store_pack(workspace_id, pack_bytes, meta)
+            .await
+        {
+            for key in snapshot_keys.iter().rev() {
+                let _ = self.git_storage.delete_blob(key).await;
+            }
+            tx.rollback().await.ok();
+            return Err(err.into());
+        }
+
+        if let Err(err) = self
+            .git_storage
+            .set_latest_commit(workspace_id, Some(meta))
+            .await
+        {
+            let _ = self
+                .git_storage
+                .delete_pack(workspace_id, &meta.commit_id)
+                .await;
+            for key in snapshot_keys.iter().rev() {
+                let _ = self.git_storage.delete_blob(key).await;
+            }
+            tx.rollback().await.ok();
+            return Err(err.into());
+        }
+
+        if let Err(err) = tx.commit().await {
+            let _ = self
+                .git_storage
+                .delete_pack(workspace_id, &meta.commit_id)
+                .await;
+            for key in snapshot_keys.iter().rev() {
+                let _ = self.git_storage.delete_blob(key).await;
+            }
+            let _ = self
+                .git_storage
+                .set_latest_commit(workspace_id, latest_meta_for_rollback)
+                .await;
+            return Err(err.into());
+        }
+
+        self.clear_dirty(workspace_id).await.map_err(|err| {
+            error!(workspace_id = %workspace_id, error = %err, "git_import_clear_dirty_failed");
+            err
+        })?;
+        Ok(())
+    }
+}
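// Illustrative sketch (not part of this diff): the two-phase pattern that
// precompute.rs below exists to enforce. `git2::Repository` is not `Send`, so a
// future holding it across an `.await` cannot be scheduled on a multi-threaded
// executor; gathering all bytes first keeps every await on plain `Send` data, and
// the libgit2 work then runs without awaiting. `read_bytes` is a hypothetical
// stand-in for the storage/export reads done by the real method.
async fn two_phase_sketch(paths: Vec<String>) -> anyhow::Result<()> {
    use std::collections::BTreeMap;
    async fn read_bytes(_path: &str) -> anyhow::Result<Vec<u8>> {
        Ok(Vec::new())
    }

    // Phase 1: async I/O over Send-able data only.
    let mut inputs: BTreeMap<String, Vec<u8>> = BTreeMap::new();
    for p in paths {
        let bytes = read_bytes(&p).await?;
        inputs.insert(p, bytes);
    }

    // Phase 2: synchronous libgit2 work; no awaits while `repo` is alive.
    let dir = tempfile::tempdir()?;
    let repo = git2::Repository::init_bare(dir.path())?;
    for bytes in inputs.values() {
        let _blob = repo.blob(bytes)?;
    }
    drop(repo);
    Ok(())
}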
diff --git a/api/crates/infrastructure/src/git/workspace/sync/precompute.rs b/api/crates/infrastructure/src/git/workspace/sync/precompute.rs
new file mode 100644
index 00000000..96522526
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/sync/precompute.rs
@@ -0,0 +1,106 @@
+impl GitWorkspaceService {
+    async fn sync_precompute_tree_inputs(
+        &self,
+        workspace_id: Uuid,
+        use_full_scan: bool,
+        previous_index: HashMap<String, String>,
+        upserts: &BTreeMap<String, DirtyUpsert>,
+        deletes: &BTreeSet<String>,
+    ) -> anyhow::Result<SyncPrecompute> {
+        // Precompute data needed for tree build and meta before creating libgit2 objects.
+        // This avoids holding non-Send libgit2 types across await points.
+        let mut precomputed_full_entries: Option<BTreeMap<String, Vec<u8>>> = None;
+        let mut precomputed_upsert_bytes: BTreeMap<String, Vec<u8>> = BTreeMap::new();
+        let mut changed_text_snapshots: HashMap<String, FileSnapshot> = HashMap::new();
+        let mut next_file_hash_index: HashMap<String, String> = previous_index;
+        let files_changed_for_response: u32;
+
+        if use_full_scan {
+            next_file_hash_index.clear();
+            let current = self.collect_current_state(workspace_id).await?;
+            let mut entries: BTreeMap<String, Vec<u8>> = BTreeMap::new();
+            for (path, snapshot) in current.iter() {
+                let bytes = self.snapshot_bytes(snapshot).await?;
+                entries.insert(path.clone(), bytes);
+                next_file_hash_index.insert(path.clone(), snapshot.hash.clone());
+            }
+            files_changed_for_response = next_file_hash_index.len() as u32;
+            precomputed_full_entries = Some(entries);
+        } else {
+            let mut stale_paths: Vec<String> = Vec::new();
+            for (path, up) in upserts.iter() {
+                if up.is_text {
+                    match self
+                        .export_markdown_for_repo_path(workspace_id, path)
+                        .await?
+                    {
+                        Some((bytes, hash)) => {
+                            precomputed_upsert_bytes.insert(path.clone(), bytes.clone());
+                            next_file_hash_index.insert(path.clone(), hash.clone());
+                            changed_text_snapshots.insert(
+                                path.clone(),
+                                FileSnapshot {
+                                    hash,
+                                    data: FileSnapshotData::Inline(bytes),
+                                    is_text: true,
+                                },
+                            );
+                        }
+                        None => {
+                            stale_paths.push(path.clone());
+                        }
+                    }
+                    continue;
+                }
+
+                let storage_rel = format!("{}/{}", workspace_id, path);
+                let abs = self.storage.absolute_from_relative(&storage_rel);
+                match self.storage.read_bytes(abs.as_path()).await {
+                    Ok(bytes) => {
+                        precomputed_upsert_bytes.insert(path.clone(), bytes.clone());
+                        let hash = match up.content_hash.as_ref() {
+                            Some(h) => h.clone(),
+                            None => sha256_hex(&bytes),
+                        };
+                        next_file_hash_index.insert(path.clone(), hash);
+                    }
+                    Err(e) => {
+                        let skip = e
+                            .downcast_ref::<std::io::Error>()
+                            .map(|ioe| ioe.kind() == ErrorKind::NotFound)
+                            .unwrap_or_else(|| e.to_string().to_lowercase().contains("not found"));
+                        if skip {
+                            stale_paths.push(path.clone());
+                            continue;
+                        } else {
+                            return Err(e.into());
+                        }
+                    }
+                }
+            }
+            if !stale_paths.is_empty() {
+                for p in stale_paths {
+                    let _ = sqlx::query(
+                        "DELETE FROM git_dirty_files WHERE workspace_id = $1 AND path = $2",
+                    )
+                    .bind(workspace_id)
+                    .bind(&p)
+                    .execute(&self.pool)
+                    .await;
+                }
+            }
+            for d in deletes.iter() {
+                next_file_hash_index.remove(d);
+            }
+            files_changed_for_response = (upserts.len() + deletes.len()) as u32;
+        }
+
+        Ok(SyncPrecompute {
+            precomputed_full_entries,
+            precomputed_upsert_bytes,
+            changed_text_snapshots,
+            next_file_hash_index,
+            files_changed_for_response,
+        })
+    }
+}
diff --git a/api/crates/infrastructure/src/git/workspace/workspace_service.rs b/api/crates/infrastructure/src/git/workspace/workspace_service.rs
new file mode 100644
index 00000000..d1053d18
--- /dev/null
+++ b/api/crates/infrastructure/src/git/workspace/workspace_service.rs
@@ -0,0 +1,3 @@
+include!("service/history.rs");
+include!("service/state.rs");
+include!("service/synthetic.rs");
diff --git a/api/src/infrastructure/auth/github.rs b/api/crates/infrastructure/src/identity/auth/github.rs
similarity index 97%
rename from api/src/infrastructure/auth/github.rs
rename to api/crates/infrastructure/src/identity/auth/github.rs
index 797107e4..574dc017 100644
--- a/api/src/infrastructure/auth/github.rs
+++ b/api/crates/infrastructure/src/identity/auth/github.rs
@@ -3,11 +3,11 @@ use reqwest::header::{ACCEPT, HeaderMap, HeaderValue, USER_AGENT};
 use serde::Deserialize;
 use tracing::warn;
 
-use crate::application::services::auth::external::{
+use
application::core::services::errors::ServiceError;
+use application::identity::services::auth::external::{
     ExternalAuthIdentity, ExternalAuthPayload, ExternalAuthProviderDescriptor,
     ExternalAuthProviderKind, ExternalAuthVerifier,
 };
-use crate::application::services::errors::ServiceError;
 
 const AUTH_URL: &str = "https://github.com/login/oauth/authorize";
 const TOKEN_URL: &str = "https://github.com/login/oauth/access_token";
@@ -238,7 +238,7 @@ impl ExternalAuthVerifier for GithubOAuthProvider {
             subject: user.id.to_string(),
             email: Some(email),
             email_verified: true,
-            name: user.name.or_else(|| Some(user.login)),
+            name: user.name.or(Some(user.login)),
             avatar_url: user.avatar_url,
         })
     }
diff --git a/api/src/infrastructure/auth/google.rs b/api/crates/infrastructure/src/identity/auth/google.rs
similarity index 94%
rename from api/src/infrastructure/auth/google.rs
rename to api/crates/infrastructure/src/identity/auth/google.rs
index 7db44d6e..91dfbc02 100644
--- a/api/src/infrastructure/auth/google.rs
+++ b/api/crates/infrastructure/src/identity/auth/google.rs
@@ -4,11 +4,11 @@ use async_trait::async_trait;
 use serde::Deserialize;
 use tracing::warn;
 
-use crate::application::services::auth::external::{
+use application::core::services::errors::ServiceError;
+use application::identity::services::auth::external::{
     ExternalAuthIdentity, ExternalAuthPayload, ExternalAuthProviderDescriptor,
     ExternalAuthProviderKind, ExternalAuthVerifier,
 };
-use crate::application::services::errors::ServiceError;
 
 const TOKENINFO_URL: &str = "https://oauth2.googleapis.com/tokeninfo";
 
@@ -56,10 +56,10 @@ struct GoogleTokenInfo {
 }
 
 fn parse_email_verified(value: Option<String>) -> bool {
-    match value.unwrap_or_default().to_lowercase().as_str() {
-        "true" | "1" | "yes" => true,
-        _ => false,
-    }
+    matches!(
+        value.unwrap_or_default().to_lowercase().as_str(),
+        "true" | "1" | "yes"
+    )
 }
 
 #[async_trait]
diff --git a/api/src/infrastructure/auth/mod.rs b/api/crates/infrastructure/src/identity/auth/mod.rs
similarity index 100%
rename from api/src/infrastructure/auth/mod.rs
rename to api/crates/infrastructure/src/identity/auth/mod.rs
diff --git a/api/src/infrastructure/auth/oidc.rs b/api/crates/infrastructure/src/identity/auth/oidc.rs
similarity index 94%
rename from api/src/infrastructure/auth/oidc.rs
rename to api/crates/infrastructure/src/identity/auth/oidc.rs
index ad4ea1c9..4a96c0ac 100644
--- a/api/src/infrastructure/auth/oidc.rs
+++ b/api/crates/infrastructure/src/identity/auth/oidc.rs
@@ -3,12 +3,22 @@ use reqwest::Url;
 use serde::Deserialize;
 use tracing::warn;
 
-use crate::application::services::auth::external::{
+use application::core::services::errors::ServiceError;
+use application::identity::services::auth::external::{
     ExternalAuthIdentity, ExternalAuthPayload, ExternalAuthProviderDescriptor,
     ExternalAuthProviderKind, ExternalAuthVerifier,
 };
-use crate::application::services::errors::ServiceError;
-use crate::bootstrap::config::OidcOAuthConfig;
+
+#[derive(Debug, Clone)]
+pub struct OidcOAuthProviderConfig {
+    pub issuer_url: String,
+    pub discovery_url: Option<String>,
+    pub client_id: String,
+    pub client_secret: String,
+    pub redirect_uri: Option<String>,
+    pub scopes: Vec<String>,
+    pub display_name: Option<String>,
+}
 
 #[derive(Debug, Clone)]
 pub struct OidcIdentityProvider {
@@ -91,8 +101,8 @@ fn infer_display_name(issuer: &str) -> Option<String> {
 }
 
 impl OidcIdentityProvider {
-    pub async fn discover(cfg: OidcOAuthConfig) -> anyhow::Result<Self> {
-        let OidcOAuthConfig {
+    pub async fn discover(cfg: OidcOAuthProviderConfig) -> anyhow::Result<Self> {
+        let
OidcOAuthProviderConfig {
             issuer_url,
             discovery_url,
             client_id,
diff --git a/api/crates/infrastructure/src/identity/crypto.rs b/api/crates/infrastructure/src/identity/crypto.rs
new file mode 100644
index 00000000..10f332f8
--- /dev/null
+++ b/api/crates/infrastructure/src/identity/crypto.rs
@@ -0,0 +1,35 @@
+use application::core::ports::errors::PortResult;
+use application::identity::ports::secret_hasher::SecretHasher;
+use argon2::{
+    Argon2,
+    password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
+};
+use password_hash::rand_core::OsRng;
+
+#[derive(Debug, Default)]
+pub struct Argon2SecretHasher;
+
+impl SecretHasher for Argon2SecretHasher {
+    fn hash_secret(&self, secret: &str) -> PortResult<String> {
+        let out: anyhow::Result<String> = (|| {
+            let salt = SaltString::generate(&mut OsRng);
+            let hash = Argon2::default()
+                .hash_password(secret.as_bytes(), &salt)
+                .map_err(|e| anyhow::anyhow!(e.to_string()))?
+                .to_string();
+            Ok(hash)
+        })();
+        out.map_err(Into::into)
+    }
+
+    fn verify_secret(&self, secret: &str, secret_hash: &str) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = (|| {
+            let parsed =
+                PasswordHash::new(secret_hash).map_err(|e| anyhow::anyhow!(e.to_string()))?;
+            Ok(Argon2::default()
+                .verify_password(secret.as_bytes(), &parsed)
+                .is_ok())
+        })();
+        out.map_err(Into::into)
+    }
+}
diff --git a/api/crates/infrastructure/src/identity/db/mod.rs b/api/crates/infrastructure/src/identity/db/mod.rs
new file mode 100644
index 00000000..21b552a0
--- /dev/null
+++ b/api/crates/infrastructure/src/identity/db/mod.rs
@@ -0,0 +1 @@
+pub mod repositories;
diff --git a/api/crates/infrastructure/src/identity/db/repositories/api_token_repository_sqlx/mod.rs b/api/crates/infrastructure/src/identity/db/repositories/api_token_repository_sqlx/mod.rs
new file mode 100644
index 00000000..c889129d
--- /dev/null
+++ b/api/crates/infrastructure/src/identity/db/repositories/api_token_repository_sqlx/mod.rs
@@ -0,0 +1,150 @@
+use async_trait::async_trait;
+use sqlx::Row;
+use uuid::Uuid;
+
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::identity::ports::api_token_repository::{
+    ApiToken, ApiTokenRepository, ApiTokenSecret,
+};
+
+pub struct SqlxApiTokenRepository {
+    pool: PgPool,
+}
+
+impl SqlxApiTokenRepository {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
+
+#[async_trait]
+impl ApiTokenRepository for SqlxApiTokenRepository {
+    async fn create(
+        &self,
+        workspace_id: Uuid,
+        owner_id: Uuid,
+        name: &str,
+        token_hash: &str,
+        token_digest: &str,
+    ) -> PortResult<ApiToken> {
+        let out: anyhow::Result<ApiToken> = async {
+            let row = sqlx::query(
+                r#"INSERT INTO api_tokens (workspace_id, owner_id, name, token_hash, token_digest)
+                   VALUES ($1, $2, $3, $4, $5)
+                   RETURNING id, workspace_id, owner_id, name, created_at, last_used_at, revoked_at"#,
+            )
+            .bind(workspace_id)
+            .bind(owner_id)
+            .bind(name)
+            .bind(token_hash)
+            .bind(token_digest)
+            .fetch_one(&self.pool)
+            .await?;
+
+            Ok(ApiToken {
+                id: row.get("id"),
+                workspace_id: row.get("workspace_id"),
+                owner_id: row.get("owner_id"),
+                name: row.get("name"),
+                created_at: row.get("created_at"),
+                last_used_at: row.try_get("last_used_at").ok(),
+                revoked_at: row.try_get("revoked_at").ok(),
+            })
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn list_active(&self, workspace_id: Uuid) -> PortResult<Vec<ApiToken>> {
+        let out: anyhow::Result<Vec<ApiToken>> = async {
+            let rows = sqlx::query(
+                r#"SELECT id, workspace_id, owner_id, name, created_at, last_used_at, revoked_at
+                   FROM api_tokens
+                   WHERE workspace_id =
$1
+                   ORDER BY created_at DESC"#,
+            )
+            .bind(workspace_id)
+            .fetch_all(&self.pool)
+            .await?;
+
+            Ok(rows
+                .into_iter()
+                .map(|row| ApiToken {
+                    id: row.get("id"),
+                    workspace_id: row.get("workspace_id"),
+                    owner_id: row.get("owner_id"),
+                    name: row.get("name"),
+                    created_at: row.get("created_at"),
+                    last_used_at: row.try_get("last_used_at").ok(),
+                    revoked_at: row.try_get("revoked_at").ok(),
+                })
+                .collect())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn revoke(&self, workspace_id: Uuid, token_id: Uuid) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            let row = sqlx::query(
+                r#"UPDATE api_tokens
+                   SET revoked_at = now()
+                   WHERE id = $1 AND workspace_id = $2 AND revoked_at IS NULL
+                   RETURNING id"#,
+            )
+            .bind(token_id)
+            .bind(workspace_id)
+            .fetch_optional(&self.pool)
+            .await?;
+            Ok(row.is_some())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn find_by_digest(&self, digest: &str) -> PortResult<Option<ApiTokenSecret>> {
+        let out: anyhow::Result<Option<ApiTokenSecret>> = async {
+            let row = sqlx::query(
+                r#"SELECT id, workspace_id, owner_id, name, created_at, last_used_at, revoked_at, token_hash, token_digest
+                   FROM api_tokens
+                   WHERE token_digest = $1
+                   LIMIT 1"#,
+            )
+            .bind(digest)
+            .fetch_optional(&self.pool)
+            .await?;
+
+            Ok(row.map(|row| {
+                let token = ApiToken {
+                    id: row.get("id"),
+                    workspace_id: row.get("workspace_id"),
+                    owner_id: row.get("owner_id"),
+                    name: row.get("name"),
+                    created_at: row.get("created_at"),
+                    last_used_at: row.try_get("last_used_at").ok(),
+                    revoked_at: row.try_get("revoked_at").ok(),
+                };
+                ApiTokenSecret {
+                    token,
+                    token_hash: row.get("token_hash"),
+                    token_digest: row.get("token_digest"),
+                }
+            }))
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn touch_last_used(&self, token_id: Uuid) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            sqlx::query("UPDATE api_tokens SET last_used_at = now() WHERE id = $1")
+                .bind(token_id)
+                .execute(&self.pool)
+                .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
diff --git a/api/crates/infrastructure/src/identity/db/repositories/mod.rs b/api/crates/infrastructure/src/identity/db/repositories/mod.rs
new file mode 100644
index 00000000..2e6528ce
--- /dev/null
+++ b/api/crates/infrastructure/src/identity/db/repositories/mod.rs
@@ -0,0 +1,4 @@
+pub mod api_token_repository_sqlx;
+pub mod user_repository_sqlx;
+pub mod user_session_repository_sqlx;
+pub mod user_shortcut_repository_sqlx;
diff --git a/api/crates/infrastructure/src/identity/db/repositories/user_repository_sqlx/mod.rs b/api/crates/infrastructure/src/identity/db/repositories/user_repository_sqlx/mod.rs
new file mode 100644
index 00000000..d58e9da6
--- /dev/null
+++ b/api/crates/infrastructure/src/identity/db/repositories/user_repository_sqlx/mod.rs
@@ -0,0 +1,160 @@
+use async_trait::async_trait;
+use sqlx::Row;
+use uuid::Uuid;
+
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::identity::ports::user_repository::{UserRepository, UserRow};
+
+pub struct SqlxUserRepository {
+    pub pool: PgPool,
+}
+
+impl SqlxUserRepository {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
+
+#[async_trait]
+impl UserRepository for SqlxUserRepository {
+    async fn create_user(
+        &self,
+        id: Uuid,
+        email: &str,
+        name: &str,
+        password_hash: Option<&str>,
+        default_workspace_id: Uuid,
+    ) -> PortResult<UserRow> {
+        let out: anyhow::Result<UserRow> = async {
+            let row = sqlx::query(
+                r#"INSERT INTO users (id, email, name, password_hash, default_workspace_id)
+                   VALUES ($1, $2, $3, $4, $5)
+                   RETURNING id, email,
name, password_hash"#, + ) + .bind(id) + .bind(email) + .bind(name) + .bind(password_hash) + .bind(default_workspace_id) + .fetch_one(&self.pool) + .await?; + Ok(UserRow { + id: row.get("id"), + email: row.get("email"), + name: row.get("name"), + password_hash: row.try_get("password_hash").ok(), + }) + } + .await; + out.map_err(Into::into) + } + + async fn find_by_email(&self, email: &str) -> PortResult> { + let out: anyhow::Result> = async { + let row = + sqlx::query(r#"SELECT id, email, name, password_hash FROM users WHERE email = $1"#) + .bind(email) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| UserRow { + id: r.get("id"), + email: r.get("email"), + name: r.get("name"), + password_hash: r.try_get("password_hash").ok(), + })) + } + .await; + out.map_err(Into::into) + } + + async fn find_by_external_identity( + &self, + provider: &str, + subject: &str, + ) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#"SELECT u.id, u.email, u.name, u.password_hash + FROM user_external_accounts a + JOIN users u ON u.id = a.user_id + WHERE a.provider = $1 AND a.subject = $2"#, + ) + .bind(provider) + .bind(subject) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| UserRow { + id: r.get("id"), + email: r.get("email"), + name: r.get("name"), + password_hash: r.try_get("password_hash").ok(), + })) + } + .await; + out.map_err(Into::into) + } + + async fn find_by_id(&self, id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query(r#"SELECT id, email, name FROM users WHERE id = $1"#) + .bind(id) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|r| UserRow { + id: r.get("id"), + email: r.get("email"), + name: r.get("name"), + password_hash: None, + })) + } + .await; + out.map_err(Into::into) + } + + async fn link_external_identity( + &self, + user_id: Uuid, + provider: &str, + subject: &str, + ) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#"INSERT INTO user_external_accounts (user_id, provider, subject) + VALUES ($1, $2, $3) + ON CONFLICT (provider, subject) DO UPDATE SET user_id = EXCLUDED.user_id"#, + ) + .bind(user_id) + .bind(provider) + .bind(subject) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn delete_user(&self, id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let res = sqlx::query("DELETE FROM users WHERE id = $1") + .bind(id) + .execute(&self.pool) + .await?; + Ok(res.rows_affected() > 0) + } + .await; + out.map_err(Into::into) + } + + async fn list_user_ids(&self) -> PortResult> { + let out: anyhow::Result> = async { + let rows = sqlx::query("SELECT id FROM users") + .fetch_all(&self.pool) + .await?; + Ok(rows.into_iter().map(|r| r.get("id")).collect()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/identity/db/repositories/user_session_repository_sqlx/mod.rs b/api/crates/infrastructure/src/identity/db/repositories/user_session_repository_sqlx/mod.rs new file mode 100644 index 00000000..9209cc0d --- /dev/null +++ b/api/crates/infrastructure/src/identity/db/repositories/user_session_repository_sqlx/mod.rs @@ -0,0 +1,281 @@ +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::identity::ports::user_session_repository::{ + UserSessionRecord, UserSessionRepository, UserSessionSecret, +}; + +pub struct SqlxUserSessionRepository { + 
pool: PgPool, +} + +impl SqlxUserSessionRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } + + fn map_record(row: sqlx::postgres::PgRow) -> UserSessionRecord { + UserSessionRecord { + id: row.get("id"), + user_id: row.get("user_id"), + workspace_id: row.get("workspace_id"), + user_agent: row.try_get("user_agent").ok(), + ip_address: row.try_get("ip_address").ok(), + remember_me: row.get("remember_me"), + created_at: row.get("created_at"), + last_seen_at: row.get("last_seen_at"), + expires_at: row.get("expires_at"), + revoked_at: row.try_get("revoked_at").ok(), + } + } +} + +#[async_trait] +impl UserSessionRepository for SqlxUserSessionRepository { + async fn create( + &self, + user_id: Uuid, + workspace_id: Uuid, + token_hash: &str, + token_digest: &str, + expires_at: DateTime, + remember_me: bool, + user_agent: Option<&str>, + ip_address: Option<&str>, + ) -> PortResult { + let out: anyhow::Result = async { + let row = sqlx::query( + r#"INSERT INTO user_sessions + (user_id, workspace_id, token_hash, token_digest, expires_at, remember_me, user_agent, ip_address) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + RETURNING id, user_id, workspace_id, user_agent, ip_address, remember_me, created_at, last_seen_at, expires_at, revoked_at"#, + ) + .bind(user_id) + .bind(workspace_id) + .bind(token_hash) + .bind(token_digest) + .bind(expires_at) + .bind(remember_me) + .bind(user_agent) + .bind(ip_address) + .fetch_one(&self.pool) + .await?; + + Ok(Self::map_record(row)) + } + .await; + out.map_err(Into::into) + } + + async fn find_by_digest(&self, token_digest: &str) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#"SELECT id, user_id, workspace_id, user_agent, ip_address, remember_me, + created_at, last_seen_at, expires_at, revoked_at, token_hash, token_digest + FROM user_sessions + WHERE token_digest = $1 + LIMIT 1"#, + ) + .bind(token_digest) + .fetch_optional(&self.pool) + .await?; + + Ok(row.map(|row| UserSessionSecret { + token_hash: row.get("token_hash"), + token_digest: row.get("token_digest"), + session: Self::map_record(row), + })) + } + .await; + out.map_err(Into::into) + } + + async fn update_token( + &self, + session_id: Uuid, + expected_token_digest: &str, + token_hash: &str, + token_digest: &str, + expires_at: DateTime, + user_agent: Option<&str>, + ip_address: Option<&str>, + workspace_id: Option, + ) -> PortResult { + let out: anyhow::Result = async { + let row = sqlx::query( + r#"UPDATE user_sessions + SET token_hash = $2, + token_digest = $3, + expires_at = $4, + last_seen_at = now(), + user_agent = $5, + ip_address = $6, + workspace_id = COALESCE($8, workspace_id) + WHERE id = $1 + AND revoked_at IS NULL + AND token_digest = $7 + RETURNING id"#, + ) + .bind(session_id) + .bind(token_hash) + .bind(token_digest) + .bind(expires_at) + .bind(user_agent) + .bind(ip_address) + .bind(expected_token_digest) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + + Ok(row.is_some()) + } + .await; + out.map_err(Into::into) + } + + async fn update_workspace(&self, session_id: Uuid, workspace_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let row = sqlx::query( + r#"UPDATE user_sessions + SET workspace_id = $2 + WHERE id = $1 AND revoked_at IS NULL + RETURNING id"#, + ) + .bind(session_id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + + Ok(row.is_some()) + } + .await; + out.map_err(Into::into) + } + + async fn touch(&self, session_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { 
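+            // Heartbeat only: refresh last_seen_at without touching token or expiry;
+            // rotation is handled by update_token above and revocation by the
+            // revoke* methods below.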
+ sqlx::query("UPDATE user_sessions SET last_seen_at = now() WHERE id = $1") + .bind(session_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn list_for_user(&self, user_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let rows = sqlx::query( + r#"SELECT id, user_id, workspace_id, user_agent, ip_address, remember_me, + created_at, last_seen_at, expires_at, revoked_at + FROM user_sessions + WHERE user_id = $1 + ORDER BY last_seen_at DESC"#, + ) + .bind(user_id) + .fetch_all(&self.pool) + .await?; + + Ok(rows.into_iter().map(Self::map_record).collect()) + } + .await; + out.map_err(Into::into) + } + + async fn find_by_id(&self, session_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#"SELECT id, user_id, workspace_id, user_agent, ip_address, remember_me, + created_at, last_seen_at, expires_at, revoked_at + FROM user_sessions + WHERE id = $1 + LIMIT 1"#, + ) + .bind(session_id) + .fetch_optional(&self.pool) + .await?; + + Ok(row.map(Self::map_record)) + } + .await; + out.map_err(Into::into) + } + + async fn revoke(&self, session_id: Uuid) -> PortResult { + let out: anyhow::Result = async { + let affected = sqlx::query( + r#"UPDATE user_sessions + SET revoked_at = now() + WHERE id = $1 AND revoked_at IS NULL"#, + ) + .bind(session_id) + .execute(&self.pool) + .await?; + Ok(affected.rows_affected() > 0) + } + .await; + out.map_err(Into::into) + } + + async fn revoke_by_digest(&self, token_digest: &str) -> PortResult { + let out: anyhow::Result = async { + let affected = sqlx::query( + r#"UPDATE user_sessions + SET revoked_at = now() + WHERE token_digest = $1 AND revoked_at IS NULL"#, + ) + .bind(token_digest) + .execute(&self.pool) + .await?; + Ok(affected.rows_affected() > 0) + } + .await; + out.map_err(Into::into) + } + + async fn revoke_all_for_user(&self, user_id: Uuid) -> PortResult<()> { + let out: anyhow::Result<()> = async { + sqlx::query( + r#"UPDATE user_sessions + SET revoked_at = now() + WHERE user_id = $1 AND revoked_at IS NULL"#, + ) + .bind(user_id) + .execute(&self.pool) + .await?; + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn delete_expired(&self, before: DateTime, batch_size: i64) -> PortResult { + let out: anyhow::Result = async { + let rows = sqlx::query( + r#"WITH expired AS ( + SELECT id + FROM user_sessions + WHERE expires_at < $1 + ORDER BY expires_at ASC + LIMIT $2 + ) + DELETE FROM user_sessions + WHERE id IN (SELECT id FROM expired) + RETURNING 1"#, + ) + .bind(before) + .bind(batch_size) + .fetch_all(&self.pool) + .await?; + + Ok(rows.len() as u64) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/identity/db/repositories/user_shortcut_repository_sqlx/mod.rs b/api/crates/infrastructure/src/identity/db/repositories/user_shortcut_repository_sqlx/mod.rs new file mode 100644 index 00000000..cbe1fbca --- /dev/null +++ b/api/crates/infrastructure/src/identity/db/repositories/user_shortcut_repository_sqlx/mod.rs @@ -0,0 +1,79 @@ +use async_trait::async_trait; +use sqlx::Row; +use uuid::Uuid; + +use crate::core::db::PgPool; +use application::core::ports::errors::PortResult; +use application::identity::ports::user_shortcuts::user_shortcut_repository::{ + UserShortcutProfile, UserShortcutRepository, +}; + +pub struct SqlxUserShortcutRepository { + pool: PgPool, +} + +impl SqlxUserShortcutRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl UserShortcutRepository 
for SqlxUserShortcutRepository { + async fn get_by_user(&self, user_id: Uuid) -> PortResult> { + let out: anyhow::Result> = async { + let row = sqlx::query( + r#"SELECT user_id, bindings, leader_key, updated_at + FROM user_shortcuts + WHERE user_id = $1 + LIMIT 1"#, + ) + .bind(user_id) + .fetch_optional(&self.pool) + .await?; + + Ok(row.map(|row| UserShortcutProfile { + user_id: row.get("user_id"), + bindings: row.get("bindings"), + leader_key: row.try_get("leader_key").ok(), + updated_at: row.get("updated_at"), + })) + } + .await; + out.map_err(Into::into) + } + + async fn upsert( + &self, + user_id: Uuid, + bindings: serde_json::Value, + leader_key: Option, + ) -> PortResult { + let out: anyhow::Result = async { + let row = sqlx::query( + r#"INSERT INTO user_shortcuts (user_id, bindings, leader_key, updated_at) + VALUES ($1, $2, $3, now()) + ON CONFLICT (user_id) + DO UPDATE SET + bindings = EXCLUDED.bindings, + leader_key = EXCLUDED.leader_key, + updated_at = now() + RETURNING user_id, bindings, leader_key, updated_at"#, + ) + .bind(user_id) + .bind(bindings) + .bind(leader_key) + .fetch_one(&self.pool) + .await?; + + Ok(UserShortcutProfile { + user_id: row.get("user_id"), + bindings: row.get("bindings"), + leader_key: row.try_get("leader_key").ok(), + updated_at: row.get("updated_at"), + }) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/identity/jwt.rs b/api/crates/infrastructure/src/identity/jwt.rs new file mode 100644 index 00000000..96397ec8 --- /dev/null +++ b/api/crates/infrastructure/src/identity/jwt.rs @@ -0,0 +1,89 @@ +use application::identity::ports::jwt_codec::{ + JwtClaims, JwtCodec, JwtDecodeError, JwtEncodeError, +}; +use jsonwebtoken::errors::ErrorKind; +use jsonwebtoken::{DecodingKey, EncodingKey, Header, Validation}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Debug, Clone)] +pub struct Hs256JwtCodec { + secret: String, +} + +impl Hs256JwtCodec { + pub fn new(secret: impl Into) -> Self { + Self { + secret: secret.into(), + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct WireClaims { + sub: String, + #[serde(default)] + workspace_id: Option, + #[serde(default)] + iat: usize, + exp: usize, + #[serde(default, skip_serializing_if = "Option::is_none")] + sid: Option, +} + +impl JwtCodec for Hs256JwtCodec { + fn decode(&self, token: &str) -> Result { + let decoded = jsonwebtoken::decode::( + token, + &DecodingKey::from_secret(self.secret.as_bytes()), + &Validation::default(), + ); + let claims = match decoded { + Ok(data) => data.claims, + Err(err) => { + if matches!(err.kind(), ErrorKind::ExpiredSignature) { + return Err(JwtDecodeError::Expired); + } + return Err(JwtDecodeError::Invalid); + } + }; + + let sub = Uuid::parse_str(&claims.sub).map_err(|_| JwtDecodeError::Invalid)?; + let workspace_id = claims + .workspace_id + .as_deref() + .map(Uuid::parse_str) + .transpose() + .map_err(|_| JwtDecodeError::Invalid)?; + let sid = claims + .sid + .as_deref() + .map(Uuid::parse_str) + .transpose() + .map_err(|_| JwtDecodeError::Invalid)?; + + Ok(JwtClaims { + sub, + workspace_id, + iat: claims.iat, + exp: claims.exp, + sid, + }) + } + + fn encode(&self, claims: &JwtClaims) -> Result { + let wire = WireClaims { + sub: claims.sub.to_string(), + workspace_id: claims.workspace_id.map(|id| id.to_string()), + iat: claims.iat, + exp: claims.exp, + sid: claims.sid.map(|id| id.to_string()), + }; + jsonwebtoken::encode( + &Header::default(), + &wire, + &EncodingKey::from_secret(self.secret.as_bytes()), + ) + 
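+        // Any jsonwebtoken failure is collapsed into the opaque JwtEncodeError
+        // marker; callers only need to know that encoding failed.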
+        .map_err(|_| JwtEncodeError)
+    }
+}
diff --git a/api/crates/infrastructure/src/identity/mod.rs b/api/crates/infrastructure/src/identity/mod.rs
new file mode 100644
index 00000000..badd40fb
--- /dev/null
+++ b/api/crates/infrastructure/src/identity/mod.rs
@@ -0,0 +1,4 @@
+pub mod auth;
+pub mod crypto;
+pub mod db;
+pub mod jwt;
diff --git a/api/crates/infrastructure/src/lib.rs b/api/crates/infrastructure/src/lib.rs
new file mode 100644
index 00000000..06ff4e80
--- /dev/null
+++ b/api/crates/infrastructure/src/lib.rs
@@ -0,0 +1,9 @@
+#![allow(clippy::collapsible_if)]
+#![allow(clippy::too_many_arguments)]
+
+pub mod core;
+pub mod documents;
+pub mod git;
+pub mod identity;
+pub mod plugins;
+pub mod workspaces;
diff --git a/api/crates/infrastructure/src/plugins/db/mod.rs b/api/crates/infrastructure/src/plugins/db/mod.rs
new file mode 100644
index 00000000..21b552a0
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/db/mod.rs
@@ -0,0 +1 @@
+pub mod repositories;
diff --git a/api/crates/infrastructure/src/plugins/db/repositories/mod.rs b/api/crates/infrastructure/src/plugins/db/repositories/mod.rs
new file mode 100644
index 00000000..36e68ab7
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/db/repositories/mod.rs
@@ -0,0 +1,2 @@
+pub mod plugin_installation_repository_sqlx;
+pub mod plugin_repository_sqlx;
diff --git a/api/crates/infrastructure/src/plugins/db/repositories/plugin_installation_repository_sqlx/mod.rs b/api/crates/infrastructure/src/plugins/db/repositories/plugin_installation_repository_sqlx/mod.rs
new file mode 100644
index 00000000..61c572a1
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/db/repositories/plugin_installation_repository_sqlx/mod.rs
@@ -0,0 +1,154 @@
+use async_trait::async_trait;
+use sqlx::Row;
+use uuid::Uuid;
+
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::plugins::ports::plugin_installation_repository::{
+    PluginInstallation, PluginInstallationRepository,
+};
+use domain::plugins::scope::{PluginInstallationStatus, PluginScope};
+
+pub struct SqlxPluginInstallationRepository {
+    pub pool: PgPool,
+}
+
+impl SqlxPluginInstallationRepository {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
+
+#[async_trait]
+impl PluginInstallationRepository for SqlxPluginInstallationRepository {
+    async fn upsert(
+        &self,
+        workspace_id: Uuid,
+        plugin_id: &str,
+        version: &str,
+        scope: PluginScope,
+        origin_url: Option<&str>,
+        status: PluginInstallationStatus,
+    ) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            sqlx::query(
+                r#"INSERT INTO plugin_installations
+                 (workspace_id, plugin_id, version, scope, origin_url, status)
+                 VALUES ($1, $2, $3, $4, $5, $6)
+                 ON CONFLICT (workspace_id, plugin_id)
+                 DO UPDATE SET
+                     version = EXCLUDED.version,
+                     scope = EXCLUDED.scope,
+                     origin_url = EXCLUDED.origin_url,
+                     status = EXCLUDED.status,
+                     updated_at = now()"#,
+            )
+            .bind(workspace_id)
+            .bind(plugin_id)
+            .bind(version)
+            .bind(scope.as_str())
+            .bind(origin_url)
+            .bind(status.as_str())
+            .execute(&self.pool)
+            .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn list_for_workspace(&self, workspace_id: Uuid) -> PortResult<Vec<PluginInstallation>> {
+        let out: anyhow::Result<Vec<PluginInstallation>> = async {
+            let rows = sqlx::query(
+                r#"SELECT workspace_id, plugin_id, version, scope, origin_url, status, installed_at, updated_at
+                 FROM plugin_installations
+                 WHERE workspace_id = $1"#,
+            )
+            .bind(workspace_id)
+            .fetch_all(&self.pool)
+            .await?;
+
+            let mut out = Vec::with_capacity(rows.len());
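+            // Scope and status are persisted as plain text columns; parse them back
+            // into domain enums and reject unknown values instead of guessing.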
+            for row in rows {
+                let scope_raw: String = row.get("scope");
+                let status_raw: String = row.get("status");
+                out.push(PluginInstallation {
+                    workspace_id: row.get("workspace_id"),
+                    plugin_id: row.get("plugin_id"),
+                    version: row.get("version"),
+                    scope: PluginScope::parse(&scope_raw)
+                        .ok_or_else(|| anyhow::anyhow!("invalid_plugin_scope"))?,
+                    origin_url: row.try_get("origin_url").ok(),
+                    status: PluginInstallationStatus::parse(&status_raw)
+                        .ok_or_else(|| anyhow::anyhow!("invalid_plugin_installation_status"))?,
+                    installed_at: row.get("installed_at"),
+                    updated_at: row.get("updated_at"),
+                });
+            }
+
+            Ok(out)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn list_all(&self) -> PortResult<Vec<PluginInstallation>> {
+        let out: anyhow::Result<Vec<PluginInstallation>> = async {
+            let rows = sqlx::query(
+                r#"SELECT workspace_id, plugin_id, version, scope, origin_url, status, installed_at, updated_at
+                 FROM plugin_installations"#,
+            )
+            .fetch_all(&self.pool)
+            .await?;
+
+            let mut out = Vec::with_capacity(rows.len());
+            for row in rows {
+                let scope_raw: String = row.get("scope");
+                let status_raw: String = row.get("status");
+                out.push(PluginInstallation {
+                    workspace_id: row.get("workspace_id"),
+                    plugin_id: row.get("plugin_id"),
+                    version: row.get("version"),
+                    scope: PluginScope::parse(&scope_raw)
+                        .ok_or_else(|| anyhow::anyhow!("invalid_plugin_scope"))?,
+                    origin_url: row.try_get("origin_url").ok(),
+                    status: PluginInstallationStatus::parse(&status_raw)
+                        .ok_or_else(|| anyhow::anyhow!("invalid_plugin_installation_status"))?,
+                    installed_at: row.get("installed_at"),
+                    updated_at: row.get("updated_at"),
+                });
+            }
+
+            Ok(out)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn remove(&self, workspace_id: Uuid, plugin_id: &str) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            let res = sqlx::query(
+                "DELETE FROM plugin_installations WHERE workspace_id = $1 AND plugin_id = $2",
+            )
+            .bind(workspace_id)
+            .bind(plugin_id)
+            .execute(&self.pool)
+            .await?;
+            Ok(res.rows_affected() > 0)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn remove_all_for_workspace(&self, workspace_id: Uuid) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            sqlx::query("DELETE FROM plugin_installations WHERE workspace_id = $1")
+                .bind(workspace_id)
+                .execute(&self.pool)
+                .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
diff --git a/api/crates/infrastructure/src/plugins/db/repositories/plugin_repository_sqlx/mod.rs b/api/crates/infrastructure/src/plugins/db/repositories/plugin_repository_sqlx/mod.rs
new file mode 100644
index 00000000..f817f54f
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/db/repositories/plugin_repository_sqlx/mod.rs
@@ -0,0 +1,274 @@
+use async_trait::async_trait;
+use serde_json::Value as JsonValue;
+use sqlx::Row;
+use uuid::Uuid;
+
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::plugins::ports::plugin_repository::{PluginRecord, PluginRepository};
+use domain::plugins::scope::{PluginRecordScope, PluginScope};
+
+pub struct SqlxPluginRepository {
+    pub pool: PgPool,
+}
+
+impl SqlxPluginRepository {
+    pub fn new(pool: PgPool) -> Self {
+        Self { pool }
+    }
+}
+
+#[async_trait]
+impl PluginRepository for SqlxPluginRepository {
+    async fn kv_get(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+    ) -> PortResult<Option<JsonValue>> {
+        let out: anyhow::Result<Option<JsonValue>> = async {
+            let row = sqlx::query(
+                r#"SELECT value FROM plugin_kv WHERE plugin = $1 AND scope = $2 AND scope_id IS NOT DISTINCT FROM $3 AND key = $4"#,
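+                // IS NOT DISTINCT FROM gives NULL-safe equality, so a NULL scope_id
+                // matches rows whose scope_id is NULL, which plain `=` would not.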
+            )
+            .bind(plugin)
+            .bind(scope.as_str())
+            .bind(scope_id)
+            .bind(key)
+            .fetch_optional(&self.pool)
+            .await?;
+            Ok(row.and_then(|r| r.try_get::<JsonValue, _>("value").ok()))
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn kv_set(
+        &self,
+        plugin: &str,
+        scope: PluginScope,
+        scope_id: Option<Uuid>,
+        key: &str,
+        value: &JsonValue,
+    ) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            sqlx::query(
+                r#"INSERT INTO plugin_kv (plugin, scope, scope_id, key, value)
+                 VALUES ($1, $2, $3, $4, $5)
+                 ON CONFLICT (plugin, scope, scope_id, key)
+                 DO UPDATE SET value = EXCLUDED.value, updated_at = now()"#,
+            )
+            .bind(plugin)
+            .bind(scope.as_str())
+            .bind(scope_id)
+            .bind(key)
+            .bind(value)
+            .execute(&self.pool)
+            .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn insert_record(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        data: &JsonValue,
+    ) -> PortResult<PluginRecord> {
+        let out: anyhow::Result<PluginRecord> = async {
+            let row = sqlx::query(
+                r#"INSERT INTO plugin_records (plugin, scope, scope_id, kind, data)
+                 VALUES ($1, $2, $3, $4, $5)
+                 RETURNING id, plugin, scope, scope_id, kind, data, created_at, updated_at"#,
+            )
+            .bind(plugin)
+            .bind(scope.as_str())
+            .bind(scope_id)
+            .bind(kind)
+            .bind(data)
+            .fetch_one(&self.pool)
+            .await?;
+            let scope_raw: String = row.get("scope");
+            let scope = PluginRecordScope::parse(&scope_raw)
+                .ok_or_else(|| anyhow::anyhow!("invalid_plugin_record_scope"))?;
+            Ok(PluginRecord {
+                id: row.get("id"),
+                plugin: row.get("plugin"),
+                scope,
+                scope_id: row.get("scope_id"),
+                kind: row.get("kind"),
+                data: row.get("data"),
+                created_at: row.get("created_at"),
+                updated_at: row.get("updated_at"),
+            })
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn update_record_data(
+        &self,
+        record_id: Uuid,
+        patch: &JsonValue,
+    ) -> PortResult<Option<PluginRecord>> {
+        let out: anyhow::Result<Option<PluginRecord>> = async {
+            let row = sqlx::query(
+                r#"UPDATE plugin_records SET data = data || $2::jsonb, updated_at = now()
+                 WHERE id = $1
+                 RETURNING id, plugin, scope, scope_id, kind, data, created_at, updated_at"#,
+            )
+            .bind(record_id)
+            .bind(patch)
+            .fetch_optional(&self.pool)
+            .await?;
+            row.map(|r| {
+                let scope_raw: String = r.get("scope");
+                let scope = PluginRecordScope::parse(&scope_raw)
+                    .ok_or_else(|| anyhow::anyhow!("invalid_plugin_record_scope"))?;
+                Ok(PluginRecord {
+                    id: r.get("id"),
+                    plugin: r.get("plugin"),
+                    scope,
+                    scope_id: r.get("scope_id"),
+                    kind: r.get("kind"),
+                    data: r.get("data"),
+                    created_at: r.get("created_at"),
+                    updated_at: r.get("updated_at"),
+                })
+            })
+            .transpose()
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn delete_record(&self, record_id: Uuid) -> PortResult<bool> {
+        let out: anyhow::Result<bool> = async {
+            let res = sqlx::query("DELETE FROM plugin_records WHERE id = $1")
+                .bind(record_id)
+                .execute(&self.pool)
+                .await?;
+            Ok(res.rows_affected() > 0)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn get_record(&self, record_id: Uuid) -> PortResult<Option<PluginRecord>> {
+        let out: anyhow::Result<Option<PluginRecord>> = async {
+            let row = sqlx::query(
+                r#"SELECT id, plugin, scope, scope_id, kind, data, created_at, updated_at
+                 FROM plugin_records WHERE id = $1"#,
+            )
+            .bind(record_id)
+            .fetch_optional(&self.pool)
+            .await?;
+            row.map(|r| {
+                let scope_raw: String = r.get("scope");
+                let scope = PluginRecordScope::parse(&scope_raw)
+                    .ok_or_else(|| anyhow::anyhow!("invalid_plugin_record_scope"))?;
+                Ok(PluginRecord {
+                    id: r.get("id"),
+                    plugin: r.get("plugin"),
+                    scope,
+                    scope_id: r.get("scope_id"),
+                    kind: r.get("kind"),
+                    data: r.get("data"),
+                    created_at: r.get("created_at"),
+                    updated_at: r.get("updated_at"),
+                })
+            })
+            .transpose()
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn list_records(
+        &self,
+        plugin: &str,
+        scope: PluginRecordScope,
+        scope_id: Uuid,
+        kind: &str,
+        limit: i64,
+        offset: i64,
+    ) -> PortResult<Vec<PluginRecord>> {
+        let out: anyhow::Result<Vec<PluginRecord>> = async {
+            let rows = sqlx::query(
+                r#"SELECT id, plugin, scope, scope_id, kind, data, created_at, updated_at
+                 FROM plugin_records
+                 WHERE plugin = $1 AND scope = $2 AND scope_id = $3 AND kind = $4
+                 ORDER BY COALESCE((data->>'pinned')::boolean,false) DESC, created_at DESC
+                 LIMIT $5 OFFSET $6"#,
+            )
+            .bind(plugin)
+            .bind(scope.as_str())
+            .bind(scope_id)
+            .bind(kind)
+            .bind(limit)
+            .bind(offset)
+            .fetch_all(&self.pool)
+            .await?;
+
+            let mut out = Vec::with_capacity(rows.len());
+            for r in rows {
+                let scope_raw: String = r.get("scope");
+                let parsed_scope = PluginRecordScope::parse(&scope_raw)
+                    .ok_or_else(|| anyhow::anyhow!("invalid_plugin_record_scope"))?;
+                out.push(PluginRecord {
+                    id: r.get("id"),
+                    plugin: r.get("plugin"),
+                    scope: parsed_scope,
+                    scope_id: r.get("scope_id"),
+                    kind: r.get("kind"),
+                    data: r.get("data"),
+                    created_at: r.get("created_at"),
+                    updated_at: r.get("updated_at"),
+                });
+            }
+            Ok(out)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn delete_scoped_kv(&self, scope: PluginScope, scope_ids: &[Uuid]) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            if scope_ids.is_empty() {
+                return Ok(());
+            }
+            sqlx::query("DELETE FROM plugin_kv WHERE scope = $1 AND scope_id = ANY($2)")
+                .bind(scope.as_str())
+                .bind(scope_ids)
+                .execute(&self.pool)
+                .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn delete_scoped_records(
+        &self,
+        scope: PluginRecordScope,
+        scope_ids: &[Uuid],
+    ) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            if scope_ids.is_empty() {
+                return Ok(());
+            }
+            sqlx::query("DELETE FROM plugin_records WHERE scope = $1 AND scope_id = ANY($2)")
+                .bind(scope.as_str())
+                .bind(scope_ids)
+                .execute(&self.pool)
+                .await?;
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
diff --git a/api/src/infrastructure/plugins/event_bus_pg.rs b/api/crates/infrastructure/src/plugins/event_bus_pg.rs
similarity index 76%
rename from api/src/infrastructure/plugins/event_bus_pg.rs
rename to api/crates/infrastructure/src/plugins/event_bus_pg.rs
index daae92f1..44fde4f6 100644
--- a/api/src/infrastructure/plugins/event_bus_pg.rs
+++ b/api/crates/infrastructure/src/plugins/event_bus_pg.rs
@@ -8,9 +8,12 @@ use tokio::sync::mpsc;
 use tokio::time::sleep;
 use tokio_stream::wrappers::UnboundedReceiverStream;
 
-use crate::application::ports::plugin_event_publisher::{PluginEventPublisher, PluginScopedEvent};
-use crate::application::ports::plugin_event_subscriber::PluginEventSubscriber;
-use crate::infrastructure::db::PgPool;
+use crate::core::db::PgPool;
+use application::core::ports::errors::PortResult;
+use application::plugins::ports::plugin_event_publisher::{
+    PluginEventPublisher, PluginScopedEvent,
+};
+use application::plugins::ports::plugin_event_subscriber::PluginEventSubscriber;
 
 #[derive(Clone)]
 pub struct PgPluginEventBus {
@@ -99,28 +102,32 @@ struct EventEnvelope {
 
 #[async_trait]
 impl PluginEventPublisher for PgPluginEventBus {
-    async fn publish(&self, event: &PluginScopedEvent) -> anyhow::Result<()> {
-        let envelope = EventEnvelope {
-            user_id: event.user_id,
-            workspace_id: event.workspace_id,
-            payload: event.payload.clone(),
-        };
-        let payload = serde_json::to_string(&envelope).context("plugin_event_serialize")?;
+    async fn publish(&self, event: &PluginScopedEvent) -> PortResult<()> {
+        let out: anyhow::Result<()> = async {
+            let envelope = EventEnvelope {
+                user_id: event.user_id,
+                workspace_id: event.workspace_id,
+                payload: event.payload.clone(),
+            };
+            let payload = serde_json::to_string(&envelope).context("plugin_event_serialize")?;
 
-        sqlx::query("SELECT pg_notify($1, $2)")
-            .bind(&self.channel)
-            .bind(payload)
-            .execute(&self.pool)
-            .await
-            .context("plugin_event_pg_notify")?;
+            sqlx::query("SELECT pg_notify($1, $2)")
+                .bind(&self.channel)
+                .bind(payload)
+                .execute(&self.pool)
+                .await
+                .context("plugin_event_pg_notify")?;
 
-        Ok(())
+            Ok(())
+        }
+        .await;
+        out.map_err(Into::into)
     }
 }
 
 #[async_trait]
 impl PluginEventSubscriber for PgPluginEventBus {
-    async fn subscribe(&self) -> anyhow::Result<UnboundedReceiverStream<PluginScopedEvent>> {
-        self.subscribe_stream().await
+    async fn subscribe(&self) -> PortResult<UnboundedReceiverStream<PluginScopedEvent>> {
+        self.subscribe_stream().await.map_err(Into::into)
     }
 }
diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/asset_store_impl.rs b/api/crates/infrastructure/src/plugins/filesystem_store/asset_store_impl.rs
new file mode 100644
index 00000000..788c9480
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/filesystem_store/asset_store_impl.rs
@@ -0,0 +1,166 @@
+#[async_trait]
+impl PluginAssetStore for FilesystemPluginStore {
+    async fn fetch_asset(
+        &self,
+        scope: PluginAssetStoreScope<'_>,
+        plugin_id: &str,
+        version: &str,
+        relative_path: &str,
+    ) -> application::core::ports::errors::PortResult<PluginAssetPayload> {
+        let out: anyhow::Result<PluginAssetPayload> = async {
+            Self::ensure_valid_plugin_id(plugin_id)?;
+            if version.is_empty()
+                || version.len() > 128
+                || version.contains("..")
+                || version.contains(['/', '\\'])
+            {
+                bail!("invalid plugin version");
+            }
+
+            let base_root = match scope {
+                PluginAssetStoreScope::Global => self.global_root(),
+                PluginAssetStoreScope::User { owner_id } => self.user_root(owner_id),
+            };
+
+            let mut sanitized = PathBuf::new();
+            for component in Path::new(relative_path).components() {
+                match component {
+                    Component::Normal(part) => sanitized.push(part),
+                    Component::CurDir => continue,
+                    _ => bail!("invalid asset path"),
+                }
+            }
+            if sanitized.as_os_str().is_empty() {
+                bail!("invalid asset path");
+            }
+
+            let plugin_dir = base_root.join(plugin_id).join(version);
+            let full_path = plugin_dir.join(&sanitized);
+            if !full_path.starts_with(&plugin_dir) {
+                bail!("invalid asset scope");
+            }
+
+            let bytes = tokio::fs::read(&full_path).await?;
+            let content_type = mime_guess::from_path(&full_path)
+                .first_raw()
+                .unwrap_or("application/octet-stream")
+                .to_string();
+            Ok(PluginAssetPayload {
+                bytes,
+                content_type,
+            })
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn remove_user_plugin_dir(
+        &self,
+        user_id: &Uuid,
+        plugin_id: &str,
+    ) -> application::core::ports::errors::PortResult<()> {
+        let out: anyhow::Result<()> =
+            async { FilesystemPluginStore::remove_user_plugin_dir(self, user_id, plugin_id) }
+                .await;
+        out.map_err(Into::into)
+    }
+
+    async fn list_latest_global_manifests(
+        &self,
+    ) -> application::core::ports::errors::PortResult<Vec<LatestGlobalManifest>> {
+        let out: anyhow::Result<Vec<LatestGlobalManifest>> = async {
+            use std::io::ErrorKind;
+            let mut items = Vec::new();
+            let root = self.global_root();
+            let mut entries = match tokio::fs::read_dir(&root).await {
+                Ok(iter) => iter,
+                Err(err) if err.kind() == ErrorKind::NotFound => return Ok(items),
+                Err(err) => return Err(err.into()),
+            };
+
+            while let Some(entry) = entries.next_entry().await? {
+                if !entry.file_type().await?.is_dir() {
+                    continue;
+                }
+
+                let plugin_id = entry.file_name().to_string_lossy().to_string();
+                let base = entry.path();
+                let best = match self.latest_version_dir(&base) {
+                    Ok(Some(path)) => path,
+                    Ok(None) => continue,
+                    Err(err) => {
+                        tracing::warn!(
+                            error = ?err,
+                            plugin_id = plugin_id.as_str(),
+                            path = ?base,
+                            "resolve_global_plugin_version_failed"
+                        );
+                        continue;
+                    }
+                };
+
+                let version = best
+                    .file_name()
+                    .and_then(|v| v.to_str())
+                    .unwrap_or("0.0.0")
+                    .to_string();
+                let manifest_path = best.join("plugin.json");
+                let contents = match tokio::fs::read_to_string(&manifest_path).await {
+                    Ok(contents) => contents,
+                    Err(err) if err.kind() == ErrorKind::NotFound => continue,
+                    Err(err) => {
+                        tracing::warn!(
+                            error = ?err,
+                            plugin_id = plugin_id.as_str(),
+                            version = version.as_str(),
+                            path = ?manifest_path,
+                            "read_global_plugin_manifest_failed"
+                        );
+                        continue;
+                    }
+                };
+
+                match serde_json::from_str::<JsonValue>(&contents) {
+                    Ok(json) => items.push(LatestGlobalManifest {
+                        plugin_id: plugin_id.clone(),
+                        version: version.clone(),
+                        manifest: json,
+                    }),
+                    Err(err) => tracing::warn!(
+                        error = ?err,
+                        plugin_id = plugin_id.as_str(),
+                        version = version.as_str(),
+                        path = ?manifest_path,
+                        "parse_global_plugin_manifest_failed"
+                    ),
+                }
+            }
+
+            Ok(items)
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+
+    async fn load_user_manifest(
+        &self,
+        user_id: &Uuid,
+        plugin_id: &str,
+        version: &str,
+    ) -> application::core::ports::errors::PortResult<Option<JsonValue>> {
+        let out: anyhow::Result<Option<JsonValue>> = async {
+            use std::io::ErrorKind;
+            let manifest_path = self.user_plugin_manifest_path(user_id, plugin_id, version);
+            match tokio::fs::read_to_string(&manifest_path).await {
+                Ok(contents) => {
+                    let json = serde_json::from_str::<JsonValue>(&contents)?;
+                    Ok(Some(json))
+                }
+                Err(err) if err.kind() == ErrorKind::NotFound => Ok(None),
+                Err(err) => Err(err.into()),
+            }
+        }
+        .await;
+        out.map_err(Into::into)
+    }
+}
diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/extism.rs b/api/crates/infrastructure/src/plugins/filesystem_store/extism.rs
new file mode 100644
index 00000000..473d1dfc
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/filesystem_store/extism.rs
@@ -0,0 +1,198 @@
+impl FilesystemPluginStore {
+    async fn resolve_backend_wasm_path(&self, plugin_dir: &Path) -> anyhow::Result<PathBuf> {
+        let manifest_path = plugin_dir.join("plugin.json");
+        let manifest_str = tokio::fs::read_to_string(&manifest_path)
+            .await
+            .with_context(|| format!("read plugin manifest at {}", manifest_path.display()))?;
+        let manifest: JsonValue = serde_json::from_str(&manifest_str)
+            .with_context(|| format!("parse plugin manifest at {}", manifest_path.display()))?;
+
+        let wasm_rel = manifest
+            .get("backend")
+            .and_then(|b| b.get("wasm"))
+            .and_then(|w| w.as_str())
+            .unwrap_or("backend/plugin.wasm");
+        let sanitized = Self::sanitize_relative_path(wasm_rel)?;
+        Ok(plugin_dir.join(sanitized))
+    }
+
+    fn extract_permissions(manifest: &JsonValue) -> Vec<String> {
+        manifest
+            .get("permissions")
+            .and_then(|value| value.as_array())
+            .map(|items| {
+                items
+                    .iter()
+                    .filter_map(|item| item.as_str().map(|s| s.to_string()))
+                    .collect::<Vec<String>>()
+            })
+            .unwrap_or_default()
+    }
+
+    async fn load_plugin_instance(&self, plugin_dir: &Path) -> anyhow::Result<Arc<Mutex<Plugin>>> {
+        let wasm_path = self.resolve_backend_wasm_path(plugin_dir).await?;
+        let metadata = tokio::fs::metadata(&wasm_path)
+            .await
+            .with_context(|| format!("read metadata for {}", wasm_path.display()))?;
+        let modified = metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH);
+
+        {
+            let cache = self.plugin_cache.read().await;
+            if let Some(entry) = cache.get(&wasm_path) {
+                if entry.modified == modified {
+                    return Ok(entry.plugin.clone());
+                }
+            }
+        }
+
+        let wasm_bytes = tokio::fs::read(&wasm_path)
+            .await
+            .with_context(|| format!("read wasm module at {}", wasm_path.display()))?;
+        let wasm_key = wasm_path.clone();
+        let limits = self.limits;
+        let plugin = task::spawn_blocking(move || -> anyhow::Result<Plugin> {
+            let mut manifest = Manifest::new([Wasm::data(wasm_bytes)]);
+            if let Some(timeout) = limits.timeout {
+                manifest = manifest.with_timeout(timeout);
+            }
+            if let Some(memory_max) = limits.memory_max_pages {
+                manifest = manifest.with_memory_max(memory_max);
+            }
+            let builder = PluginBuilder::new(manifest).with_wasi(true);
+            let builder = if let Some(fuel_limit) = limits.fuel_limit {
+                builder.with_fuel_limit(fuel_limit)
+            } else {
+                builder
+            };
+            builder.build().context("create plugin")
+        })
+        .await
+        .context("join extism initialization task")??;
+
+        let plugin_arc = Arc::new(Mutex::new(plugin));
+        let mut cache = self.plugin_cache.write().await;
+        cache.insert(
+            wasm_key,
+            CachedPlugin {
+                modified,
+                plugin: plugin_arc.clone(),
+            },
+        );
+        Ok(plugin_arc)
+    }
+
+    async fn invoke_plugin(
+        &self,
+        plugin_dir: &Path,
+        function: &str,
+        input: Vec<u8>,
+    ) -> anyhow::Result<Vec<u8>> {
+        let plugin = self.load_plugin_instance(plugin_dir).await?;
+        let function = function.to_string();
+        let output = task::spawn_blocking(move || -> anyhow::Result<Vec<u8>> {
+            let mut guard = plugin
+                .lock()
+                .map_err(|_| anyhow::anyhow!("extism plugin mutex poisoned"))?;
+            let bytes: &[u8] = guard
+                .call(&function, &input)
+                .map_err(|err| anyhow::anyhow!(format!("extism call error: {err}")))?;
+            Ok(bytes.to_vec())
+        })
+        .await
+        .context("join extism call task")??;
+        Ok(output)
+    }
+
+    fn sanitize_relative_path(path: &str) -> anyhow::Result<String> {
+        let trimmed = path.trim();
+        let without_root = trimmed.trim_start_matches('/');
+        if without_root.is_empty() {
+            anyhow::bail!("invalid backend wasm path");
+        }
+        if without_root
+            .split('/')
+            .any(|segment| segment.is_empty() || segment == "."
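+                // rejecting empty, "." and ".." segments keeps the wasm path
+                // from escaping the plugin directory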
|| segment == "..") + { + anyhow::bail!("invalid backend wasm path segment"); + } + Ok(without_root.to_string()) + } + + fn build_invocation_context( + user_id: Option, + plugin: &str, + invocation: &str, + doc_id: Option, + kind: InvocationKind, + ) -> JsonValue { + let timestamp = Utc::now().to_rfc3339(); + let mut ctx = JsonMap::new(); + ctx.insert("plugin".to_string(), json!({ "id": plugin })); + ctx.insert("invocation".to_string(), json!(invocation)); + ctx.insert("timestamp".to_string(), json!(timestamp)); + ctx.insert( + "invocation_meta".to_string(), + json!({ + "name": invocation, + "kind": kind.as_str(), + "timestamp": timestamp, + }), + ); + if let Some(uid) = user_id { + ctx.insert("user".to_string(), json!({ "id": uid })); + ctx.insert("user_id".to_string(), json!(uid)); + } + if let Some(doc) = doc_id { + ctx.insert("doc".to_string(), json!({ "id": doc })); + ctx.insert("doc_id".to_string(), json!(doc)); + } + ctx.insert("kind".to_string(), json!(kind.as_str())); + JsonValue::Object(ctx) + } + + fn extract_doc_id(value: &JsonValue) -> Option { + match value { + JsonValue::Object(map) => { + let direct_keys = ["docId", "doc_id", "doc", "document"]; + for key in direct_keys { + if let Some(candidate) = map.get(key) { + if let Some(id) = Self::value_to_uuid(candidate) { + return Some(id); + } + } + } + + let nested_keys = ["options", "payload", "context", "meta"]; // fallback search + for key in nested_keys { + if let Some(nested) = map.get(key) { + if let Some(id) = Self::extract_doc_id(nested) { + return Some(id); + } + } + } + None + } + JsonValue::String(s) => Uuid::parse_str(s).ok(), + JsonValue::Array(items) => { + for item in items { + if let Some(id) = Self::extract_doc_id(item) { + return Some(id); + } + } + None + } + _ => None, + } + } + + fn value_to_uuid(value: &JsonValue) -> Option { + match value { + JsonValue::String(s) => Uuid::parse_str(s).ok(), + JsonValue::Object(obj) => obj + .get("id") + .and_then(|id| id.as_str()) + .and_then(|s| Uuid::parse_str(s).ok()), + _ => None, + } + } +} diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/install.rs b/api/crates/infrastructure/src/plugins/filesystem_store/install.rs new file mode 100644 index 00000000..9af27c39 --- /dev/null +++ b/api/crates/infrastructure/src/plugins/filesystem_store/install.rs @@ -0,0 +1,121 @@ +impl FilesystemPluginStore { + fn validate_manifest( + manifest: &serde_json::Value, + ) -> Result<(String, String), PluginInstallError> { + let id = manifest + .get("id") + .and_then(|v| v.as_str()) + .ok_or_else(|| PluginInstallError::InvalidPackage(anyhow::anyhow!("missing id")))? + .to_string(); + let version = manifest + .get("version") + .and_then(|v| v.as_str()) + .ok_or_else(|| PluginInstallError::InvalidPackage(anyhow::anyhow!("missing version")))? 
+            .to_string();
+
+        if !PLUGIN_ID_RE.is_match(&id) {
+            return Err(PluginInstallError::InvalidPackage(anyhow::anyhow!(
+                "invalid plugin id"
+            )));
+        }
+        if !PLUGIN_VERSION_RE.is_match(&version) {
+            return Err(PluginInstallError::InvalidPackage(anyhow::anyhow!(
+                "invalid plugin version"
+            )));
+        }
+        Ok((id, version))
+    }
+
+    fn extract_archive(archive: &[u8], dest_root: &Path) -> Result<(), PluginInstallError> {
+        let reader = std::io::Cursor::new(archive);
+        let mut archive = zip::ZipArchive::new(reader)
+            .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?;
+
+        let dest_root = dest_root
+            .canonicalize()
+            .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+
+        for i in 0..archive.len() {
+            let mut file = archive
+                .by_index(i)
+                .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?;
+            let Some(rel_path) = file.enclosed_name().map(|p| p.to_path_buf()) else {
+                continue;
+            };
+
+            if let Some(mode) = file.unix_mode() {
+                if (mode & 0o170000) == 0o120000 {
+                    continue;
+                }
+            }
+
+            let outpath = dest_root.join(&rel_path);
+            if !outpath.starts_with(&dest_root) {
+                continue;
+            }
+
+            if file.is_dir() {
+                std::fs::create_dir_all(&outpath)
+                    .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+            } else {
+                if let Some(parent) = outpath.parent() {
+                    std::fs::create_dir_all(parent)
+                        .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+                }
+                let mut outfile = std::fs::File::create(&outpath)
+                    .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+                std::io::copy(&mut file, &mut outfile)
+                    .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+            }
+        }
+
+        Ok(())
+    }
+
+    fn read_manifest_from_archive(
+        archive_vec: &[u8],
+    ) -> Result<(serde_json::Value, InstalledPlugin), PluginInstallError> {
+        let reader = std::io::Cursor::new(archive_vec);
+        let mut zip = zip::ZipArchive::new(reader)
+            .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?;
+
+        let mut manifest_json: Option<serde_json::Value> = None;
+        for i in 0..zip.len() {
+            let mut file = zip
+                .by_index(i)
+                .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?;
+            if file.name().ends_with("plugin.json") {
+                let mut contents = String::new();
+                file.read_to_string(&mut contents)
+                    .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?;
+                manifest_json = serde_json::from_str(&contents).ok();
+                break;
+            }
+        }
+
+        let manifest = manifest_json.ok_or_else(|| {
+            PluginInstallError::InvalidPackage(anyhow::anyhow!("plugin.json not found"))
+        })?;
+        let (id, version) = Self::validate_manifest(&manifest)?;
+        Ok((manifest, InstalledPlugin { id, version }))
+    }
+
+    pub fn load_manifest(&self, manifest_path: &Path) -> Option<serde_json::Value> {
+        std::fs::read_to_string(manifest_path)
+            .ok()
+            .and_then(|s| serde_json::from_str(&s).ok())
+    }
+
+    pub fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> anyhow::Result<()> {
+        Self::ensure_valid_plugin_id(plugin_id)?;
+        let root = self.user_root(user_id);
+        let path = root.join(plugin_id);
+        if !path.starts_with(&root) {
+            bail!("invalid plugin path");
+        }
+        if path.exists() {
+            std::fs::remove_dir_all(&path)?;
+        }
+        Ok(())
+    }
+}
diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/installer_impl.rs b/api/crates/infrastructure/src/plugins/filesystem_store/installer_impl.rs
new file mode 100644
index 00000000..a7af6c60
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/filesystem_store/installer_impl.rs
@@ -0,0 +1,44 @@
+#[async_trait]
+impl PluginInstaller for FilesystemPluginStore {
+    async fn install_for_user(
+        &self,
+        user_id: Uuid,
+        archive: &[u8],
+    ) -> Result<InstalledPlugin, PluginInstallError> {
+        let archive_vec = archive.to_vec();
+        let (_manifest, installed) = Self::read_manifest_from_archive(&archive_vec)?;
+
+        let dest_root = self
+            .user_root(&user_id)
+            .join(&installed.id)
+            .join(&installed.version);
+
+        match tokio::fs::metadata(&dest_root).await {
+            Ok(_) => {
+                tokio::fs::remove_dir_all(&dest_root)
+                    .await
+                    .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+            }
+            Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
+            Err(err) => return Err(PluginInstallError::Storage(anyhow::anyhow!(err))),
+        }
+        if let Some(parent) = dest_root.parent() {
+            tokio::fs::create_dir_all(parent)
+                .await
+                .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+        }
+        tokio::fs::create_dir_all(&dest_root)
+            .await
+            .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?;
+
+        let dest_for_extract = dest_root.clone();
+        let archive_for_extract = archive_vec;
+        tokio::task::spawn_blocking(move || {
+            FilesystemPluginStore::extract_archive(&archive_for_extract, &dest_for_extract)
+        })
+        .await
+        .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))??;
+
+        Ok(installed)
+    }
+}
diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/mod.rs b/api/crates/infrastructure/src/plugins/filesystem_store/mod.rs
new file mode 100644
index 00000000..6b94312f
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/filesystem_store/mod.rs
@@ -0,0 +1,8 @@
+include!("types.rs");
+include!("paths.rs");
+include!("extism.rs");
+include!("install.rs");
+include!("installer_impl.rs");
+include!("asset_store_impl.rs");
+include!("runtime_impl.rs");
+include!("tests.rs");
diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/paths.rs b/api/crates/infrastructure/src/plugins/filesystem_store/paths.rs
new file mode 100644
index 00000000..6096651b
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/filesystem_store/paths.rs
@@ -0,0 +1,124 @@
+impl FilesystemPluginStore {
+    pub(crate) fn is_valid_plugin_id(plugin_id: &str) -> bool {
+        !plugin_id.is_empty() && PLUGIN_ID_RE.is_match(plugin_id)
+    }
+
+    pub(crate) fn ensure_valid_plugin_id(plugin_id: &str) -> anyhow::Result<()> {
+        if Self::is_valid_plugin_id(plugin_id) {
+            Ok(())
+        } else {
+            bail!("invalid plugin id");
+        }
+    }
+
+    pub fn new(configured_dir: &str, limits: PluginExecutionLimits) -> anyhow::Result<Self> {
+        let root = Self::resolve_root(configured_dir)?;
+        Ok(Self {
+            root,
+            plugin_cache: Arc::new(RwLock::new(HashMap::new())),
+            limits,
+        })
+    }
+
+    pub fn root(&self) -> &Path {
+        &self.root
+    }
+
+    pub fn global_root(&self) -> PathBuf {
+        self.root.join("global")
+    }
+
+    pub fn user_root(&self, user_id: &Uuid) -> PathBuf {
+        self.root.join(user_id.to_string())
+    }
+
+    pub fn user_plugin_manifest_path(
+        &self,
+        user_id: &Uuid,
+        plugin_id: &str,
+        version: &str,
+    ) -> PathBuf {
+        self.user_root(user_id)
+            .join(plugin_id)
+            .join(version)
+            .join("plugin.json")
+    }
+
+    pub fn global_plugin_manifest_path(&self, plugin_id: &str, version: &str) -> PathBuf {
+        self.global_root()
+            .join(plugin_id)
+            .join(version)
+            .join("plugin.json")
+    }
+
+    fn resolve_root(configured_dir: &str) -> anyhow::Result<PathBuf> {
+        let configured = configured_dir.trim();
+        if !configured.is_empty() {
+            let path = PathBuf::from(configured);
+            if !path.exists() {
+                std::fs::create_dir_all(&path)?;
+            }
+            return path.canonicalize().or_else(|_| Ok(path));
+        }
+        let candidates = [PathBuf::from("./plugins"), PathBuf::from("../plugins")];
+        for candidate in &candidates {
+            if candidate.exists() {
+                return candidate.canonicalize().or_else(|_| Ok(candidate.clone()));
+            }
+        }
+        let fallback = PathBuf::from("./plugins");
+        std::fs::create_dir_all(&fallback)?;
+        match fallback.canonicalize() {
+            Ok(p) => Ok(p),
+            Err(_) => Ok(fallback),
+        }
+    }
+
+    pub fn latest_version_dir(&self, base: &Path) -> anyhow::Result<Option<PathBuf>> {
+        if !base.exists() {
+            return Ok(None);
+        }
+        let mut best: Option<(PathBuf, String, Option<Version>)> = None;
+        for entry in std::fs::read_dir(base)? {
+            let entry = entry?;
+            if !entry.file_type()?.is_dir() {
+                continue;
+            }
+            let candidate_name = entry.file_name().to_string_lossy().into_owned();
+            let candidate_semver = Version::parse(&candidate_name).ok();
+            match &best {
+                Some((_path, current_name, current_semver)) => {
+                    let is_newer = match (&candidate_semver, current_semver) {
+                        (Some(candidate), Some(current)) => candidate > current,
+                        (Some(_), None) => true,
+                        (None, Some(_)) => false,
+                        (None, None) => candidate_name.as_str() > current_name.as_str(),
+                    };
+                    if is_newer {
+                        best = Some((entry.path(), candidate_name, candidate_semver));
+                    }
+                }
+                None => best = Some((entry.path(), candidate_name, candidate_semver)),
+            }
+        }
+        Ok(best.map(|(path, _, _)| path))
+    }
+
+    fn locate_plugin_dir(
+        &self,
+        user_id: Option<Uuid>,
+        plugin: &str,
+    ) -> anyhow::Result<Option<PathBuf>> {
+        if !Self::is_valid_plugin_id(plugin) {
+            return Ok(None);
+        }
+        if let Some(uid) = user_id {
+            let base = self.user_root(&uid).join(plugin);
+            if let Some(dir) = self.latest_version_dir(&base)? {
+                return Ok(Some(dir));
+            }
+        }
+        let base = self.global_root().join(plugin);
+        self.latest_version_dir(&base)
+    }
+}
diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/runtime_impl.rs b/api/crates/infrastructure/src/plugins/filesystem_store/runtime_impl.rs
new file mode 100644
index 00000000..287b8f68
--- /dev/null
+++ b/api/crates/infrastructure/src/plugins/filesystem_store/runtime_impl.rs
@@ -0,0 +1,116 @@
+#[async_trait]
+impl PluginRuntime for FilesystemPluginStore {
+    async fn execute(
+        &self,
+        user_id: Option<Uuid>,
+        plugin: &str,
+        action: &str,
+        payload: &serde_json::Value,
+    ) -> application::core::ports::errors::PortResult<Option<ExecResult>> {
+        let out: anyhow::Result<Option<ExecResult>> = async {
+            let plugin_dir = self.locate_plugin_dir(user_id, plugin)?;
+
+            let Some(plugin_dir) = plugin_dir else {
+                return Ok(None);
+            };
+
+            let doc_hint = Self::extract_doc_id(payload);
+            let ctx = Self::build_invocation_context(
+                user_id,
+                plugin,
+                action,
+                doc_hint,
+                InvocationKind::Exec,
+            );
+            let input = json!({
+                "action": action,
+                "payload": payload,
+                "ctx": ctx
+            });
+
+            let out = self
+                .invoke_plugin(&plugin_dir, "exec", serde_json::to_vec(&input)?)
+ .await?; + + if out.is_empty() { + return Ok(None); + } + + let res: ExecResult = serde_json::from_slice(&out)?; + Ok(Some(res)) + } + .await; + out.map_err(Into::into) + } + + async fn render_placeholder( + &self, + user_id: Option, + plugin: &str, + function: &str, + request: &serde_json::Value, + ) -> application::core::ports::errors::PortResult> { + let out: anyhow::Result> = async { + let plugin_dir = self.locate_plugin_dir(user_id, plugin)?; + let Some(plugin_dir) = plugin_dir else { + return Ok(None); + }; + + let doc_hint = Self::extract_doc_id(request); + + let ctx = Self::build_invocation_context( + user_id, + plugin, + function, + doc_hint, + InvocationKind::Render, + ); + + let envelope = match request.clone() { + JsonValue::Object(mut map) => { + map.insert("context".to_string(), ctx); + JsonValue::Object(map) + } + other => json!({ + "payload": other, + "context": ctx + }), + }; + + let out = self + .invoke_plugin(&plugin_dir, function, serde_json::to_vec(&envelope)?) + .await?; + if out.is_empty() { + return Ok(None); + } + let value = serde_json::from_slice(&out)?; + Ok(Some(value)) + } + .await; + out.map_err(Into::into) + } + + async fn permissions( + &self, + user_id: Option, + plugin: &str, + ) -> application::core::ports::errors::PortResult>> { + let out: anyhow::Result>> = async { + let plugin_dir = self.locate_plugin_dir(user_id, plugin)?; + let Some(plugin_dir) = plugin_dir else { + return Ok(None); + }; + + let manifest_path = plugin_dir.join("plugin.json"); + let manifest_str = tokio::fs::read_to_string(&manifest_path) + .await + .with_context(|| format!("read plugin manifest at {}", manifest_path.display()))?; + let manifest: JsonValue = serde_json::from_str(&manifest_str) + .with_context(|| format!("parse plugin manifest at {}", manifest_path.display()))?; + + Ok(Some(Self::extract_permissions(&manifest))) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/tests.rs b/api/crates/infrastructure/src/plugins/filesystem_store/tests.rs new file mode 100644 index 00000000..a2508147 --- /dev/null +++ b/api/crates/infrastructure/src/plugins/filesystem_store/tests.rs @@ -0,0 +1,41 @@ +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn prefers_semver_when_available() { + let temp = TempDir::new().unwrap(); + let root = temp.path().join("plugins_test"); + std::fs::create_dir_all(root.as_path()).unwrap(); + + let store = + FilesystemPluginStore::new(root.to_str().unwrap(), PluginExecutionLimits::default()) + .unwrap(); + + let base = store.root().join("marp"); + std::fs::create_dir_all(base.join("1.9.0")).unwrap(); + std::fs::create_dir_all(base.join("1.10.0")).unwrap(); + + let latest = store.latest_version_dir(&base).unwrap().unwrap(); + assert_eq!(latest.file_name().unwrap(), "1.10.0"); + } + + #[test] + fn falls_back_to_lexical_for_non_semver() { + let temp = TempDir::new().unwrap(); + let root = temp.path().join("plugins_test_non_semver"); + std::fs::create_dir_all(root.as_path()).unwrap(); + + let store = + FilesystemPluginStore::new(root.to_str().unwrap(), PluginExecutionLimits::default()) + .unwrap(); + + let base = store.root().join("example"); + std::fs::create_dir_all(base.join("beta")).unwrap(); + std::fs::create_dir_all(base.join("alpha")).unwrap(); + + let latest = store.latest_version_dir(&base).unwrap().unwrap(); + assert_eq!(latest.file_name().unwrap(), "beta"); + } +} diff --git a/api/crates/infrastructure/src/plugins/filesystem_store/types.rs 
b/api/crates/infrastructure/src/plugins/filesystem_store/types.rs new file mode 100644 index 00000000..9ca04229 --- /dev/null +++ b/api/crates/infrastructure/src/plugins/filesystem_store/types.rs @@ -0,0 +1,88 @@ +use std::collections::HashMap; +use std::io::Read; +use std::path::{Component, Path, PathBuf}; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, SystemTime}; + +use anyhow::{Context, bail}; +use async_trait::async_trait; +use chrono::Utc; +use extism::{Manifest, Plugin, PluginBuilder, Wasm}; +use once_cell::sync::Lazy; +use regex::Regex; +use semver::Version; +use serde_json::{Map as JsonMap, Value as JsonValue, json}; +use tokio::{sync::RwLock, task}; +use uuid::Uuid; + +use application::plugins::dtos::ExecResult; +use application::plugins::ports::plugin_asset_store::{ + LatestGlobalManifest, PluginAssetPayload, PluginAssetStore, PluginAssetStoreScope, +}; +use application::plugins::ports::plugin_installer::{ + InstalledPlugin, PluginInstallError, PluginInstaller, +}; +use application::plugins::ports::plugin_runtime::PluginRuntime; + +static PLUGIN_ID_RE: Lazy = + Lazy::new(|| Regex::new(r"^[A-Za-z0-9_-]+$").expect("valid regex")); +static PLUGIN_VERSION_RE: Lazy = + Lazy::new(|| Regex::new(r"^[A-Za-z0-9._-]+$").expect("valid regex")); + +pub struct FilesystemPluginStore { + root: PathBuf, + plugin_cache: Arc>>, + limits: PluginExecutionLimits, +} + +struct CachedPlugin { + modified: SystemTime, + plugin: Arc>, +} + +#[derive(Clone, Copy)] +pub struct PluginExecutionLimits { + pub timeout: Option, + pub memory_max_pages: Option, + pub fuel_limit: Option, +} + +impl PluginExecutionLimits { + pub const fn new( + timeout: Option, + memory_max_pages: Option, + fuel_limit: Option, + ) -> Self { + Self { + timeout, + memory_max_pages, + fuel_limit, + } + } +} + +impl Default for PluginExecutionLimits { + fn default() -> Self { + Self { + timeout: Some(Duration::from_secs(10)), + memory_max_pages: Some(4096), // ~256 MiB + fuel_limit: Some(50_000_000), + } + } +} + +#[derive(Clone, Copy)] +enum InvocationKind { + Exec, + Render, +} + +impl InvocationKind { + fn as_str(&self) -> &'static str { + match self { + InvocationKind::Exec => "exec", + InvocationKind::Render => "render", + } + } +} + diff --git a/api/src/infrastructure/plugins/mod.rs b/api/crates/infrastructure/src/plugins/mod.rs similarity index 89% rename from api/src/infrastructure/plugins/mod.rs rename to api/crates/infrastructure/src/plugins/mod.rs index d50b19af..172b7d4d 100644 --- a/api/src/infrastructure/plugins/mod.rs +++ b/api/crates/infrastructure/src/plugins/mod.rs @@ -1,3 +1,4 @@ +pub mod db; pub mod event_bus_pg; pub mod filesystem_store; pub mod package_fetcher_reqwest; diff --git a/api/crates/infrastructure/src/plugins/package_fetcher_reqwest.rs b/api/crates/infrastructure/src/plugins/package_fetcher_reqwest.rs new file mode 100644 index 00000000..56c92101 --- /dev/null +++ b/api/crates/infrastructure/src/plugins/package_fetcher_reqwest.rs @@ -0,0 +1,48 @@ +use async_trait::async_trait; + +use application::core::ports::errors::PortResult; +use application::plugins::ports::plugin_package_fetcher::PluginPackageFetcher; + +pub struct ReqwestPluginPackageFetcher { + client: reqwest::Client, +} + +impl ReqwestPluginPackageFetcher { + pub fn new() -> Self { + Self { + client: reqwest::Client::new(), + } + } +} + +impl Default for ReqwestPluginPackageFetcher { + fn default() -> Self { + Self::new() + } +} + +#[async_trait] +impl PluginPackageFetcher for ReqwestPluginPackageFetcher { + async fn fetch(&self, url: 
&str, token: Option<&str>) -> PortResult<Vec<u8>> { + let out: anyhow::Result<Vec<u8>> = async { + let mut req = self.client.get(url); + if let Some(t) = token { + req = req.bearer_auth(t); + } + let resp = req + .send() + .await + .map_err(|e| anyhow::anyhow!("request failed: {e}"))?; + if !resp.status().is_success() { + anyhow::bail!("upstream returned status {}", resp.status()); + } + let bytes = resp + .bytes() + .await + .map_err(|e| anyhow::anyhow!("failed to read body: {e}"))?; + Ok(bytes.to_vec()) + } + .await; + out.map_err(Into::into) + } +} diff --git a/api/src/infrastructure/plugins/s3_store.rs b/api/crates/infrastructure/src/plugins/s3_store.rs similarity index 77% rename from api/src/infrastructure/plugins/s3_store.rs rename to api/crates/infrastructure/src/plugins/s3_store.rs index cef72e89..b937ab32 100644 --- a/api/src/infrastructure/plugins/s3_store.rs +++ b/api/crates/infrastructure/src/plugins/s3_store.rs @@ -20,19 +20,19 @@ use tokio::time::sleep; use uuid::Uuid; use walkdir::WalkDir; -use crate::application::dto::plugins::ExecResult; -use crate::application::ports::plugin_asset_store::{ - PluginAssetPayload, PluginAssetStore, PluginAssetStoreScope, +use crate::plugins::event_bus_pg::PgPluginEventBus; +use crate::plugins::filesystem_store::{FilesystemPluginStore, PluginExecutionLimits}; +use application::core::ports::errors::PortResult; +use application::plugins::dtos::ExecResult; +use application::plugins::ports::plugin_asset_store::{ + LatestGlobalManifest, PluginAssetPayload, PluginAssetStore, PluginAssetStoreScope, }; -use crate::application::ports::plugin_event_publisher::PluginScopedEvent; -use crate::application::ports::plugin_installer::{ +use application::plugins::ports::plugin_event_publisher::PluginScopedEvent; +use application::plugins::ports::plugin_installer::{ InstalledPlugin, PluginInstallError, PluginInstaller, }; -use crate::application::ports::plugin_runtime::PluginRuntime; -use crate::infrastructure::plugins::event_bus_pg::PgPluginEventBus; -use crate::infrastructure::plugins::filesystem_store::{ - FilesystemPluginStore, PluginExecutionLimits, -}; +use application::plugins::ports::plugin_runtime::PluginRuntime; +use domain::plugins::events::PluginEventKind; const PLUGINS_PREFIX: &str = "plugins"; const GLOBAL_MANIFEST_CACHE_TTL_SECS: u64 = 300; @@ -85,14 +85,12 @@ impl UserPluginKey { } fn is_manifest_affecting_event(event: &PluginScopedEvent) -> bool { - if let Some(kind) = event.payload.get("event").and_then(|value| value.as_str()) { - matches!( - kind, - "installed" | "uninstalled" | "updated" | "publish" | "unpublish" - ) - } else { - false - } + event + .payload + .get("event") + .and_then(|value| value.as_str()) + .and_then(PluginEventKind::parse) + .is_some_and(|kind| kind.affects_manifests()) } fn epoch_secs_now() -> u64 { @@ -460,7 +458,7 @@ impl S3BackedPluginStore { .payload .get("event") .and_then(|value| value.as_str()) - .unwrap_or(""); + .and_then(PluginEventKind::parse); if is_manifest_affecting_event(event) { self.global_cache.invalidate(); @@ -469,7 +467,7 @@ if let Some(owner_id) = event.workspace_id.or(event.user_id) { if let Some(plugin_id) = event.payload.get("id").and_then(|v| v.as_str()) { let key = UserPluginKey::new(owner_id, plugin_id); - if kind == "uninstalled" { + if kind == Some(PluginEventKind::Uninstalled) { self.schedule_remove_user_plugin(key); } else { self.schedule_refresh_user_plugin(key);
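
The port methods in this file now return PortResult, and every implementation wraps its original anyhow body in an async block followed by out.map_err(Into::into). That boundary conversion could be factored out; a sketch, assuming only that the port error type implements From<anyhow::Error> (which the existing map_err calls already imply):

```rust
use std::future::Future;

use application::core::ports::errors::PortResult;

// Hypothetical adapter, not in the patch: run an anyhow-based future and
// convert its error at the port boundary -- exactly what the inlined
// `let out: anyhow::Result<_> = async { .. }.await; out.map_err(Into::into)`
// blocks below do by hand.
async fn into_port<T>(body: impl Future<Output = anyhow::Result<T>>) -> PortResult<T> {
    body.await.map_err(Into::into)
}
```

Each trait method below would then reduce to into_port(async { ... }).await.

@@ -506,42 +504,58 @@ impl PluginAssetStore for S3BackedPluginStore { plugin_id: &str, version: &str,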
relative_path: &str, - ) -> anyhow::Result<PluginAssetPayload> { - let owner = match scope { - PluginAssetStoreScope::Global => None, - PluginAssetStoreScope::User { owner_id } => Some(*owner_id), - }; - self.ensure_local(owner, plugin_id).await?; - self.local - .fetch_asset(scope, plugin_id, version, relative_path) - .await - } - - async fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> anyhow::Result<()> { - self.local.remove_user_plugin_dir(user_id, plugin_id)?; - let prefix = format!("{}/{}", user_id, plugin_id); - let client = self.client.clone(); - let bucket = self.bucket.clone(); - tokio::spawn(async move { - let _ = delete_prefix(&client, &bucket, &prefix).await; - }); - self.global_cache.invalidate(); - Ok(()) - } - - async fn list_latest_global_manifests( - &self, - ) -> anyhow::Result<Vec<LatestGlobalManifest>> { - let now = epoch_secs_now(); - if self.global_cache.needs_refresh(now) { - let _guard = self.global_cache.refresh_lock.lock().await; - let refreshed_now = epoch_secs_now(); - if self.global_cache.needs_refresh(refreshed_now) { - download_prefix(&self.client, &self.bucket, "global", self.local.root()).await?; - self.global_cache.mark_refreshed(refreshed_now); + ) -> PortResult<PluginAssetPayload> { + let out: anyhow::Result<PluginAssetPayload> = async { + let owner = match scope { + PluginAssetStoreScope::Global => None, + PluginAssetStoreScope::User { owner_id } => Some(*owner_id), + }; + self.ensure_local(owner, plugin_id).await?; + let payload = self + .local + .fetch_asset(scope, plugin_id, version, relative_path) + .await?; + Ok(payload) + } + .await; + out.map_err(Into::into) + } + + async fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> PortResult<()> { + let out: anyhow::Result<()> = async { + self.local.remove_user_plugin_dir(user_id, plugin_id)?; + let prefix = format!("{}/{}", user_id, plugin_id); + let client = self.client.clone(); + let bucket = self.bucket.clone(); + tokio::spawn(async move { + let _ = delete_prefix(&client, &bucket, &prefix).await; + }); + self.global_cache.invalidate(); + Ok(()) + } + .await; + out.map_err(Into::into) + } + + async fn list_latest_global_manifests(&self) -> PortResult<Vec<LatestGlobalManifest>> { + let out: anyhow::Result<Vec<LatestGlobalManifest>> = async { + let now = epoch_secs_now(); + if self.global_cache.needs_refresh(now) { + let _guard = self.global_cache.refresh_lock.lock().await; + let refreshed_now = epoch_secs_now(); + if self.global_cache.needs_refresh(refreshed_now) { + download_prefix(&self.client, &self.bucket, "global", self.local.root()) + .await?; + self.global_cache.mark_refreshed(refreshed_now); + } } + self.local + .list_latest_global_manifests() + .await + .map_err(Into::into) } - self.local.list_latest_global_manifests().await + .await; + out.map_err(Into::into) } async fn load_user_manifest( @@ -549,14 +563,19 @@ impl PluginAssetStore for S3BackedPluginStore { user_id: &Uuid, plugin_id: &str, version: &str, - ) -> anyhow::Result<Option<serde_json::Value>> { - if !FilesystemPluginStore::is_valid_plugin_id(plugin_id) { - return Ok(None); + ) -> PortResult<Option<serde_json::Value>> { + let out: anyhow::Result<Option<serde_json::Value>> = async { + if !FilesystemPluginStore::is_valid_plugin_id(plugin_id) { + return Ok(None); + } + self.ensure_local(Some(*user_id), plugin_id).await?; + self.local + .load_user_manifest(user_id, plugin_id, version) + .await + .map_err(Into::into) } - self.ensure_local(Some(*user_id), plugin_id).await?; - self.local - .load_user_manifest(user_id, plugin_id, version) - .await + .await; + out.map_err(Into::into) + } }
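
list_latest_global_manifests above refreshes the local mirror at most every GLOBAL_MANIFEST_CACHE_TTL_SECS (300 s) using a double-checked pattern: test staleness, acquire the async refresh lock, test again, then download. The cache type itself lies outside these hunks, so this is a self-contained sketch of the idiom; the field and method names mirror the call sites, the struct itself is a stand-in:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use tokio::sync::Mutex;

// Stand-in for the elided cache type; only needs_refresh / mark_refreshed /
// refresh_lock / invalidate appear in this diff, the rest is assumption.
struct GlobalManifestCache {
    refreshed_at: AtomicU64, // epoch seconds; 0 = never refreshed
    ttl_secs: u64,
    refresh_lock: Mutex<()>,
}

impl GlobalManifestCache {
    fn needs_refresh(&self, now: u64) -> bool {
        now.saturating_sub(self.refreshed_at.load(Ordering::Acquire)) >= self.ttl_secs
    }

    fn mark_refreshed(&self, now: u64) {
        self.refreshed_at.store(now, Ordering::Release);
    }

    fn invalidate(&self) {
        self.refreshed_at.store(0, Ordering::Release);
    }

    // The double-checked refresh as used above: first check without the
    // lock, second check under it, so concurrent callers download once.
    async fn refresh_if_stale<F, Fut>(&self, now: fn() -> u64, download: F) -> anyhow::Result<()>
    where
        F: FnOnce() -> Fut,
        Fut: std::future::Future<Output = anyhow::Result<()>>,
    {
        if self.needs_refresh(now()) {
            let _guard = self.refresh_lock.lock().await;
            let checked = now();
            if self.needs_refresh(checked) {
                download().await?;
                self.mark_refreshed(checked);
            }
        }
        Ok(())
    }
}
```

The second needs_refresh check under the lock is what keeps late arrivals from re-downloading the prefix after a peer has already refreshed it.

@@ -590,12 +609,19 @@ impl PluginRuntime for S3BackedPluginStore { plugin: &str, action: &str, payload: &serde_json::Value, - ) ->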
anyhow::Result<Option<ExecResult>> { - if !FilesystemPluginStore::is_valid_plugin_id(plugin) { - return Ok(None); + ) -> PortResult<Option<ExecResult>> { + let out: anyhow::Result<Option<ExecResult>> = async { + if !FilesystemPluginStore::is_valid_plugin_id(plugin) { + return Ok(None); + } + self.ensure_local(user_id, plugin).await?; + self.local + .execute(user_id, plugin, action, payload) + .await + .map_err(Into::into) } - self.ensure_local(user_id, plugin).await?; - self.local.execute(user_id, plugin, action, payload).await + .await; + out.map_err(Into::into) } async fn render_placeholder( @@ -604,25 +630,37 @@ plugin: &str, function: &str, request: &serde_json::Value, - ) -> anyhow::Result<Option<serde_json::Value>> { - if !FilesystemPluginStore::is_valid_plugin_id(plugin) { - return Ok(None); + ) -> PortResult<Option<serde_json::Value>> { + let out: anyhow::Result<Option<serde_json::Value>> = async { + if !FilesystemPluginStore::is_valid_plugin_id(plugin) { + return Ok(None); + } + self.ensure_local(user_id, plugin).await?; + self.local + .render_placeholder(user_id, plugin, function, request) + .await + .map_err(Into::into) } - self.ensure_local(user_id, plugin).await?; - self.local - .render_placeholder(user_id, plugin, function, request) - .await + .await; + out.map_err(Into::into) } async fn permissions( &self, user_id: Option<Uuid>, plugin: &str, - ) -> anyhow::Result<Option<Vec<String>>> { - if !FilesystemPluginStore::is_valid_plugin_id(plugin) { - return Ok(None); + ) -> PortResult<Option<Vec<String>>> { + let out: anyhow::Result<Option<Vec<String>>> = async { + if !FilesystemPluginStore::is_valid_plugin_id(plugin) { + return Ok(None); + } + self.ensure_local(user_id, plugin).await?; + self.local + .permissions(user_id, plugin) + .await + .map_err(Into::into) } - self.ensure_local(user_id, plugin).await?; - self.local.permissions(user_id, plugin).await + .await; + out.map_err(Into::into) + } }
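
All three PluginRuntime methods share one shape: reject invalid plugin ids with Ok(None), mirror the plugin locally via ensure_local, then delegate to the filesystem store. A hypothetical call site (not in the patch), assuming a Tokio context; the plugin id, action name and payload shape are invented for illustration:

```rust
use serde_json::json;

use application::plugins::ports::plugin_runtime::PluginRuntime;

// Hypothetical usage sketch; assumes the port error type implements Display.
async fn demo(runtime: &dyn PluginRuntime) {
    let payload = json!({ "args": { "title": "Hello" } }); // assumed shape
    match runtime.execute(None, "example-plugin", "create", &payload).await {
        Ok(Some(_result)) => println!("plugin produced an ExecResult"),
        Ok(None) => println!("unknown plugin or invalid id"),
        Err(err) => eprintln!("plugin call failed: {err}"),
    }
}
```

diff --git a/api/crates/infrastructure/src/workspaces/db/mod.rs b/api/crates/infrastructure/src/workspaces/db/mod.rs new file mode 100644 index 00000000..21b552a0 --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/mod.rs @@ -0,0 +1 @@ +pub mod repositories; diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/mod.rs b/api/crates/infrastructure/src/workspaces/db/repositories/mod.rs new file mode 100644 index 00000000..501757ec --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/mod.rs @@ -0,0 +1 @@ +pub mod workspace_repository_sqlx; diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/helpers.rs b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/helpers.rs new file mode 100644 index 00000000..642cfbe0 --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/helpers.rs @@ -0,0 +1,156 @@ +use std::collections::HashMap; + +use anyhow::Context; +use chrono::{DateTime, Utc}; +use sqlx::{PgConnection, Row, postgres::PgRow}; +use uuid::Uuid; + +use application::workspaces::ports::workspace_repository::{ + WorkspaceInvitationRecord, WorkspaceRoleRecord, +}; +use domain::access::permissions::PermissionOverride; +use domain::workspaces::roles::{WorkspaceBaseRole, WorkspaceRoleKind, WorkspaceSystemRole}; + +use super::SqlxWorkspaceRepository; + +impl SqlxWorkspaceRepository { + pub(super) fn collect_roles( + &self, + rows: Vec<PgRow>, + ) -> anyhow::Result<Vec<WorkspaceRoleRecord>> { + let mut map: HashMap<Uuid, WorkspaceRoleRecord> = HashMap::new(); + for row in rows { + let role_id: Uuid = row.get("id"); + let base_role_raw: String = row.get("base_role"); + let entry = map.entry(role_id).or_insert_with(|| WorkspaceRoleRecord {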
id: role_id, + workspace_id: row.get("workspace_id"), + name: row.get("name"), + description: row.try_get("description").ok(), + base_role: WorkspaceBaseRole::Viewer, + priority: row.get("priority"), + overrides: Vec::new(), + }); + entry.base_role = Self::parse_base_role(&base_role_raw).with_context(|| { + format!("invalid workspace_roles.base_role for role_id={role_id}") + })?; + if let (Some(permission), Some(allowed)) = ( + row.try_get::<Option<String>, _>("permission") + .ok() + .flatten(), + row.try_get::<Option<bool>, _>("allowed").ok().flatten(), + ) { + entry + .overrides + .push(PermissionOverride::new(permission, allowed)); + } + } + Ok(map + .into_values() + .map(|mut record| { + record + .overrides + .sort_by(|a, b| a.permission.cmp(&b.permission)); + record + }) + .collect()) + } + + pub(super) async fn replace_role_permissions_tx( + &self, + tx: &mut PgConnection, + role_id: Uuid, + overrides: &[PermissionOverride], + ) -> anyhow::Result<()> { + sqlx::query("DELETE FROM workspace_role_permissions WHERE workspace_role_id = $1") + .bind(role_id) + .execute(&mut *tx) + .await?; + for item in overrides { + sqlx::query( + r#"INSERT INTO workspace_role_permissions (workspace_role_id, permission, allowed) + VALUES ($1, $2, $3)"#, + ) + .bind(role_id) + .bind(item.permission.as_str()) + .bind(item.allowed) + .execute(&mut *tx) + .await?; + } + Ok(()) + } + + pub(super) async fn fetch_role_overrides( + &self, + role_id: Uuid, + ) -> anyhow::Result<Vec<PermissionOverride>> { + let rows = sqlx::query( + r#"SELECT permission, allowed + FROM workspace_role_permissions + WHERE workspace_role_id = $1"#, + ) + .bind(role_id) + .fetch_all(&self.pool) + .await?; + Ok(rows + .into_iter() + .filter_map(|row| { + row.try_get::<Option<String>, _>("permission") + .ok() + .flatten() + .map(|perm| PermissionOverride::new(perm, row.get("allowed"))) + }) + .collect()) + } + + pub(super) fn map_invitation_row( + &self, + row: &PgRow, + ) -> anyhow::Result<WorkspaceInvitationRecord> { + let role_kind_raw: String = row.get("role_kind"); + let system_role_raw: Option<String> = row.try_get("system_role").ok(); + Ok(WorkspaceInvitationRecord { + id: row.get("id"), + workspace_id: row.get("workspace_id"), + email: row.get("email"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: row.try_get("custom_role_id").ok().flatten(), + invited_by: row.get("invited_by"), + token: row.get("token"), + expires_at: row + .try_get::<Option<DateTime<Utc>>, _>("expires_at") + .ok() + .flatten(), + accepted_by: row.try_get("accepted_by").ok().flatten(), + accepted_at: row + .try_get::<Option<DateTime<Utc>>, _>("accepted_at") + .ok() + .flatten(), + revoked_at: row + .try_get::<Option<DateTime<Utc>>, _>("revoked_at") + .ok() + .flatten(), + created_at: row.get("created_at"), + }) + } + + pub(super) fn parse_role_kind(raw: &str) -> anyhow::Result<WorkspaceRoleKind> { + WorkspaceRoleKind::parse(raw).ok_or_else(|| anyhow::anyhow!("invalid_role_kind")) + } + + pub(super) fn parse_system_role( + raw: Option<&str>, + ) -> anyhow::Result<Option<WorkspaceSystemRole>> { + let Some(raw) = raw else { + return Ok(None); + }; + WorkspaceSystemRole::parse(raw) + .ok_or_else(|| anyhow::anyhow!("invalid_system_role")) + .map(Some) + } + + pub(super) fn parse_base_role(raw: &str) -> anyhow::Result<WorkspaceBaseRole> { + WorkspaceBaseRole::parse(raw).ok_or_else(|| anyhow::anyhow!("invalid_base_role")) + } +}
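
The parse_* helpers above turn unknown database strings into stable error codes (invalid_role_kind, invalid_system_role, invalid_base_role) instead of panicking mid-row-mapping. A sketch of the contract they assume, written as tests; the "admin" spelling is an assumption, not confirmed by this patch:

```rust
#[cfg(test)]
mod tests {
    use domain::workspaces::roles::WorkspaceSystemRole;

    #[test]
    fn unknown_system_role_is_rejected() {
        assert!(WorkspaceSystemRole::parse("not-a-role").is_none());
    }

    #[test]
    fn known_system_role_round_trips() {
        // "admin" is an assumed variant spelling.
        if let Some(role) = WorkspaceSystemRole::parse("admin") {
            assert_eq!(role.as_str(), "admin");
        }
    }
}
```

diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/mod.rs b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/mod.rs new file mode 100644 index 00000000..e58fb7e8 --- /dev/null +++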
b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/mod.rs @@ -0,0 +1,14 @@ +use crate::core::db::PgPool; + +mod helpers; +mod repository; + +pub struct SqlxWorkspaceRepository { + pub pool: PgPool, +} + +impl SqlxWorkspaceRepository { + pub fn new(pool: PgPool) -> Self { + Self { pool } + } +}
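
SqlxWorkspaceRepository is deliberately thin: the struct above owns only the pool, helpers.rs holds row mapping, and repository.rs (next) adapts each trait method to an _impl sibling. Wiring it into the application layer is then a one-liner; a hypothetical sketch, with paths as seen from within this crate:

```rust
use std::sync::Arc;

use application::workspaces::ports::workspace_repository::WorkspaceRepository;

use crate::core::db::PgPool;
use crate::workspaces::db::repositories::workspace_repository_sqlx::SqlxWorkspaceRepository;

// Hypothetical wiring, presumably done by the bootstrap crate: hand the
// sqlx-backed repository to the application layer as a trait object.
fn workspace_repository(pool: PgPool) -> Arc<dyn WorkspaceRepository> {
    Arc::new(SqlxWorkspaceRepository::new(pool))
}
```

diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository.rs b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository.rs new file mode 100644 index 00000000..7c6a7e94 --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository.rs @@ -0,0 +1,300 @@ +use anyhow::bail; +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use sqlx::Row; +use uuid::Uuid; + +use application::core::ports::errors::PortResult; +use application::workspaces::ports::workspace_repository::{ + WorkspaceInvitationRecord, WorkspaceListItem, WorkspaceMemberDetail, WorkspaceMemberRow, + WorkspacePermissionRecord, WorkspaceRepository, WorkspaceRoleRecord, WorkspaceRow, + WorkspaceSetDefaultError, +}; +use domain::access::permissions::PermissionOverride; +use domain::workspaces::roles::{WorkspaceBaseRole, WorkspaceRoleKind, WorkspaceSystemRole}; + +use super::SqlxWorkspaceRepository; + +mod invitations; +mod members; +mod roles; +mod workspaces; + +#[async_trait] +impl WorkspaceRepository for SqlxWorkspaceRepository { + async fn list_for_user(&self, user_id: Uuid) -> PortResult<Vec<WorkspaceListItem>> { + self.list_for_user_impl(user_id).await.map_err(Into::into) + } + + async fn create_workspace( + &self, + creator_id: Uuid, + name: &str, + slug: &str, + icon: Option<&str>, + description: Option<&str>, + is_personal: bool, + ) -> PortResult<WorkspaceRow> { + self.create_workspace_impl(creator_id, name, slug, icon, description, is_personal) + .await + .map_err(Into::into) + } + + async fn get_workspace(&self, workspace_id: Uuid) -> PortResult<Option<WorkspaceRow>> { + self.get_workspace_impl(workspace_id) + .await + .map_err(Into::into) + } + + async fn create_workspace_with_id( + &self, + workspace_id: Uuid, + created_by: Option<Uuid>, + name: &str, + slug: &str, + icon: Option<&str>, + description: Option<&str>, + is_personal: bool, + ) -> PortResult<WorkspaceRow> { + self.create_workspace_with_id_impl( + workspace_id, + created_by, + name, + slug, + icon, + description, + is_personal, + ) + .await + .map_err(Into::into) + } + + async fn add_member( + &self, + workspace_id: Uuid, + user_id: Uuid, + role_kind: WorkspaceRoleKind, + system_role: Option<WorkspaceSystemRole>, + custom_role_id: Option<Uuid>, + ) -> PortResult<WorkspaceMemberRow> { + self.add_member_impl( + workspace_id, + user_id, + role_kind, + system_role, + custom_role_id, + ) + .await + .map_err(Into::into) + } + + async fn set_default_workspace( + &self, + user_id: Uuid, + workspace_id: Uuid, + ) -> Result<WorkspaceMemberRow, WorkspaceSetDefaultError> { + self.set_default_workspace_impl(user_id, workspace_id).await + } + + async fn list_members(&self, workspace_id: Uuid) -> PortResult<Vec<WorkspaceMemberDetail>> { + self.list_members_impl(workspace_id) + .await + .map_err(Into::into) + } + + async fn get_member_detail( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> PortResult<Option<WorkspaceMemberDetail>> { + self.get_member_detail_impl(workspace_id, user_id) + .await + .map_err(Into::into) + } + + async fn update_member_role( + &self, + workspace_id: Uuid, + user_id: Uuid, + role_kind: WorkspaceRoleKind, + system_role: Option<WorkspaceSystemRole>, + custom_role_id: Option<Uuid>, + ) -> PortResult<WorkspaceMemberRow> { + self.update_member_role_impl( + workspace_id, + user_id, + role_kind, + system_role, + custom_role_id, + ) + .await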
.map_err(Into::into) + } + + async fn get_member_with_permissions( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> PortResult<Option<WorkspacePermissionRecord>> { + self.get_member_with_permissions_impl(workspace_id, user_id) + .await + .map_err(Into::into) + } + + async fn count_system_role_members( + &self, + workspace_id: Uuid, + system_role: WorkspaceSystemRole, + ) -> PortResult<i64> { + self.count_system_role_members_impl(workspace_id, system_role) + .await + .map_err(Into::into) + } + + async fn list_roles(&self, workspace_id: Uuid) -> PortResult<Vec<WorkspaceRoleRecord>> { + self.list_roles_impl(workspace_id).await.map_err(Into::into) + } + + async fn create_role( + &self, + workspace_id: Uuid, + name: &str, + base_role: WorkspaceBaseRole, + description: Option<&str>, + priority: i32, + overrides: &[PermissionOverride], + ) -> PortResult<WorkspaceRoleRecord> { + self.create_role_impl( + workspace_id, + name, + base_role, + description, + priority, + overrides, + ) + .await + .map_err(Into::into) + } + + async fn update_role( + &self, + workspace_id: Uuid, + role_id: Uuid, + name: Option<&str>, + base_role: Option<WorkspaceBaseRole>, + description: Option<&str>, + priority: Option<i32>, + overrides: Option<&[PermissionOverride]>, + ) -> PortResult<WorkspaceRoleRecord> { + self.update_role_impl( + workspace_id, + role_id, + name, + base_role, + description, + priority, + overrides, + ) + .await + .map_err(Into::into) + } + + async fn delete_role(&self, workspace_id: Uuid, role_id: Uuid) -> PortResult<bool> { + self.delete_role_impl(workspace_id, role_id) + .await + .map_err(Into::into) + } + + async fn delete_workspace(&self, workspace_id: Uuid) -> PortResult<bool> { + self.delete_workspace_impl(workspace_id) + .await + .map_err(Into::into) + } + + async fn get_role( + &self, + workspace_id: Uuid, + role_id: Uuid, + ) -> PortResult<Option<WorkspaceRoleRecord>> { + self.get_role_impl(workspace_id, role_id) + .await + .map_err(Into::into) + } + + async fn delete_member(&self, workspace_id: Uuid, user_id: Uuid) -> PortResult<bool> { + self.delete_member_impl(workspace_id, user_id) + .await + .map_err(Into::into) + } + + async fn update_workspace( + &self, + workspace_id: Uuid, + name: Option<&str>, + icon: Option<&str>, + description: Option<&str>, + ) -> PortResult<Option<WorkspaceRow>> { + self.update_workspace_impl(workspace_id, name, icon, description) + .await + .map_err(Into::into) + } + + async fn create_invitation( + &self, + workspace_id: Uuid, + email: &str, + role_kind: WorkspaceRoleKind, + system_role: Option<WorkspaceSystemRole>, + custom_role_id: Option<Uuid>, + invited_by: Uuid, + token: &str, + expires_at: Option<DateTime<Utc>>, + ) -> PortResult<WorkspaceInvitationRecord> { + self.create_invitation_impl( + workspace_id, + email, + role_kind, + system_role, + custom_role_id, + invited_by, + token, + expires_at, + ) + .await + .map_err(Into::into) + } + + async fn list_invitations( + &self, + workspace_id: Uuid, + ) -> PortResult<Vec<WorkspaceInvitationRecord>> { + self.list_invitations_impl(workspace_id) + .await + .map_err(Into::into) + } + + async fn accept_invitation( + &self, + token: &str, + user_id: Uuid, + email: &str, + ) -> PortResult<WorkspaceInvitationRecord> { + self.accept_invitation_impl(token, user_id, email) + .await + .map_err(Into::into) + } + + async fn revoke_invitation( + &self, + workspace_id: Uuid, + invitation_id: Uuid, + ) -> PortResult<Option<WorkspaceInvitationRecord>> { + self.revoke_invitation_impl(workspace_id, invitation_id) + .await + .map_err(Into::into) + } + + async fn list_all_workspace_ids(&self) -> PortResult<Vec<Uuid>> { + self.list_all_workspace_ids_impl().await.map_err(Into::into) + } +}
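
With every method returning PortResult, application services can compose repository calls with plain ?. A hypothetical service-layer flow, not in the patch; the System/Admin variant names are assumptions:

```rust
use uuid::Uuid;

use application::core::ports::errors::PortResult;
use application::workspaces::ports::workspace_repository::WorkspaceRepository;
use domain::workspaces::roles::{WorkspaceRoleKind, WorkspaceSystemRole};

// Hypothetical flow: create a workspace, then enroll its creator as a
// system admin; variant names are assumed for illustration.
async fn create_with_owner(repo: &dyn WorkspaceRepository, creator: Uuid) -> PortResult<()> {
    let ws = repo
        .create_workspace(creator, "Acme", "acme", None, None, false)
        .await?;
    repo.add_member(
        ws.id,
        creator,
        WorkspaceRoleKind::System,        // assumed variant
        Some(WorkspaceSystemRole::Admin), // assumed variant
        None,
    )
    .await?;
    Ok(())
}
```

diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/invitations.rs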
b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/invitations.rs new file mode 100644 index 00000000..2d9903f8 --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/invitations.rs @@ -0,0 +1,162 @@ +use super::*; + +impl SqlxWorkspaceRepository { + pub(super) async fn create_invitation_impl( + &self, + workspace_id: Uuid, + email: &str, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + invited_by: Uuid, + token: &str, + expires_at: Option>, + ) -> anyhow::Result { + let row = sqlx::query( + r#"INSERT INTO workspace_invitations ( + workspace_id, + email, + role_kind, + system_role, + custom_role_id, + invited_by, + token, + expires_at + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + RETURNING id, workspace_id, email, role_kind, system_role, custom_role_id, + invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, + created_at"#, + ) + .bind(workspace_id) + .bind(email) + .bind(role_kind.as_str()) + .bind(system_role.map(|r| r.as_str())) + .bind(custom_role_id) + .bind(invited_by) + .bind(token) + .bind(expires_at) + .fetch_one(&self.pool) + .await?; + self.map_invitation_row(&row) + } + pub(super) async fn list_invitations_impl( + &self, + workspace_id: Uuid, + ) -> anyhow::Result> { + let rows = sqlx::query( + r#"SELECT id, workspace_id, email, role_kind, system_role, custom_role_id, + invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, + created_at + FROM workspace_invitations + WHERE workspace_id = $1 + ORDER BY created_at DESC"#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + rows.into_iter() + .map(|row| self.map_invitation_row(&row)) + .collect::>>() + } + pub(super) async fn accept_invitation_impl( + &self, + token: &str, + user_id: Uuid, + user_email: &str, + ) -> anyhow::Result { + let mut tx = self.pool.begin().await?; + let row = sqlx::query( + r#"SELECT id, workspace_id, email, role_kind, system_role, custom_role_id, + invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, + created_at + FROM workspace_invitations + WHERE token = $1 + FOR UPDATE"#, + ) + .bind(token) + .fetch_optional(tx.as_mut()) + .await?; + + let Some(row) = row else { + bail!("invitation_not_found"); + }; + let mut record = self.map_invitation_row(&row)?; + if record.revoked_at.is_some() { + bail!("invitation_revoked"); + } + if record.accepted_at.is_some() { + bail!("invitation_already_accepted"); + } + if record + .expires_at + .is_some_and(|expires| expires < Utc::now()) + { + bail!("invitation_expired"); + } + if record.email.trim().to_lowercase() != user_email.trim().to_lowercase() { + bail!("invitation_email_mismatch"); + } + + let now = Utc::now(); + sqlx::query( + r#"UPDATE workspace_invitations + SET accepted_by = $2, accepted_at = $3 + WHERE id = $1"#, + ) + .bind(record.id) + .bind(user_id) + .bind(now) + .execute(tx.as_mut()) + .await?; + + sqlx::query( + r#"INSERT INTO workspace_members ( + workspace_id, + user_id, + role_kind, + system_role, + custom_role_id, + invited_by, + is_default + ) + VALUES ($1, $2, $3, $4, $5, $6, false) + ON CONFLICT (workspace_id, user_id) DO UPDATE SET + role_kind = EXCLUDED.role_kind, + system_role = EXCLUDED.system_role, + custom_role_id = EXCLUDED.custom_role_id"#, + ) + .bind(record.workspace_id) + .bind(user_id) + .bind(record.role_kind.as_str()) + .bind(record.system_role.map(|role| role.as_str())) + .bind(record.custom_role_id) + .bind(record.invited_by) + 
.execute(tx.as_mut()) + .await?; + + tx.commit().await?; + record.accepted_by = Some(user_id); + record.accepted_at = Some(now); + Ok(record) + } + pub(super) async fn revoke_invitation_impl( + &self, + workspace_id: Uuid, + invitation_id: Uuid, + ) -> anyhow::Result> { + let row = sqlx::query( + r#"UPDATE workspace_invitations + SET revoked_at = now() + WHERE id = $1 AND workspace_id = $2 AND revoked_at IS NULL AND accepted_at IS NULL + RETURNING id, workspace_id, email, role_kind, system_role, custom_role_id, + invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, + created_at"#, + ) + .bind(invitation_id) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + row.map(|row| self.map_invitation_row(&row)).transpose() + } +} diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/members.rs b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/members.rs new file mode 100644 index 00000000..ff30f4d5 --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/members.rs @@ -0,0 +1,303 @@ +use super::*; + +impl SqlxWorkspaceRepository { + pub(super) async fn add_member_impl( + &self, + workspace_id: Uuid, + user_id: Uuid, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + ) -> anyhow::Result { + let row = sqlx::query( + r#"INSERT INTO workspace_members (workspace_id, user_id, role_kind, system_role, custom_role_id, invited_by) + VALUES ($1, $2, $3, $4, $5, $2) + ON CONFLICT (workspace_id, user_id) DO UPDATE SET + role_kind = EXCLUDED.role_kind, + system_role = EXCLUDED.system_role, + custom_role_id = EXCLUDED.custom_role_id + RETURNING workspace_id, user_id, role_kind, system_role, custom_role_id, is_default"#, + ) + .bind(workspace_id) + .bind(user_id) + .bind(role_kind.as_str()) + .bind(system_role.map(|r| r.as_str())) + .bind(custom_role_id) + .fetch_one(&self.pool) + .await?; + let role_kind_raw: String = row.get("role_kind"); + let system_role_raw: Option = row.try_get("system_role").ok(); + Ok(WorkspaceMemberRow { + workspace_id: row.get("workspace_id"), + user_id: row.get("user_id"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: row.try_get("custom_role_id").ok(), + is_default: row.get("is_default"), + }) + } + pub(super) async fn set_default_workspace_impl( + &self, + user_id: Uuid, + workspace_id: Uuid, + ) -> Result { + let mut tx = self + .pool + .begin() + .await + .map_err(|err| WorkspaceSetDefaultError::Unexpected(err.into()))?; + sqlx::query(r#"UPDATE workspace_members SET is_default = false WHERE user_id = $1"#) + .bind(user_id) + .execute(tx.as_mut()) + .await + .map_err(|err| WorkspaceSetDefaultError::Unexpected(err.into()))?; + + let row = sqlx::query( + r#"UPDATE workspace_members + SET is_default = true + WHERE workspace_id = $1 AND user_id = $2 + RETURNING workspace_id, user_id, role_kind, system_role, custom_role_id, is_default"#, + ) + .bind(workspace_id) + .bind(user_id) + .fetch_optional(tx.as_mut()) + .await + .map_err(|err| WorkspaceSetDefaultError::Unexpected(err.into()))?; + + let Some(row) = row else { + tx.rollback().await.ok(); + return Err(WorkspaceSetDefaultError::MembershipNotFound); + }; + + sqlx::query(r#"UPDATE users SET default_workspace_id = $1 WHERE id = $2"#) + .bind(workspace_id) + .bind(user_id) + .execute(tx.as_mut()) + .await + .map_err(|err| 
WorkspaceSetDefaultError::Unexpected(err.into()))?; + + tx.commit() + .await + .map_err(|err| WorkspaceSetDefaultError::Unexpected(err.into()))?; + let role_kind_raw: String = row.get("role_kind"); + let system_role_raw: Option = row.try_get("system_role").ok(); + Ok(WorkspaceMemberRow { + workspace_id: row.get("workspace_id"), + user_id: row.get("user_id"), + role_kind: Self::parse_role_kind(&role_kind_raw) + .map_err(WorkspaceSetDefaultError::Unexpected)?, + system_role: Self::parse_system_role(system_role_raw.as_deref()) + .map_err(WorkspaceSetDefaultError::Unexpected)?, + custom_role_id: row.try_get("custom_role_id").ok(), + is_default: row.get("is_default"), + }) + } + pub(super) async fn list_members_impl( + &self, + workspace_id: Uuid, + ) -> anyhow::Result> { + let rows = sqlx::query( + r#"SELECT m.workspace_id, + m.user_id, + m.role_kind, + m.system_role, + m.custom_role_id, + m.is_default, + u.email, + u.name + FROM workspace_members m + JOIN users u ON u.id = m.user_id + WHERE m.workspace_id = $1 + ORDER BY u.name"#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + rows.into_iter() + .map(|row| { + let role_kind_raw: String = row.get("role_kind"); + let system_role_raw: Option = row.try_get("system_role").ok(); + Ok(WorkspaceMemberDetail { + workspace_id: row.get("workspace_id"), + user_id: row.get("user_id"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: row.try_get("custom_role_id").ok(), + is_default: row.get("is_default"), + user_email: row.get("email"), + user_name: row.get("name"), + }) + }) + .collect::>>() + } + pub(super) async fn get_member_detail_impl( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> anyhow::Result> { + let row = sqlx::query( + r#"SELECT m.workspace_id, + m.user_id, + m.role_kind, + m.system_role, + m.custom_role_id, + m.is_default, + u.email, + u.name + FROM workspace_members m + JOIN users u ON u.id = m.user_id + WHERE m.workspace_id = $1 AND m.user_id = $2"#, + ) + .bind(workspace_id) + .bind(user_id) + .fetch_optional(&self.pool) + .await?; + match row { + None => Ok(None), + Some(row) => { + let role_kind_raw: String = row.get("role_kind"); + let system_role_raw: Option = row.try_get("system_role").ok(); + Ok(Some(WorkspaceMemberDetail { + workspace_id: row.get("workspace_id"), + user_id: row.get("user_id"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: row.try_get("custom_role_id").ok(), + is_default: row.get("is_default"), + user_email: row.get("email"), + user_name: row.get("name"), + })) + } + } + } + pub(super) async fn update_member_role_impl( + &self, + workspace_id: Uuid, + user_id: Uuid, + role_kind: WorkspaceRoleKind, + system_role: Option, + custom_role_id: Option, + ) -> anyhow::Result { + let row = sqlx::query( + r#"UPDATE workspace_members + SET role_kind = $3, + system_role = $4, + custom_role_id = $5 + WHERE workspace_id = $1 AND user_id = $2 + RETURNING workspace_id, user_id, role_kind, system_role, custom_role_id, is_default"#, + ) + .bind(workspace_id) + .bind(user_id) + .bind(role_kind.as_str()) + .bind(system_role.map(|r| r.as_str())) + .bind(custom_role_id) + .fetch_optional(&self.pool) + .await?; + let Some(row) = row else { + bail!("membership_not_found"); + }; + let role_kind_raw: String = row.get("role_kind"); + let system_role_raw: Option = row.try_get("system_role").ok(); + Ok(WorkspaceMemberRow { + workspace_id: 
row.get("workspace_id"), + user_id: row.get("user_id"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: row.try_get("custom_role_id").ok(), + is_default: row.get("is_default"), + }) + } + pub(super) async fn get_member_with_permissions_impl( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> anyhow::Result> { + let rows = sqlx::query( + r#"SELECT m.workspace_id, + m.user_id, + m.role_kind, + m.system_role, + m.custom_role_id, + r.base_role, + p.permission, + p.allowed + FROM workspace_members m + LEFT JOIN workspace_roles r ON r.id = m.custom_role_id + LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id + WHERE m.workspace_id = $1 AND m.user_id = $2"#, + ) + .bind(workspace_id) + .bind(user_id) + .fetch_all(&self.pool) + .await?; + + if rows.is_empty() { + return Ok(None); + } + + let first = &rows[0]; + let role_kind_raw: String = first.get("role_kind"); + let system_role_raw: Option = first.try_get("system_role").ok(); + let custom_base_role_raw: Option = first.try_get("base_role").ok(); + let mut record = WorkspacePermissionRecord { + workspace_id: first.get("workspace_id"), + user_id: first.get("user_id"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: first.try_get("custom_role_id").ok(), + custom_base_role: match custom_base_role_raw { + None => None, + Some(raw) => Some(Self::parse_base_role(&raw)?), + }, + overrides: Vec::new(), + }; + + for row in rows { + if let (Some(permission), Some(allowed)) = ( + row.try_get::, _>("permission") + .ok() + .flatten(), + row.try_get::, _>("allowed").ok().flatten(), + ) { + record + .overrides + .push(PermissionOverride::new(permission, allowed)); + } + } + + Ok(Some(record)) + } + pub(super) async fn count_system_role_members_impl( + &self, + workspace_id: Uuid, + system_role: WorkspaceSystemRole, + ) -> anyhow::Result { + let count = sqlx::query_scalar( + r#"SELECT COUNT(1)::BIGINT + FROM workspace_members + WHERE workspace_id = $1 + AND role_kind = 'system' + AND system_role = $2"#, + ) + .bind(workspace_id) + .bind(system_role.as_str()) + .fetch_one(&self.pool) + .await?; + Ok(count) + } + pub(super) async fn delete_member_impl( + &self, + workspace_id: Uuid, + user_id: Uuid, + ) -> anyhow::Result { + let result = sqlx::query( + r#"DELETE FROM workspace_members + WHERE workspace_id = $1 AND user_id = $2"#, + ) + .bind(workspace_id) + .bind(user_id) + .execute(&self.pool) + .await?; + Ok(result.rows_affected() > 0) + } +} diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/roles.rs b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/roles.rs new file mode 100644 index 00000000..429fabee --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/roles.rs @@ -0,0 +1,154 @@ +use super::*; + +impl SqlxWorkspaceRepository { + pub(super) async fn list_roles_impl( + &self, + workspace_id: Uuid, + ) -> anyhow::Result> { + let rows = sqlx::query( + r#"SELECT r.id, + r.workspace_id, + r.name, + r.description, + r.base_role, + r.priority, + p.permission, + p.allowed + FROM workspace_roles r + LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id + WHERE r.workspace_id = $1 + ORDER BY r.priority, r.created_at"#, + ) + .bind(workspace_id) + .fetch_all(&self.pool) + .await?; + 
self.collect_roles(rows) + } + pub(super) async fn create_role_impl( + &self, + workspace_id: Uuid, + name: &str, + base_role: WorkspaceBaseRole, + description: Option<&str>, + priority: i32, + overrides: &[PermissionOverride], + ) -> anyhow::Result<WorkspaceRoleRecord> { + let mut tx = self.pool.begin().await?; + let row = sqlx::query( + r#"INSERT INTO workspace_roles (workspace_id, name, base_role, description, priority) + VALUES ($1, $2, $3, $4, $5) + RETURNING id, workspace_id, name, description, base_role, priority"#, + ) + .bind(workspace_id) + .bind(name) + .bind(base_role.as_str()) + .bind(description) + .bind(priority) + .fetch_one(tx.as_mut()) + .await?; + let role_id: Uuid = row.get("id"); + self.replace_role_permissions_tx(tx.as_mut(), role_id, overrides) + .await?; + tx.commit().await?; + Ok(WorkspaceRoleRecord { + id: role_id, + workspace_id: row.get("workspace_id"), + name: row.get("name"), + description: row.try_get("description").ok(), + base_role, + priority: row.get("priority"), + overrides: overrides.to_vec(), + }) + } + pub(super) async fn update_role_impl( + &self, + workspace_id: Uuid, + role_id: Uuid, + name: Option<&str>, + base_role: Option<WorkspaceBaseRole>, + description: Option<&str>, + priority: Option<i32>, + overrides: Option<&[PermissionOverride]>, + ) -> anyhow::Result<WorkspaceRoleRecord> { + let mut tx = self.pool.begin().await?; + let row = sqlx::query( + r#"UPDATE workspace_roles + SET name = COALESCE($3, name), + base_role = COALESCE($4, base_role), + description = COALESCE($5, description), + priority = COALESCE($6, priority) + WHERE id = $2 AND workspace_id = $1 + RETURNING id, workspace_id, name, description, base_role, priority"#, + ) + .bind(workspace_id) + .bind(role_id) + .bind(name) + .bind(base_role.map(|b| b.as_str())) + .bind(description) + .bind(priority) + .fetch_optional(tx.as_mut()) + .await?; + let Some(row) = row else { + bail!("role_not_found"); + }; + if let Some(overrides) = overrides { + self.replace_role_permissions_tx(tx.as_mut(), role_id, overrides) + .await?; + } + tx.commit().await?; + let overrides_vec = if let Some(overrides) = overrides { + overrides.to_vec() + } else { + self.fetch_role_overrides(role_id).await? + }; + Ok(WorkspaceRoleRecord { + id: row.get("id"), + workspace_id: row.get("workspace_id"), + name: row.get("name"), + description: row.try_get("description").ok(), + base_role: Self::parse_base_role(&row.get::<String, _>("base_role"))?, + priority: row.get("priority"), + overrides: overrides_vec, + }) + } + pub(super) async fn delete_role_impl( + &self, + workspace_id: Uuid, + role_id: Uuid, + ) -> anyhow::Result<bool> { + let result = sqlx::query( + r#"DELETE FROM workspace_roles + WHERE id = $1 AND workspace_id = $2"#, + ) + .bind(role_id) + .bind(workspace_id) + .execute(&self.pool) + .await?; + Ok(result.rows_affected() > 0) + } + pub(super) async fn get_role_impl( + &self, + workspace_id: Uuid, + role_id: Uuid, + ) -> anyhow::Result<Option<WorkspaceRoleRecord>> { + let rows = sqlx::query( + r#"SELECT r.id, + r.workspace_id, + r.name, + r.description, + r.base_role, + r.priority, + p.permission, + p.allowed + FROM workspace_roles r + LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id + WHERE r.workspace_id = $1 AND r.id = $2"#, + ) + .bind(workspace_id) + .bind(role_id) + .fetch_all(&self.pool) + .await?; + let mut roles = self.collect_roles(rows)?; + Ok(roles.pop()) + } +}
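
update_role_impl above relies on the COALESCE partial-update idiom: every optional field is bound as Option, a None bind becomes SQL NULL, and COALESCE falls through to the existing column value, so one statement serves any subset of changed fields. A self-contained sketch of the idiom as a hypothetical helper (not in the patch):

```rust
use sqlx::{PgPool, Row};
use uuid::Uuid;

// Sketch of the COALESCE partial-update idiom: a None bind becomes SQL
// NULL, and COALESCE keeps the existing column value.
async fn rename_role(pool: &PgPool, role_id: Uuid, name: Option<&str>) -> anyhow::Result<String> {
    let row = sqlx::query(
        r#"UPDATE workspace_roles
              SET name = COALESCE($2, name)
            WHERE id = $1
        RETURNING name"#,
    )
    .bind(role_id)
    .bind(name) // None keeps the current name
    .fetch_one(pool)
    .await?;
    Ok(row.get("name"))
}
```

Note the trade-off: with COALESCE a caller can never reset a column to NULL, so clearing, say, a description would need a sentinel value or a dedicated statement.

diff --git a/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/workspaces.rs b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/workspaces.rs new file mode 100644 index 00000000..f9b407fb --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/db/repositories/workspace_repository_sqlx/repository/workspaces.rs @@ -0,0 +1,175 @@ +use super::*; + +impl SqlxWorkspaceRepository { + pub(super) async fn list_for_user_impl( + &self, + user_id: Uuid, + ) -> anyhow::Result<Vec<WorkspaceListItem>> { + let rows = sqlx::query( + r#"SELECT w.id, + w.name, + w.slug, + w.icon, + w.description, + w.is_personal, + m.role_kind, + m.system_role, + m.custom_role_id, + m.is_default + FROM workspace_members m + JOIN workspaces w ON w.id = m.workspace_id + WHERE m.user_id = $1 + ORDER BY w.created_at"#, + ) + .bind(user_id) + .fetch_all(&self.pool) + .await?; + rows.into_iter() + .map(|r| { + let role_kind_raw: String = r.get("role_kind"); + let system_role_raw: Option<String> = r.try_get("system_role").ok(); + Ok(WorkspaceListItem { + id: r.get("id"), + name: r.get("name"), + slug: r.get("slug"), + icon: r.try_get("icon").ok(), + description: r.try_get("description").ok(), + is_personal: r.get("is_personal"), + role_kind: Self::parse_role_kind(&role_kind_raw)?, + system_role: Self::parse_system_role(system_role_raw.as_deref())?, + custom_role_id: r.try_get("custom_role_id").ok(), + is_default: r.get("is_default"), + }) + }) + .collect::<anyhow::Result<Vec<_>>>() + } + pub(super) async fn create_workspace_impl( + &self, + creator_id: Uuid, + name: &str, + slug: &str, + icon: Option<&str>, + description: Option<&str>, + is_personal: bool, + ) -> anyhow::Result<WorkspaceRow> { + let row = sqlx::query( + r#"INSERT INTO workspaces (id, name, slug, icon, description, created_by, is_personal) + VALUES (gen_random_uuid(), $1, $2, $3, $4, $5, $6) + RETURNING id, name, slug, icon, description, is_personal"#, + ) + .bind(name) + .bind(slug) + .bind(icon) + .bind(description) + .bind(creator_id) + .bind(is_personal) + .fetch_one(&self.pool) + .await?; + Ok(WorkspaceRow { + id: row.get("id"), + name: row.get("name"), + slug: row.get("slug"), + icon: row.try_get("icon").ok(), + description: row.try_get("description").ok(), + is_personal: row.get("is_personal"), + }) + } + pub(super) async fn get_workspace_impl( + &self, + workspace_id: Uuid, + ) ->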
anyhow::Result> { + let row = sqlx::query( + r#"SELECT id, name, slug, icon, description, is_personal + FROM workspaces + WHERE id = $1"#, + ) + .bind(workspace_id) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|row| WorkspaceRow { + id: row.get("id"), + name: row.get("name"), + slug: row.get("slug"), + icon: row.try_get("icon").ok(), + description: row.try_get("description").ok(), + is_personal: row.get("is_personal"), + })) + } + pub(super) async fn create_workspace_with_id_impl( + &self, + workspace_id: Uuid, + created_by: Option, + name: &str, + slug: &str, + icon: Option<&str>, + description: Option<&str>, + is_personal: bool, + ) -> anyhow::Result { + let row = sqlx::query( + r#"INSERT INTO workspaces (id, name, slug, icon, description, created_by, is_personal) + VALUES ($1, $2, $3, $4, $5, $6, $7) + RETURNING id, name, slug, icon, description, is_personal"#, + ) + .bind(workspace_id) + .bind(name) + .bind(slug) + .bind(icon) + .bind(description) + .bind(created_by) + .bind(is_personal) + .fetch_one(&self.pool) + .await?; + Ok(WorkspaceRow { + id: row.get("id"), + name: row.get("name"), + slug: row.get("slug"), + icon: row.try_get("icon").ok(), + description: row.try_get("description").ok(), + is_personal: row.get("is_personal"), + }) + } + pub(super) async fn update_workspace_impl( + &self, + workspace_id: Uuid, + name: Option<&str>, + icon: Option<&str>, + description: Option<&str>, + ) -> anyhow::Result> { + let row = sqlx::query( + r#"UPDATE workspaces + SET name = COALESCE($2, name), + icon = COALESCE($3, icon), + description = COALESCE($4, description), + updated_at = now() + WHERE id = $1 + RETURNING id, name, slug, icon, description, is_personal"#, + ) + .bind(workspace_id) + .bind(name) + .bind(icon) + .bind(description) + .fetch_optional(&self.pool) + .await?; + Ok(row.map(|row| WorkspaceRow { + id: row.get("id"), + name: row.get("name"), + slug: row.get("slug"), + icon: row.try_get("icon").ok(), + description: row.try_get("description").ok(), + is_personal: row.get("is_personal"), + })) + } + pub(super) async fn delete_workspace_impl(&self, workspace_id: Uuid) -> anyhow::Result { + let result = sqlx::query("DELETE FROM workspaces WHERE id = $1") + .bind(workspace_id) + .execute(&self.pool) + .await?; + Ok(result.rows_affected() > 0) + } + pub(super) async fn list_all_workspace_ids_impl(&self) -> anyhow::Result> { + let rows = sqlx::query("SELECT id FROM workspaces ORDER BY created_at") + .fetch_all(&self.pool) + .await?; + Ok(rows.into_iter().map(|row| row.get("id")).collect()) + } +} diff --git a/api/crates/infrastructure/src/workspaces/mod.rs b/api/crates/infrastructure/src/workspaces/mod.rs new file mode 100644 index 00000000..dec10232 --- /dev/null +++ b/api/crates/infrastructure/src/workspaces/mod.rs @@ -0,0 +1 @@ +pub mod db; diff --git a/api/crates/presentation/Cargo.toml b/api/crates/presentation/Cargo.toml new file mode 100644 index 00000000..91d59542 --- /dev/null +++ b/api/crates/presentation/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "presentation" +version = "0.1.0" +edition = "2024" + +[dependencies] +application = { path = "../application" } +domain = { path = "../domain" } +contracts = { path = "../contracts", features = ["openapi"] } + +anyhow = "1" +axum = { version = "0.7", features = ["macros", "json", "multipart", "ws"] } +http = "1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tokio = { version = "1.46", features = ["rt-multi-thread", "macros"] } +tracing = "0.1" +uuid = { version = "1", features = ["v4", 
"serde"] } +chrono = { version = "0.4", features = ["serde", "clock"] } +futures-util = { version = "0.3", features = ["sink"] } +rand = "0.8" +utoipa = { version = "4", features = ["axum_extras", "chrono", "uuid"] } +yrs = { version = "0.24", features = ["sync"] } +yrs-warp = "0.9" diff --git a/api/crates/presentation/src/context.rs b/api/crates/presentation/src/context.rs new file mode 100644 index 00000000..e48799bd --- /dev/null +++ b/api/crates/presentation/src/context.rs @@ -0,0 +1,385 @@ +use std::sync::Arc; + +use axum::extract::FromRef; +use futures_util::stream::BoxStream; + +use application::core::ports::storage::storage_ingest_queue::StorageIngestQueue; +use application::core::services::authorization::AuthorizationServiceFacade; +use application::core::services::health::HealthServiceFacade; +use application::core::services::markdown_render::MarkdownRenderServiceFacade; +use application::core::services::metrics::MetricsRegistryFacade; +use application::core::services::storage::ingest_enqueue::StorageIngestEnqueueServiceFacade; +use application::documents::ports::realtime::realtime_port::RealtimeEngine; +pub use application::documents::ports::realtime::realtime_types::{ + DynRealtimeSink, DynRealtimeStream, +}; +use application::documents::services::DocumentServiceFacade; +use application::documents::services::files::FileServiceFacade; +use application::documents::services::publishing::PublicServiceFacade; +use application::documents::services::sharing::ShareServiceFacade; +use application::documents::services::tagging::TagServiceFacade; +use application::git::services::GitServiceFacade; +use application::identity::services::api_tokens::ApiTokenServiceFacade; +use application::identity::services::auth::account::AccountServiceFacade; +use application::identity::services::auth::auth_service::AuthServiceFacade; +use application::identity::services::auth::external::ExternalAuthRegistryFacade; +use application::identity::services::auth::user_sessions::UserSessionServiceFacade; +use application::identity::services::user_shortcuts::UserShortcutServiceFacade; +use application::plugins::ports::plugin_event_publisher::PluginScopedEvent; +use application::plugins::ports::plugin_event_subscriber::PluginEventSubscriber; +use application::plugins::services::data::PluginDataServiceFacade; +use application::plugins::services::execution::PluginExecutionServiceFacade; +use application::plugins::services::management::PluginManagementServiceFacade; +use application::plugins::services::permissions::PluginPermissionServiceFacade; +use application::workspaces::services::WorkspaceServiceFacade; + +mod traits; +pub use traits::{HasAuthServices, HasAuthorizationService, HasShareService, HasWorkspaceService}; + +#[derive(Debug, Clone)] +pub struct PresentationConfig { + pub frontend_url: Option, + pub upload_max_bytes: usize, + pub public_base_url: Option, + pub session_cookie_secure: bool, +} + +#[derive(Clone)] +pub struct AppContext { + pub cfg: PresentationConfig, + services: Arc, + metrics: Arc, +} + +#[derive(Clone)] +pub struct AppServices { + core: CoreServices, + documents: DocumentServices, + git: GitServices, + identity: IdentityServices, + plugins: PluginServices, + workspaces: WorkspaceServices, +} + +#[derive(Clone)] +pub struct AppServicesDeps { + pub core: CoreServicesDeps, + pub documents: DocumentServicesDeps, + pub git: GitServicesDeps, + pub identity: IdentityServicesDeps, + pub plugins: PluginServicesDeps, + pub workspaces: WorkspaceServicesDeps, +} + +#[derive(Clone)] +pub struct 
CoreServicesDeps { + pub authorization: Arc, + pub markdown_render_service: Arc, + pub storage_ingest_queue: Arc, + pub storage_ingest_enqueuer: Arc, + pub health_service: Arc, +} + +#[derive(Clone)] +pub struct DocumentServicesDeps { + pub document_service: Arc, + pub share_service: Arc, + pub file_service: Arc, + pub public_service: Arc, + pub tag_service: Arc, + pub realtime_engine: Arc, +} + +#[derive(Clone)] +pub struct GitServicesDeps { + pub git_service: Arc, +} + +#[derive(Clone)] +pub struct IdentityServicesDeps { + pub api_token_service: Arc, + pub user_shortcut_service: Arc, + pub account_service: Arc, + pub auth_service: Arc, + pub session_service: Arc, + pub external_auth: Arc, +} + +#[derive(Clone)] +pub struct PluginServicesDeps { + pub plugin_execution_service: Arc, + pub plugin_management_service: Arc, + pub plugin_permission_service: Arc, + pub plugin_data_service: Arc, + pub plugin_event_subscriber: Arc, +} + +#[derive(Clone)] +pub struct WorkspaceServicesDeps { + pub workspace_service: Arc, +} + +#[derive(Clone)] +struct CoreServices { + authorization: Arc, + markdown_render_service: Arc, + storage_ingest_queue: Arc, + storage_ingest_enqueuer: Arc, + health_service: Arc, +} + +#[derive(Clone)] +struct DocumentServices { + document_service: Arc, + share_service: Arc, + file_service: Arc, + public_service: Arc, + tag_service: Arc, + realtime_engine: Arc, +} + +#[derive(Clone)] +struct GitServices { + git_service: Arc, +} + +#[derive(Clone)] +struct IdentityServices { + api_token_service: Arc, + user_shortcut_service: Arc, + account_service: Arc, + auth_service: Arc, + session_service: Arc, + external_auth: Arc, +} + +#[derive(Clone)] +struct PluginServices { + plugin_execution_service: Arc, + plugin_management_service: Arc, + plugin_permission_service: Arc, + plugin_data_service: Arc, + plugin_event_subscriber: Arc, +} + +#[derive(Clone)] +struct WorkspaceServices { + workspace_service: Arc, +} + +mod subcontexts; +pub use subcontexts::{ + CoreContext, DocumentsContext, GitContext, IdentityContext, PluginsContext, WorkspacesContext, + WsContext, +}; + +impl AppServices { + pub fn new(deps: AppServicesDeps) -> Self { + Self { + core: CoreServices { + authorization: deps.core.authorization, + markdown_render_service: deps.core.markdown_render_service, + storage_ingest_queue: deps.core.storage_ingest_queue, + storage_ingest_enqueuer: deps.core.storage_ingest_enqueuer, + health_service: deps.core.health_service, + }, + documents: DocumentServices { + document_service: deps.documents.document_service, + share_service: deps.documents.share_service, + file_service: deps.documents.file_service, + public_service: deps.documents.public_service, + tag_service: deps.documents.tag_service, + realtime_engine: deps.documents.realtime_engine, + }, + git: GitServices { + git_service: deps.git.git_service, + }, + identity: IdentityServices { + api_token_service: deps.identity.api_token_service, + user_shortcut_service: deps.identity.user_shortcut_service, + account_service: deps.identity.account_service, + auth_service: deps.identity.auth_service, + session_service: deps.identity.session_service, + external_auth: deps.identity.external_auth, + }, + plugins: PluginServices { + plugin_execution_service: deps.plugins.plugin_execution_service, + plugin_management_service: deps.plugins.plugin_management_service, + plugin_permission_service: deps.plugins.plugin_permission_service, + plugin_data_service: deps.plugins.plugin_data_service, + plugin_event_subscriber: 
deps.plugins.plugin_event_subscriber, + }, + workspaces: WorkspaceServices { + workspace_service: deps.workspaces.workspace_service, + }, + } + } +} + +impl AppContext { + pub fn new( + cfg: PresentationConfig, + services: AppServices, + metrics: Arc, + ) -> Self { + Self { + cfg, + services: Arc::new(services), + metrics, + } + } + + pub fn authorization(&self) -> Arc { + self.services.core.authorization.clone() + } + + pub fn document_service(&self) -> Arc { + self.services.documents.document_service.clone() + } + + pub fn share_service(&self) -> Arc { + self.services.documents.share_service.clone() + } + + pub fn file_service(&self) -> Arc { + self.services.documents.file_service.clone() + } + + pub fn public_service(&self) -> Arc { + self.services.documents.public_service.clone() + } + + pub fn tag_service(&self) -> Arc { + self.services.documents.tag_service.clone() + } + + pub fn user_shortcut_service(&self) -> Arc { + self.services.identity.user_shortcut_service.clone() + } + + pub fn git_service(&self) -> Arc { + self.services.git.git_service.clone() + } + + pub fn markdown_renderer(&self) -> Arc { + self.services.core.markdown_render_service.clone() + } + + pub fn workspace_service(&self) -> Arc { + self.services.workspaces.workspace_service.clone() + } + + pub fn storage_ingest_queue(&self) -> Arc { + self.services.core.storage_ingest_queue.clone() + } + + pub fn storage_ingest_enqueuer(&self) -> Arc { + self.services.core.storage_ingest_enqueuer.clone() + } + + pub fn plugin_execution_service(&self) -> Arc { + self.services.plugins.plugin_execution_service.clone() + } + + pub fn plugin_management(&self) -> Arc { + self.services.plugins.plugin_management_service.clone() + } + + pub fn plugin_permissions(&self) -> Arc { + self.services.plugins.plugin_permission_service.clone() + } + + pub fn plugin_data_service(&self) -> Arc { + self.services.plugins.plugin_data_service.clone() + } + + pub fn plugin_event_subscriber(&self) -> Arc { + self.services.plugins.plugin_event_subscriber.clone() + } + + pub fn health_service(&self) -> Arc { + self.services.core.health_service.clone() + } + + pub fn account_service(&self) -> Arc { + self.services.identity.account_service.clone() + } + + pub fn auth_service(&self) -> Arc { + self.services.identity.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.services.identity.session_service.clone() + } + + pub fn external_auth(&self) -> Arc { + self.services.identity.external_auth.clone() + } + + pub fn metrics(&self) -> Arc { + self.metrics.clone() + } + + pub async fn subscribe_plugin_events( + &self, + ) -> anyhow::Result> { + self.services + .plugins + .plugin_event_subscriber + .subscribe() + .await + .map_err(Into::into) + } + + pub fn api_token_service(&self) -> Arc { + self.services.identity.api_token_service.clone() + } + + pub fn realtime_engine(&self) -> Arc { + self.services.documents.realtime_engine.clone() + } + + pub async fn subscribe_realtime( + &self, + doc_id: &str, + sink: DynRealtimeSink, + stream: DynRealtimeStream, + can_edit: bool, + ) -> anyhow::Result<()> { + self.services + .documents + .realtime_engine + .subscribe(doc_id, sink, stream, can_edit) + .await + .map_err(Into::into) + } +} + +impl HasAuthServices for AppContext { + fn auth_service(&self) -> Arc { + AppContext::auth_service(self) + } + + fn session_service(&self) -> Arc { + AppContext::session_service(self) + } +} + +impl HasWorkspaceService for AppContext { + fn workspace_service(&self) -> Arc { + AppContext::workspace_service(self) + } 
+} + +impl HasShareService for AppContext { + fn share_service(&self) -> Arc { + AppContext::share_service(self) + } +} + +impl HasAuthorizationService for AppContext { + fn authorization(&self) -> Arc { + AppContext::authorization(self) + } +} diff --git a/api/crates/presentation/src/context/subcontexts.rs b/api/crates/presentation/src/context/subcontexts.rs new file mode 100644 index 00000000..242b6266 --- /dev/null +++ b/api/crates/presentation/src/context/subcontexts.rs @@ -0,0 +1,542 @@ +use super::*; + +#[derive(Clone)] +pub struct CoreContext { + pub cfg: PresentationConfig, + markdown_render_service: Arc, + storage_ingest_enqueuer: Arc, + health_service: Arc, + metrics: Arc, + auth_service: Arc, + session_service: Arc, + workspace_service: Arc, + share_service: Arc, +} + +impl CoreContext { + pub fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.session_service.clone() + } + + pub fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } + + pub fn share_service(&self) -> Arc { + self.share_service.clone() + } + + pub fn markdown_renderer(&self) -> Arc { + self.markdown_render_service.clone() + } + + pub fn storage_ingest_enqueuer(&self) -> Arc { + self.storage_ingest_enqueuer.clone() + } + + pub fn health_service(&self) -> Arc { + self.health_service.clone() + } + + pub fn metrics(&self) -> Arc { + self.metrics.clone() + } +} + +impl HasAuthServices for CoreContext { + fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasWorkspaceService for CoreContext { + fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl HasShareService for CoreContext { + fn share_service(&self) -> Arc { + self.share_service.clone() + } +} + +impl FromRef for CoreContext { + fn from_ref(ctx: &AppContext) -> Self { + Self { + cfg: ctx.cfg.clone(), + markdown_render_service: ctx.markdown_renderer(), + storage_ingest_enqueuer: ctx.storage_ingest_enqueuer(), + health_service: ctx.health_service(), + metrics: ctx.metrics(), + auth_service: ctx.auth_service(), + session_service: ctx.session_service(), + workspace_service: ctx.workspace_service(), + share_service: ctx.share_service(), + } + } +} + +#[derive(Clone)] +pub struct DocumentsContext { + pub cfg: PresentationConfig, + authorization: Arc, + document_service: Arc, + file_service: Arc, + public_service: Arc, + share_service: Arc, + tag_service: Arc, + auth_service: Arc, + session_service: Arc, + workspace_service: Arc, +} + +impl DocumentsContext { + pub fn authorization(&self) -> Arc { + self.authorization.clone() + } + + pub fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.session_service.clone() + } + + pub fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } + + pub fn share_service(&self) -> Arc { + self.share_service.clone() + } + + pub fn document_service(&self) -> Arc { + self.document_service.clone() + } + + pub fn file_service(&self) -> Arc { + self.file_service.clone() + } + + pub fn public_service(&self) -> Arc { + self.public_service.clone() + } + + pub fn tag_service(&self) -> Arc { + self.tag_service.clone() + } +} + +impl HasAuthorizationService for DocumentsContext { + fn authorization(&self) -> Arc { + self.authorization.clone() + } +} + +impl HasShareService for DocumentsContext { + fn share_service(&self) -> Arc { + self.share_service.clone() + } 
+} + +impl HasAuthServices for DocumentsContext { + fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasWorkspaceService for DocumentsContext { + fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl FromRef for DocumentsContext { + fn from_ref(ctx: &AppContext) -> Self { + Self { + cfg: ctx.cfg.clone(), + authorization: ctx.authorization(), + document_service: ctx.document_service(), + file_service: ctx.file_service(), + public_service: ctx.public_service(), + share_service: ctx.share_service(), + tag_service: ctx.tag_service(), + auth_service: ctx.auth_service(), + session_service: ctx.session_service(), + workspace_service: ctx.workspace_service(), + } + } +} + +#[derive(Clone)] +pub struct GitContext { + pub cfg: PresentationConfig, + git_service: Arc, + auth_service: Arc, + session_service: Arc, + workspace_service: Arc, +} + +impl GitContext { + pub fn git_service(&self) -> Arc { + self.git_service.clone() + } + + pub fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.session_service.clone() + } + + pub fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl HasAuthServices for GitContext { + fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasWorkspaceService for GitContext { + fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl FromRef for GitContext { + fn from_ref(ctx: &AppContext) -> Self { + Self { + cfg: ctx.cfg.clone(), + git_service: ctx.git_service(), + auth_service: ctx.auth_service(), + session_service: ctx.session_service(), + workspace_service: ctx.workspace_service(), + } + } +} + +#[derive(Clone)] +pub struct IdentityContext { + pub cfg: PresentationConfig, + api_token_service: Arc, + user_shortcut_service: Arc, + account_service: Arc, + auth_service: Arc, + session_service: Arc, + external_auth: Arc, + workspace_service: Arc, +} + +impl IdentityContext { + pub fn api_token_service(&self) -> Arc { + self.api_token_service.clone() + } + + pub fn user_shortcut_service(&self) -> Arc { + self.user_shortcut_service.clone() + } + + pub fn account_service(&self) -> Arc { + self.account_service.clone() + } + + pub fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.session_service.clone() + } + + pub fn external_auth(&self) -> Arc { + self.external_auth.clone() + } + + pub fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl HasAuthServices for IdentityContext { + fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasWorkspaceService for IdentityContext { + fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl FromRef for IdentityContext { + fn from_ref(ctx: &AppContext) -> Self { + Self { + cfg: ctx.cfg.clone(), + api_token_service: ctx.api_token_service(), + user_shortcut_service: ctx.user_shortcut_service(), + account_service: ctx.account_service(), + auth_service: ctx.auth_service(), + session_service: ctx.session_service(), + external_auth: ctx.external_auth(), + workspace_service: ctx.workspace_service(), + } + } +} + +#[derive(Clone)] +pub struct PluginsContext { + pub cfg: PresentationConfig, + 
authorization: Arc, + plugin_execution_service: Arc, + plugin_management_service: Arc, + plugin_permission_service: Arc, + plugin_data_service: Arc, + plugin_event_subscriber: Arc, + auth_service: Arc, + session_service: Arc, + workspace_service: Arc, + share_service: Arc, +} + +impl PluginsContext { + pub fn authorization(&self) -> Arc { + self.authorization.clone() + } + + pub fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.session_service.clone() + } + + pub fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } + + pub fn share_service(&self) -> Arc { + self.share_service.clone() + } + + pub fn plugin_execution_service(&self) -> Arc { + self.plugin_execution_service.clone() + } + + pub fn plugin_management(&self) -> Arc { + self.plugin_management_service.clone() + } + + pub fn plugin_permissions(&self) -> Arc { + self.plugin_permission_service.clone() + } + + pub fn plugin_data_service(&self) -> Arc { + self.plugin_data_service.clone() + } + + pub async fn subscribe_plugin_events( + &self, + ) -> anyhow::Result> { + self.plugin_event_subscriber + .subscribe() + .await + .map_err(Into::into) + } +} + +impl HasAuthorizationService for PluginsContext { + fn authorization(&self) -> Arc { + self.authorization.clone() + } +} + +impl HasAuthServices for PluginsContext { + fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasWorkspaceService for PluginsContext { + fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl HasShareService for PluginsContext { + fn share_service(&self) -> Arc { + self.share_service.clone() + } +} + +impl FromRef for PluginsContext { + fn from_ref(ctx: &AppContext) -> Self { + Self { + cfg: ctx.cfg.clone(), + authorization: ctx.authorization(), + plugin_execution_service: ctx.plugin_execution_service(), + plugin_management_service: ctx.plugin_management(), + plugin_permission_service: ctx.plugin_permissions(), + plugin_data_service: ctx.plugin_data_service(), + plugin_event_subscriber: ctx.plugin_event_subscriber(), + auth_service: ctx.auth_service(), + session_service: ctx.session_service(), + workspace_service: ctx.workspace_service(), + share_service: ctx.share_service(), + } + } +} + +#[derive(Clone)] +pub struct WorkspacesContext { + pub cfg: PresentationConfig, + workspace_service: Arc, + account_service: Arc, + document_service: Arc, + auth_service: Arc, + session_service: Arc, +} + +impl WorkspacesContext { + pub fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } + + pub fn account_service(&self) -> Arc { + self.account_service.clone() + } + + pub fn document_service(&self) -> Arc { + self.document_service.clone() + } + + pub fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + pub fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasAuthServices for WorkspacesContext { + fn auth_service(&self) -> Arc { + self.auth_service.clone() + } + + fn session_service(&self) -> Arc { + self.session_service.clone() + } +} + +impl HasWorkspaceService for WorkspacesContext { + fn workspace_service(&self) -> Arc { + self.workspace_service.clone() + } +} + +impl FromRef for WorkspacesContext { + fn from_ref(ctx: &AppContext) -> Self { + Self { + cfg: ctx.cfg.clone(), + workspace_service: ctx.workspace_service(), + account_service: ctx.account_service(), + document_service: ctx.document_service(), 
+            auth_service: ctx.auth_service(),
+            session_service: ctx.session_service(),
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct WsContext {
+    authorization: Arc<AuthorizationServiceFacade>,
+    realtime_engine: Arc,
+    auth_service: Arc<AuthServiceFacade>,
+    session_service: Arc<UserSessionServiceFacade>,
+}
+
+impl WsContext {
+    pub fn authorization(&self) -> Arc<AuthorizationServiceFacade> {
+        self.authorization.clone()
+    }
+
+    pub fn auth_service(&self) -> Arc<AuthServiceFacade> {
+        self.auth_service.clone()
+    }
+
+    pub fn session_service(&self) -> Arc<UserSessionServiceFacade> {
+        self.session_service.clone()
+    }
+
+    pub async fn subscribe_realtime(
+        &self,
+        doc_id: &str,
+        sink: DynRealtimeSink,
+        stream: DynRealtimeStream,
+        can_edit: bool,
+    ) -> anyhow::Result<()> {
+        self.realtime_engine
+            .subscribe(doc_id, sink, stream, can_edit)
+            .await
+            .map_err(Into::into)
+    }
+}
+
+impl HasAuthServices for WsContext {
+    fn auth_service(&self) -> Arc<AuthServiceFacade> {
+        self.auth_service.clone()
+    }
+
+    fn session_service(&self) -> Arc<UserSessionServiceFacade> {
+        self.session_service.clone()
+    }
+}
+
+impl HasAuthorizationService for WsContext {
+    fn authorization(&self) -> Arc<AuthorizationServiceFacade> {
+        self.authorization.clone()
+    }
+}
+
+impl FromRef<AppContext> for WsContext {
+    fn from_ref(ctx: &AppContext) -> Self {
+        Self {
+            authorization: ctx.authorization(),
+            realtime_engine: ctx.realtime_engine(),
+            auth_service: ctx.auth_service(),
+            session_service: ctx.session_service(),
+        }
+    }
+}
diff --git a/api/crates/presentation/src/context/traits.rs b/api/crates/presentation/src/context/traits.rs
new file mode 100644
index 00000000..cb00fcf8
--- /dev/null
+++ b/api/crates/presentation/src/context/traits.rs
@@ -0,0 +1,24 @@
+use std::sync::Arc;
+
+use application::core::services::authorization::AuthorizationServiceFacade;
+use application::documents::services::sharing::ShareServiceFacade;
+use application::identity::services::auth::auth_service::AuthServiceFacade;
+use application::identity::services::auth::user_sessions::UserSessionServiceFacade;
+use application::workspaces::services::WorkspaceServiceFacade;
+
+pub trait HasAuthServices: Send + Sync {
+    fn auth_service(&self) -> Arc<AuthServiceFacade>;
+    fn session_service(&self) -> Arc<UserSessionServiceFacade>;
+}
+
+pub trait HasWorkspaceService: Send + Sync {
+    fn workspace_service(&self) -> Arc<WorkspaceServiceFacade>;
+}
+
+pub trait HasShareService: Send + Sync {
+    fn share_service(&self) -> Arc<ShareServiceFacade>;
+}
+
+pub trait HasAuthorizationService: Send + Sync {
+    fn authorization(&self) -> Arc<AuthorizationServiceFacade>;
+}
diff --git a/api/src/presentation/http/health.rs b/api/crates/presentation/src/http/core/health.rs
similarity index 75%
rename from api/src/presentation/http/health.rs
rename to api/crates/presentation/src/http/core/health.rs
index 580ac5e8..e35a0b6f 100644
--- a/api/src/presentation/http/health.rs
+++ b/api/crates/presentation/src/http/core/health.rs
@@ -2,8 +2,8 @@ use axum::{Json, Router, extract::State, routing::get};
 use serde::Serialize;
 use utoipa::ToSchema;
 
-use crate::application::services::health::OverallHealth;
-use crate::presentation::context::AppContext;
+use crate::context::{AppContext, CoreContext};
+use application::core::services::health::OverallHealth;
 
 #[derive(Debug, Serialize, ToSchema)]
 pub struct HealthResp {
@@ -16,7 +16,7 @@ pub struct HealthResp {
     tag = "Health",
     responses((status = 200, body = HealthResp))
 )]
-pub async fn health(State(ctx): State<AppContext>) -> Json<HealthResp> {
+pub async fn health(State(ctx): State<CoreContext>) -> Json<HealthResp> {
     let service = ctx.health_service();
     let status = match service.status().await.unwrap_or(OverallHealth::Degraded) {
         OverallHealth::Ok => "ok",
@@ -25,6 +25,10 @@ pub async fn health(State(ctx): State<AppContext>) -> Json<HealthResp> {
     Json(HealthResp { status })
 }
+pub mod openapi {
+    pub use super::*;
+}
+
 pub fn routes(ctx:
AppContext) -> Router { Router::new().route("/health", get(health)).with_state(ctx) } diff --git a/api/crates/presentation/src/http/core/markdown/handlers/mod.rs b/api/crates/presentation/src/http/core/markdown/handlers/mod.rs new file mode 100644 index 00000000..6a2e3596 --- /dev/null +++ b/api/crates/presentation/src/http/core/markdown/handlers/mod.rs @@ -0,0 +1,120 @@ +use std::collections::HashMap; + +use axum::{Json, extract::State, http::StatusCode}; +use uuid::Uuid; + +use crate::context::CoreContext; +use crate::http::error::ApiError; +use crate::http::identity::auth::Bearer; +use application::core::dtos::markdown::RenderOptions; +use application::core::services::errors::ServiceError; +use application::core::services::markdown_render::MarkdownRenderTask; + +use super::types::{RenderManyRequest, RenderManyResponse, RenderRequest, RenderResponseBody}; +use super::user_scope::resolve_user_scope_from_inputs; + +#[utoipa::path(post, path = "/api/markdown/render", tag = "Markdown", + request_body = RenderRequest, + responses((status = 200, body = RenderResponseBody)))] +pub async fn render_markdown( + State(ctx): State, + bearer: Option, + Json(req): Json, +) -> Result, ApiError> { + // Per-item size guard (2MB) + if req.text.len() > 2 * 1024 * 1024 { + return Err(ApiError::new( + StatusCode::PAYLOAD_TOO_LARGE, + "payload_too_large", + )); + } + let RenderRequest { text, options } = req; + let options: RenderOptions = options.into(); + + let bearer_token = bearer.as_ref().map(|b| b.0.as_str()); + let user_scope = + resolve_user_scope_from_inputs(&ctx, bearer_token, options.token.as_deref()).await; + + let renderer = ctx.markdown_renderer(); + let resp = renderer + .render_single(text, options, user_scope) + .await + .map_err(map_markdown_error)?; + Ok(Json(RenderResponseBody::from(resp))) +} + +#[utoipa::path(post, path = "/api/markdown/render-many", tag = "Markdown", + request_body = RenderManyRequest, + responses((status = 200, body = RenderManyResponse)))] +pub async fn render_markdown_many( + State(ctx): State, + bearer: Option, + Json(req): Json, +) -> Result, ApiError> { + // Guard: item count and total size + const MAX_ITEMS: usize = 128; + const MAX_TOTAL_BYTES: usize = 5 * 1024 * 1024; // 5MB + let items = req.items; + if items.len() > MAX_ITEMS { + return Err(ApiError::new( + StatusCode::PAYLOAD_TOO_LARGE, + "payload_too_large", + )); + } + let total: usize = items.iter().map(|i| i.text.len()).sum(); + if total > MAX_TOTAL_BYTES { + return Err(ApiError::new( + StatusCode::PAYLOAD_TOO_LARGE, + "payload_too_large", + )); + } + + let bearer_token = bearer.as_ref().map(|b| b.0.clone()); + let bearer_scope = resolve_user_scope_from_inputs(&ctx, bearer_token.as_deref(), None).await; + let mut share_scope_cache: HashMap> = HashMap::new(); + let mut tasks = Vec::with_capacity(items.len()); + + for item in items { + if item.text.len() > 2 * 1024 * 1024 { + return Err(ApiError::new( + StatusCode::PAYLOAD_TOO_LARGE, + "payload_too_large", + )); + } + let RenderRequest { text, options } = item; + let options: RenderOptions = options.into(); + let user_scope = if bearer_scope.is_some() { + bearer_scope + } else if let Some(token) = options.token.as_deref() { + if let Some(scope) = share_scope_cache.get(token) { + *scope + } else { + let scope = resolve_user_scope_from_inputs(&ctx, None, Some(token)).await; + share_scope_cache.insert(token.to_string(), scope); + scope + } + } else { + None + }; + tasks.push(MarkdownRenderTask { + text, + options, + user_scope, + }); + } + + let renderer = 
ctx.markdown_renderer(); + let responses = renderer + .render_many(tasks) + .await + .map_err(map_markdown_error)?; + let items = responses + .into_iter() + .map(RenderResponseBody::from) + .collect(); + Ok(Json(RenderManyResponse { items })) +} + +fn map_markdown_error(err: ServiceError) -> crate::http::error::ApiError { + crate::http::error::map_service_error_no_log(err) +} diff --git a/api/crates/presentation/src/http/core/markdown/mod.rs b/api/crates/presentation/src/http/core/markdown/mod.rs new file mode 100644 index 00000000..2b015277 --- /dev/null +++ b/api/crates/presentation/src/http/core/markdown/mod.rs @@ -0,0 +1,21 @@ +mod handlers; +pub mod types; +mod user_scope; + +use axum::{Router, routing::post}; + +use crate::context::AppContext; + +pub use handlers::{render_markdown, render_markdown_many}; +pub use types::*; + +pub mod openapi { + pub use super::handlers::*; +} + +pub fn routes(ctx: AppContext) -> Router { + Router::new() + .route("/markdown/render", post(render_markdown)) + .route("/markdown/render-many", post(render_markdown_many)) + .with_state(ctx) +} diff --git a/api/crates/presentation/src/http/core/markdown/types.rs b/api/crates/presentation/src/http/core/markdown/types.rs new file mode 100644 index 00000000..2fc3aff8 --- /dev/null +++ b/api/crates/presentation/src/http/core/markdown/types.rs @@ -0,0 +1,106 @@ +use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; + +use application::core::dtos::markdown::{PlaceholderItem, RenderOptions, RenderResponse}; + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema, Default)] +#[serde(default)] +pub struct RenderOptionsPayload { + pub flavor: Option, + pub theme: Option, + pub features: Option>, + pub sanitize: Option, + pub hardbreaks: Option, + pub doc_id: Option, + pub base_origin: Option, + pub absolute_attachments: Option, + pub token: Option, +} + +impl From for RenderOptions { + fn from(value: RenderOptionsPayload) -> Self { + RenderOptions { + flavor: value.flavor, + theme: value.theme, + features: value.features, + sanitize: value.sanitize, + hardbreaks: value.hardbreaks, + doc_id: value.doc_id, + base_origin: value.base_origin, + absolute_attachments: value.absolute_attachments, + token: value.token, + } + } +} + +impl From for RenderOptionsPayload { + fn from(value: RenderOptions) -> Self { + Self { + flavor: value.flavor, + theme: value.theme, + features: value.features, + sanitize: value.sanitize, + hardbreaks: value.hardbreaks, + doc_id: value.doc_id, + base_origin: value.base_origin, + absolute_attachments: value.absolute_attachments, + token: value.token, + } + } +} + +#[derive(Debug, Clone, Serialize, ToSchema)] +pub struct PlaceholderItemPayload { + pub kind: String, + pub id: String, + pub code: String, +} + +impl From for PlaceholderItemPayload { + fn from(value: PlaceholderItem) -> Self { + Self { + kind: value.kind, + id: value.id, + code: value.code, + } + } +} + +#[derive(Debug, Clone, Serialize, ToSchema)] +pub struct RenderResponseBody { + pub html: String, + #[serde(skip_serializing_if = "Vec::is_empty")] + pub placeholders: Vec, + pub hash: String, +} + +impl From for RenderResponseBody { + fn from(value: RenderResponse) -> Self { + Self { + html: value.html, + placeholders: value + .placeholders + .into_iter() + .map(PlaceholderItemPayload::from) + .collect(), + hash: value.hash, + } + } +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct RenderRequest { + pub text: String, + #[serde(default)] + pub options: RenderOptionsPayload, +} + +#[derive(Debug, Deserialize, ToSchema)] 
+pub struct RenderManyRequest {
+    pub items: Vec<RenderRequest>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct RenderManyResponse {
+    pub items: Vec<RenderResponseBody>,
+}
diff --git a/api/crates/presentation/src/http/core/markdown/user_scope.rs b/api/crates/presentation/src/http/core/markdown/user_scope.rs
new file mode 100644
index 00000000..6ff0115b
--- /dev/null
+++ b/api/crates/presentation/src/http/core/markdown/user_scope.rs
@@ -0,0 +1,79 @@
+use uuid::Uuid;
+
+use crate::context::{HasAuthServices, HasShareService, HasWorkspaceService};
+use crate::security::{request_status, token};
+use application::core::services::access;
+use domain::documents::share;
+
+pub(super) async fn resolve_user_scope_from_inputs(
+    ctx: &(impl HasAuthServices + HasWorkspaceService + HasShareService),
+    bearer_token: Option<&str>,
+    share_token: Option<&str>,
+) -> Option<Uuid> {
+    if let Some(token) = bearer_token {
+        if let Some(workspace_id) = ctx.auth_service().workspace_from_token_claim(token) {
+            return Some(workspace_id);
+        }
+        if let Ok(Some(workspace_id)) = ctx.auth_service().workspace_from_token_async(token).await {
+            return Some(workspace_id);
+        }
+        match token::resolve_actor_from_token_str(ctx, token).await {
+            Ok(access::Actor::User(uid)) => {
+                if let Ok(workspaces) = ctx.workspace_service().list_for_user(uid).await {
+                    if workspaces.is_empty() {
+                        return None;
+                    }
+                    if let Some(default_ws) = workspaces.iter().find(|ws| ws.is_default) {
+                        return Some(default_ws.id);
+                    }
+                    return Some(workspaces[0].id);
+                }
+            }
+            Ok(access::Actor::ShareToken(t)) => {
+                if let Ok(Some(ctx_share)) = ctx.share_service().resolve_share_context(&t).await {
+                    if share::is_expired(ctx_share.expires_at.as_ref(), chrono::Utc::now()) {
+                        return None;
+                    }
+                    return Some(ctx_share.workspace_id);
+                }
+            }
+            Ok(_) => {}
+            Err(token::ActorResolveError::TokenExpired) => {
+                request_status::mark_token_expired();
+                return None;
+            }
+            Err(token::ActorResolveError::Unauthorized) => {}
+        }
+    }
+    if let Some(token) = share_token {
+        // Share token: resolve its workspace for the renderer so plugin manifests can be loaded.
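+        // The bearer branch above returns early on success or on an expired token;
+        // any other bearer outcome falls through to here, where the share token is
+        // resolved through the same actor path.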
+ match token::resolve_actor_from_token_str(ctx, token).await { + Ok(access::Actor::User(uid)) => { + if let Ok(workspaces) = ctx.workspace_service().list_for_user(uid).await { + if workspaces.is_empty() { + return None; + } + if let Some(default_ws) = workspaces.iter().find(|ws| ws.is_default) { + return Some(default_ws.id); + } + return Some(workspaces[0].id); + } + } + Ok(access::Actor::ShareToken(t)) => { + if let Ok(Some(ctx_share)) = ctx.share_service().resolve_share_context(&t).await { + if share::is_expired(ctx_share.expires_at.as_ref(), chrono::Utc::now()) { + return None; + } + return Some(ctx_share.workspace_id); + } + } + Ok(_) => {} + Err(token::ActorResolveError::TokenExpired) => { + request_status::mark_token_expired(); + return None; + } + Err(token::ActorResolveError::Unauthorized) => {} + } + } + None +} diff --git a/api/src/presentation/http/metrics.rs b/api/crates/presentation/src/http/core/metrics.rs similarity index 70% rename from api/src/presentation/http/metrics.rs rename to api/crates/presentation/src/http/core/metrics.rs index c34441f4..bfca36d2 100644 --- a/api/src/presentation/http/metrics.rs +++ b/api/crates/presentation/src/http/core/metrics.rs @@ -4,9 +4,9 @@ use axum::{ response::Response, }; -use crate::presentation::context::AppContext; +use crate::context::CoreContext; -pub async fn metrics_handler(State(ctx): State) -> Result { +pub async fn metrics_handler(State(ctx): State) -> Result { let body = ctx.metrics().render(); Response::builder() .status(StatusCode::OK) diff --git a/api/crates/presentation/src/http/core/mod.rs b/api/crates/presentation/src/http/core/mod.rs new file mode 100644 index 00000000..4f37f2a6 --- /dev/null +++ b/api/crates/presentation/src/http/core/mod.rs @@ -0,0 +1,4 @@ +pub mod health; +pub mod markdown; +pub mod metrics; +pub mod storage_ingest; diff --git a/api/crates/presentation/src/http/core/storage_ingest/handlers/mod.rs b/api/crates/presentation/src/http/core/storage_ingest/handlers/mod.rs new file mode 100644 index 00000000..11673bcb --- /dev/null +++ b/api/crates/presentation/src/http/core/storage_ingest/handlers/mod.rs @@ -0,0 +1,57 @@ +use axum::{Json, extract::State, http::StatusCode}; + +use crate::context::CoreContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use application::core::dtos::storage_ingest::{IngestBatch, IngestEvent}; + +use super::types::IngestBatchRequest; + +#[utoipa::path( + post, + path = "/api/storage/ingest", + tag = "Storage", + request_body = IngestBatchRequest, + responses((status = 202, description = "Events enqueued"), (status = 400, description = "Invalid request")), +)] +pub async fn enqueue_ingest_events( + State(ctx): State, + auth: WorkspaceAuth, + Json(body): Json, +) -> Result { + let snapshot = auth.permissions.to_vec(); + let batch = IngestBatch { + events: body + .events + .into_iter() + .map(|event| IngestEvent { + repo_path: event.repo_path, + kind: event.kind.into(), + backend: event.backend, + content_hash: event.content_hash, + payload: event.payload, + }) + .collect(), + }; + + let count = ctx + .storage_ingest_enqueuer() + .enqueue_batch( + auth.workspace_id, + auth.user_id, + Some(auth.user_id), + &snapshot, + batch, + ) + .await + .map_err(|err| { + crate::http::error::map_service_error(err, "storage_ingest_enqueue_error") + })?; + + tracing::info!( + user_id = %auth.user_id, + events = count, + "storage_ingest_events_enqueued" + ); + Ok(StatusCode::ACCEPTED) +} diff --git a/api/crates/presentation/src/http/core/storage_ingest/mod.rs 
b/api/crates/presentation/src/http/core/storage_ingest/mod.rs new file mode 100644 index 00000000..dab46c0f --- /dev/null +++ b/api/crates/presentation/src/http/core/storage_ingest/mod.rs @@ -0,0 +1,19 @@ +mod handlers; +pub mod types; + +use axum::{Router, routing::post}; + +use crate::context::AppContext; + +pub use handlers::enqueue_ingest_events; +pub use types::*; + +pub mod openapi { + pub use super::handlers::*; +} + +pub fn routes(ctx: AppContext) -> Router { + Router::new() + .route("/storage/ingest", post(enqueue_ingest_events)) + .with_state(ctx) +} diff --git a/api/crates/presentation/src/http/core/storage_ingest/types.rs b/api/crates/presentation/src/http/core/storage_ingest/types.rs new file mode 100644 index 00000000..00c80f2c --- /dev/null +++ b/api/crates/presentation/src/http/core/storage_ingest/types.rs @@ -0,0 +1,35 @@ +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use utoipa::ToSchema; + +use application::core::ports::storage::storage_ingest_queue::StorageIngestKind; + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] +pub struct IngestBatchRequest { + pub events: Vec, +} + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] +#[serde(rename_all = "lowercase")] +pub enum IngestKindParam { + Upsert, + Delete, +} + +impl From for StorageIngestKind { + fn from(value: IngestKindParam) -> Self { + match value { + IngestKindParam::Upsert => StorageIngestKind::Upsert, + IngestKindParam::Delete => StorageIngestKind::Delete, + } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] +pub struct IngestEventRequest { + pub repo_path: String, + pub kind: IngestKindParam, + pub backend: Option, + pub content_hash: Option, + pub payload: Option, +} diff --git a/api/crates/presentation/src/http/documents/files/download.rs b/api/crates/presentation/src/http/documents/files/download.rs new file mode 100644 index 00000000..c1aea111 --- /dev/null +++ b/api/crates/presentation/src/http/documents/files/download.rs @@ -0,0 +1,59 @@ +use axum::{ + extract::{Path as AxumPath, Query, State}, + response::Response, +}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use application::core::services::access; +use domain::access::permissions::PERM_DOC_VIEW; + +use super::types::{FileByNameQuery, file_payload_response, map_file_error}; + +#[utoipa::path( + get, + path = "/api/files/{id}", + tag = "Files", + params(("id" = Uuid, Path, description = "File ID")), + responses((status = 200, description = "OK", body = Vec, content_type = "application/octet-stream")) +)] +pub async fn get_file( + State(ctx): State, + auth: WorkspaceAuth, + AxumPath(id): AxumPath, +) -> Result { + auth.ensure_permission(PERM_DOC_VIEW)?; + let actor = access::Actor::User(auth.user_id); + let payload = ctx + .file_service() + .download_owned_file(&actor, auth.workspace_id, id) + .await + .map_err(map_file_error)?; + Ok(file_payload_response(payload)) +} + +#[utoipa::path( + get, + path = "/api/files/documents/{filename}", + tag = "Files", + params(("filename" = String, Path, description = "File name"), ("document_id" = Uuid, Query, description = "Document ID")), + responses((status = 200, description = "OK", body = Vec, content_type = "application/octet-stream")) +)] +pub async fn get_file_by_name( + State(ctx): State, + auth: WorkspaceAuth, + AxumPath(filename): AxumPath, + Query(q): Query, +) -> Result { + auth.ensure_permission(PERM_DOC_VIEW)?; + + let actor = 
access::Actor::User(auth.user_id); + let payload = ctx + .file_service() + .get_file_by_name(&actor, q.document_id, &filename) + .await + .map_err(map_file_error)?; + Ok(file_payload_response(payload)) +} diff --git a/api/crates/presentation/src/http/documents/files/mod.rs b/api/crates/presentation/src/http/documents/files/mod.rs new file mode 100644 index 00000000..7fc23e50 --- /dev/null +++ b/api/crates/presentation/src/http/documents/files/mod.rs @@ -0,0 +1,29 @@ +mod download; +mod serve; +pub mod types; +mod upload; + +use axum::{ + Router, + routing::{get, post}, +}; + +use crate::context::AppContext; + +pub use download::{get_file, get_file_by_name}; +pub use serve::serve_upload; +pub use types::*; +pub use upload::upload_file; + +pub mod openapi { + pub use super::download::*; + pub use super::upload::*; +} + +pub fn routes(ctx: AppContext) -> Router { + Router::new() + .route("/files", post(upload_file)) + .route("/files/:id", get(get_file)) + .route("/files/documents/:filename", get(get_file_by_name)) + .with_state(ctx) +} diff --git a/api/crates/presentation/src/http/documents/files/serve.rs b/api/crates/presentation/src/http/documents/files/serve.rs new file mode 100644 index 00000000..f2306170 --- /dev/null +++ b/api/crates/presentation/src/http/documents/files/serve.rs @@ -0,0 +1,44 @@ +use axum::{ + extract::{Path as AxumPath, Query, State}, + http::HeaderMap, + response::Response, +}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::security::token; +use application::core::services::access; + +use super::types::file_payload_response; + +/// Serve static files from uploads directory with authentication support +pub async fn serve_upload( + State(ctx): State, + AxumPath(path): AxumPath, + Query(params): Query>, + headers: HeaderMap, +) -> Result { + let share_token = params.get("token").cloned(); + let bearer = token::bearer_from_headers(&headers); + + let parts: Vec<&str> = path.split('/').collect(); + if parts.len() < 2 { + return Err(ApiError::forbidden("forbidden")); + } + let doc_id = + Uuid::parse_str(parts[0]).map_err(|_| ApiError::bad_request("invalid_document_id"))?; + + let actor = token::resolve_actor_from_parts(&ctx, bearer, share_token.as_deref()) + .await + .map_err(token::map_actor_error)? 
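+        // Requests with neither bearer nor share token resolve to the Public actor;
+        // the file service below decides what that actor is allowed to read.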
+ .unwrap_or(access::Actor::Public); + let attachment_path = parts[1..].join("/"); + let payload = ctx + .file_service() + .serve_upload(&actor, doc_id, &attachment_path) + .await + .map_err(super::types::map_file_error)?; + + Ok(file_payload_response(payload)) +} diff --git a/api/crates/presentation/src/http/documents/files/types.rs b/api/crates/presentation/src/http/documents/files/types.rs new file mode 100644 index 00000000..0742e1e3 --- /dev/null +++ b/api/crates/presentation/src/http/documents/files/types.rs @@ -0,0 +1,53 @@ +use axum::{ + http::{HeaderMap, HeaderValue}, + response::IntoResponse, +}; +use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; +use uuid::Uuid; + +use application::core::services::errors::ServiceError; +use application::documents::services::files::FilePayload; + +#[derive(Debug, Serialize, ToSchema)] +pub struct UploadFileResponse { + pub id: Uuid, + pub url: String, + pub filename: String, + pub content_type: Option, + pub size: i64, +} + +pub fn map_file_error(err: ServiceError) -> crate::http::error::ApiError { + crate::http::error::map_service_error(err, "file_service_error") +} + +pub fn file_payload_response(payload: FilePayload) -> axum::response::Response { + let mut headers = HeaderMap::new(); + if let Some(ct) = payload.content_type { + headers.insert( + axum::http::header::CONTENT_TYPE, + HeaderValue::from_str(&ct) + .unwrap_or(HeaderValue::from_static("application/octet-stream")), + ); + } + headers.insert( + axum::http::header::HeaderName::from_static("x-content-type-options"), + HeaderValue::from_static("nosniff"), + ); + (headers, payload.bytes).into_response() +} + +#[derive(ToSchema)] +#[allow(dead_code)] +pub struct UploadFileMultipart { + #[schema(value_type = String, format = Binary)] + pub file: String, + #[schema(value_type = String, format = Uuid)] + pub document_id: String, +} + +#[derive(Debug, Deserialize)] +pub struct FileByNameQuery { + pub document_id: Uuid, +} diff --git a/api/crates/presentation/src/http/documents/files/upload.rs b/api/crates/presentation/src/http/documents/files/upload.rs new file mode 100644 index 00000000..4d609a32 --- /dev/null +++ b/api/crates/presentation/src/http/documents/files/upload.rs @@ -0,0 +1,104 @@ +use axum::{ + Json, + extract::{Multipart, State}, + http::StatusCode, +}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use domain::access::permissions::PERM_FILE_UPLOAD; + +use super::types::{UploadFileResponse, map_file_error}; + +#[utoipa::path( + post, + path = "/api/files", + tag = "Files", + request_body( + content = UploadFileMultipart, + content_type = "multipart/form-data", + ), + responses( + (status = 201, description = "File uploaded", body = UploadFileResponse) + ) +)] +pub async fn upload_file( + State(ctx): State, + auth: WorkspaceAuth, + mut multipart: Multipart, +) -> Result<(StatusCode, Json), ApiError> { + auth.ensure_permission(PERM_FILE_UPLOAD)?; + + let mut document_id: Option = None; + let mut file_bytes: Option> = None; + let mut orig_filename: Option = None; + let mut content_type: Option = None; + + while let Some(field) = multipart + .next_field() + .await + .map_err(|_| ApiError::bad_request("invalid_multipart"))? 
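+    // Loop body: only the "document_id" and "file" fields are consumed;
+    // unrecognized multipart fields are skipped.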
+ { + let name = field.name().map(|s| s.to_string()); + let file_name = field.file_name().map(|s| s.to_string()); + let ct = field.content_type().map(|s| s.to_string()); + match name.as_deref() { + Some("document_id") => { + let t = field + .text() + .await + .map_err(|_| ApiError::bad_request("invalid_document_id"))?; + document_id = Some( + Uuid::parse_str(t.trim()) + .map_err(|_| ApiError::bad_request("invalid_document_id"))?, + ); + } + Some("file") => { + orig_filename = file_name.clone(); + content_type = ct.clone(); + let data = field + .bytes() + .await + .map_err(|_| ApiError::bad_request("invalid_upload"))?; + if data.len() > ctx.cfg.upload_max_bytes { + return Err(ApiError::new( + StatusCode::PAYLOAD_TOO_LARGE, + "payload_too_large", + )); + } + file_bytes = Some(data.to_vec()); + } + _ => {} + } + } + + let doc_id = document_id.ok_or(ApiError::bad_request("missing_document_id"))?; + let bytes = file_bytes.ok_or(ApiError::bad_request("missing_file"))?; + + let public_base_url = ctx.cfg.public_base_url.clone(); + let file_service = ctx.file_service(); + let f = file_service + .upload_file( + auth.workspace_id, + auth.user_id, + doc_id, + bytes, + orig_filename, + content_type.clone(), + public_base_url, + ) + .await + .map_err(map_file_error)?; + Ok(( + StatusCode::CREATED, + Json(UploadFileResponse { + id: f.id, + url: f.url, + filename: f.filename, + content_type: f.content_type, + size: f.size, + }), + )) +} diff --git a/api/crates/presentation/src/http/documents/handlers/content.rs b/api/crates/presentation/src/http/documents/handlers/content.rs new file mode 100644 index 00000000..9a0b15c0 --- /dev/null +++ b/api/crates/presentation/src/http/documents/handlers/content.rs @@ -0,0 +1,200 @@ +use axum::{ + Json, + extract::{Path, Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::{IntoResponse, Response}, +}; +use serde_json::{Value, json}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::http::extractors::AuthedUser; +use crate::security::token::{self, Bearer}; +use application::core::services::access; +use application::core::services::errors::ServiceError; +use application::documents::services::DocumentPatchOperation; + +#[allow(unused_imports)] +use crate::http::documents::types::{ + Document, DocumentArchiveBinary, DocumentDownloadBinary, DownloadDocumentQuery, DownloadFormat, + PatchDocumentContentRequest, SnapshotTokenQuery, UpdateDocumentContentRequest, + map_service_error, to_http_document, +}; + +#[utoipa::path(get, path = "/api/documents/{id}/content", tag = "Documents", params(("id" = Uuid, Path, description = "Document ID"),), responses((status = 200)))] +pub async fn get_document_content( + State(ctx): State, + auth: AuthedUser, + Path(id): Path, +) -> Result, ApiError> { + let actor = access::Actor::User(auth.user_id); + let service = ctx.document_service(); + let content = service + .get_content(&actor, id) + .await + .map_err(map_service_error)?; + Ok(Json(json!({"content": content}))) +} + +#[utoipa::path( + put, + path = "/api/documents/{id}/content", + tag = "Documents", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("token" = Option, Query, description = "Share token (optional)") + ), + request_body = UpdateDocumentContentRequest, + responses((status = 200, body = Document)) +)] +pub async fn update_document_content( + State(ctx): State, + bearer: Option, + Path(id): Path, + q: Option>, + Json(body): Json, +) -> Result, ApiError> { + let params = q.map(|Query(v)| 
v).unwrap_or_default(); + let token = params.token.as_deref(); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? + .ok_or(ApiError::unauthorized("unauthorized"))?; + let service = ctx.document_service(); + let updated = service + .update_content(&actor, id, &body.content) + .await + .map_err(map_service_error)?; + Ok(Json(to_http_document(updated))) +} + +#[utoipa::path( + patch, + path = "/api/documents/{id}/content", + tag = "Documents", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("token" = Option, Query, description = "Share token (optional)") + ), + request_body = PatchDocumentContentRequest, + responses((status = 200, body = Document)) +)] +pub async fn patch_document_content( + State(ctx): State, + bearer: Option, + Path(id): Path, + q: Option>, + Json(body): Json, +) -> Result, ApiError> { + if body.operations.is_empty() { + return Err(ApiError::bad_request("missing_operations")); + } + let params = q.map(|Query(v)| v).unwrap_or_default(); + let token = params.token.as_deref(); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? + .ok_or(ApiError::unauthorized("unauthorized"))?; + let service = ctx.document_service(); + let operations: Vec = body + .operations + .into_iter() + .map(DocumentPatchOperation::from) + .collect(); + let updated = service + .patch_content(&actor, id, &operations) + .await + .map_err(map_service_error)?; + Ok(Json(to_http_document(updated))) +} + +#[utoipa::path( + get, + path = "/api/documents/{id}/download", + tag = "Documents", + operation_id = "download_document", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("token" = Option, Query, description = "Share token (optional)"), + ("format" = Option, Query, description = "Download format (see schema for supported values)") + ), + responses( + (status = 200, description = "Document download", body = DocumentDownloadBinary, content_type = "application/octet-stream"), + (status = 401, description = "Unauthorized"), + (status = 404, description = "Document not found") + ) +)] +pub async fn download_document( + State(ctx): State, + bearer: Option, + Query(params): Query, + Path(id): Path, +) -> Result { + let token = params.token.as_deref(); + let format = params.format; + + let actor = match token::resolve_actor_from_parts(&ctx, bearer, token).await { + Ok(Some(actor)) => actor, + Ok(None) => return Err(ApiError::unauthorized("unauthorized")), + Err(err) => return Err(token::map_actor_error(err)), + }; + + let service = ctx.document_service(); + let download = match service.download_document(&actor, id, format.into()).await { + Ok(payload) => payload, + Err(ServiceError::Unauthorized) + | Err(ServiceError::TokenExpired) + | Err(ServiceError::Forbidden) + | Err(ServiceError::NotFound) => { + return Err(ApiError::not_found("not_found")); + } + Err(ServiceError::Conflict) => { + return Err(ApiError::conflict("conflict")); + } + Err(ServiceError::BadRequest(_)) => { + return Err(ApiError::bad_request("bad_request")); + } + Err(ServiceError::Unexpected(error)) => { + tracing::error!( + document_id = %id, + ?format, + error = ?error, + "document_download_failed" + ); + return Err(ApiError::new( + StatusCode::INTERNAL_SERVER_ERROR, + "internal_error", + )); + } + }; + + let mut headers = HeaderMap::new(); + let content_type = match HeaderValue::from_str(&download.content_type) { + Ok(value) => value, + Err(_) => { + return Err(ApiError::new( + 
StatusCode::INTERNAL_SERVER_ERROR, + "internal_error", + )); + } + }; + headers.insert(axum::http::header::CONTENT_TYPE, content_type); + headers.insert( + axum::http::header::HeaderName::from_static("x-content-type-options"), + HeaderValue::from_static("nosniff"), + ); + let disposition = format!("attachment; filename=\"{}\"", download.filename); + let content_disposition = match HeaderValue::from_str(&disposition) { + Ok(value) => value, + Err(_) => { + return Err(ApiError::new( + StatusCode::INTERNAL_SERVER_ERROR, + "internal_error", + )); + } + }; + headers.insert(axum::http::header::CONTENT_DISPOSITION, content_disposition); + + Ok((headers, download.bytes).into_response()) +} diff --git a/api/crates/presentation/src/http/documents/handlers/crud.rs b/api/crates/presentation/src/http/documents/handlers/crud.rs new file mode 100644 index 00000000..b89ef78f --- /dev/null +++ b/api/crates/presentation/src/http/documents/handlers/crud.rs @@ -0,0 +1,229 @@ +use axum::{ + Json, + extract::{Path, Query, State}, + http::StatusCode, +}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use crate::security::token::{self, Bearer}; +use domain::access::permissions::PERM_DOC_VIEW; +use domain::documents::doc_type::DocumentType; + +use crate::http::documents::types::{ + CreateDocumentRequest, Document, DocumentListResponse, DocumentStateFilter, DoubleOption, + DuplicateDocumentRequest, ListDocumentsQuery, UpdateDocumentRequest, map_service_error, + to_http_document, +}; + +#[utoipa::path(get, path = "/api/documents", tag = "Documents", + params( + ("query" = Option, Query, description = "Search query"), + ("tag" = Option, Query, description = "Filter by tag"), + ("state" = Option, Query, description = "Filter by document state (active|archived|all)") + ), + responses((status = 200, body = DocumentListResponse)))] +pub async fn list_documents( + State(ctx): State, + auth: WorkspaceAuth, + q: Option>, +) -> Result, ApiError> { + auth.ensure_permission(PERM_DOC_VIEW)?; + let (qstr, tag, state_param) = q + .map(|Query(v)| (v.query, v.tag, v.state)) + .unwrap_or((None, None, None)); + let state = state_param + .map(DocumentStateFilter::into) + .unwrap_or_default(); + + let service = ctx.document_service(); + let docs = service + .list_for_user(auth.workspace_id, qstr, tag, state) + .await + .map_err(map_service_error)?; + + let items: Vec = docs.into_iter().map(to_http_document).collect(); + Ok(Json(DocumentListResponse { items })) +} + +#[utoipa::path(post, path = "/api/documents", tag = "Documents", request_body = CreateDocumentRequest, responses((status = 200, body = Document)))] +pub async fn create_document( + State(ctx): State, + auth: WorkspaceAuth, + Json(req): Json, +) -> Result, ApiError> { + let title = req.title.unwrap_or_else(|| "Untitled".into()); + let dtype = req + .r#type + .unwrap_or_else(|| DocumentType::Document.as_str().to_string()); + let doc_type = DocumentType::try_from(dtype.as_str()) + .map_err(|_| ApiError::bad_request("invalid_document_type"))?; + let service = ctx.document_service(); + let doc = service + .create_for_user( + auth.workspace_id, + auth.user_id, + &auth.permissions, + &title, + req.parent_id, + doc_type, + None, + ) + .await + .map_err(map_service_error)?; + + Ok(Json(to_http_document(doc))) +} + +#[utoipa::path(get, path = "/api/documents/{id}", tag = "Documents", + params(("id" = Uuid, Path, description = "Document ID"), ("token" = Option, Query, description = "Share token 
(optional)")), + responses((status = 200, body = Document)))] +pub async fn get_document( + State(ctx): State, + bearer: Option, + Query(params): Query>, + Path(id): Path, +) -> Result, ApiError> { + let token = params.get("token").map(|s| s.as_str()); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? + .ok_or(ApiError::unauthorized("unauthorized"))?; + let service = ctx.document_service(); + let doc = service + .get_for_actor(&actor, id) + .await + .map_err(map_service_error)?; + + Ok(Json(to_http_document(doc))) +} + +#[utoipa::path(delete, path = "/api/documents/{id}", tag = "Documents", params(("id" = Uuid, Path, description = "Document ID"),), responses((status = 204)))] +pub async fn delete_document( + State(ctx): State, + auth: WorkspaceAuth, + Path(id): Path, +) -> Result { + let service = ctx.document_service(); + let ok = service + .delete_for_user(auth.workspace_id, id, Some(auth.user_id), &auth.permissions) + .await + .map_err(map_service_error)?; + if ok { + Ok(StatusCode::NO_CONTENT) + } else { + Err(ApiError::not_found("not_found")) + } +} + +#[utoipa::path(patch, path = "/api/documents/{id}", tag = "Documents", request_body = UpdateDocumentRequest, + params(("id" = Uuid, Path, description = "Document ID"),), responses((status = 200, body = Document)))] +pub async fn update_document( + State(ctx): State, + auth: WorkspaceAuth, + Path(id): Path, + Json(req): Json, +) -> Result, ApiError> { + let parent_opt = match req.parent_id.clone() { + DoubleOption::NotProvided => None, + DoubleOption::Null => Some(None), + DoubleOption::Some(v) => Some(Some(v)), + }; + let service = ctx.document_service(); + let doc = service + .update_metadata( + auth.workspace_id, + id, + auth.user_id, + &auth.permissions, + req.title.clone(), + parent_opt, + ) + .await + .map_err(map_service_error)?; + Ok(Json(to_http_document(doc))) +} + +#[utoipa::path( + post, + path = "/api/documents/{id}/duplicate", + tag = "Documents", + request_body = DuplicateDocumentRequest, + params(("id" = Uuid, Path, description = "Document ID"),), + responses((status = 200, body = Document)) +)] +pub async fn duplicate_document( + State(ctx): State, + auth: WorkspaceAuth, + Path(id): Path, + Json(req): Json, +) -> Result, ApiError> { + let parent_opt = match req.parent_id.clone() { + DoubleOption::NotProvided => None, + DoubleOption::Null => Some(None), + DoubleOption::Some(v) => Some(Some(v)), + }; + let doc = ctx + .document_service() + .duplicate_document( + auth.workspace_id, + id, + auth.user_id, + &auth.permissions, + req.title.clone(), + parent_opt, + ) + .await + .map_err(map_service_error)?; + Ok(Json(to_http_document(doc))) +} + +#[utoipa::path( + post, + path = "/api/documents/{id}/archive", + tag = "Documents", + params(("id" = Uuid, Path, description = "Document ID")), + responses( + (status = 200, body = Document), + (status = 404, description = "Document not found"), + (status = 409, description = "Document already archived") + ) +)] +pub async fn archive_document( + State(ctx): State, + auth: WorkspaceAuth, + Path(id): Path, +) -> Result, ApiError> { + let doc = ctx + .document_service() + .archive_document(auth.workspace_id, id, auth.user_id, &auth.permissions) + .await + .map_err(map_service_error)?; + Ok(Json(to_http_document(doc))) +} + +#[utoipa::path( + post, + path = "/api/documents/{id}/unarchive", + tag = "Documents", + params(("id" = Uuid, Path, description = "Document ID")), + responses( + (status = 200, body = Document), + (status = 
404, description = "Document not found"),
+        (status = 409, description = "Document is not archived")
+    )
+)]
+pub async fn unarchive_document(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<Json<Document>, ApiError> {
+    let doc = ctx
+        .document_service()
+        .unarchive_document(auth.workspace_id, id, auth.user_id, &auth.permissions)
+        .await
+        .map_err(map_service_error)?;
+    Ok(Json(to_http_document(doc)))
+}
diff --git a/api/crates/presentation/src/http/documents/handlers/links.rs b/api/crates/presentation/src/http/documents/handlers/links.rs
new file mode 100644
index 00000000..1045a424
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/handlers/links.rs
@@ -0,0 +1,83 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+};
+use uuid::Uuid;
+
+use crate::context::DocumentsContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::access;
+use domain::access::permissions::PERM_DOC_VIEW;
+
+use crate::http::documents::types::{
+    BacklinkInfo, BacklinksResponse, OutgoingLink, OutgoingLinksResponse, map_service_error,
+};
+
+#[utoipa::path(get, path = "/api/documents/{id}/backlinks", tag = "Documents", operation_id = "getBacklinks",
+    params(("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, body = BacklinksResponse)))]
+pub async fn get_backlinks(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<Json<BacklinksResponse>, ApiError> {
+    auth.ensure_permission(PERM_DOC_VIEW)?;
+    let actor = access::Actor::User(auth.user_id);
+    let service = ctx.document_service();
+    let items = service
+        .backlinks(&actor, auth.workspace_id, id)
+        .await
+        .map_err(map_service_error)?;
+    let backlinks: Vec<BacklinkInfo> = items
+        .into_iter()
+        .map(|r| BacklinkInfo {
+            document_id: r.document_id.to_string(),
+            title: r.title.into_string(),
+            document_type: r.document_type.to_string(),
+            file_path: r.file_path,
+            link_type: r.link_type,
+            link_text: r.link_text,
+            link_count: r.link_count,
+        })
+        .collect();
+    Ok(Json(BacklinksResponse {
+        total_count: backlinks.len(),
+        backlinks,
+    }))
+}
+
+#[utoipa::path(get, path = "/api/documents/{id}/links", tag = "Documents", operation_id = "getOutgoingLinks",
+    params(("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, body = OutgoingLinksResponse)))]
+pub async fn get_outgoing_links(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<Json<OutgoingLinksResponse>, ApiError> {
+    auth.ensure_permission(PERM_DOC_VIEW)?;
+    let actor = access::Actor::User(auth.user_id);
+    let service = ctx.document_service();
+    let items = service
+        .outgoing_links(&actor, auth.workspace_id, id)
+        .await
+        .map_err(map_service_error)?;
+    let links = items
+        .into_iter()
+        .map(|r| OutgoingLink {
+            document_id: r.document_id.to_string(),
+            title: r.title.into_string(),
+            document_type: r.document_type.to_string(),
+            file_path: r.file_path,
+            link_type: r.link_type,
+            link_text: r.link_text,
+            position_start: r.position_start,
+            position_end: r.position_end,
+        })
+        .collect::<Vec<OutgoingLink>>();
+
+    Ok(Json(OutgoingLinksResponse {
+        total_count: links.len(),
+        links,
+    }))
+}
diff --git a/api/crates/presentation/src/http/documents/handlers/mod.rs b/api/crates/presentation/src/http/documents/handlers/mod.rs
new file mode 100644
index 00000000..32bd27a8
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/handlers/mod.rs
@@ -0,0 +1,11 @@
+pub mod content;
+pub mod crud;
+pub mod links;
+pub mod search;
+pub mod snapshots;
+
+pub use content::*;
+pub use crud::*;
+pub use links::*;
+pub use search::*; +pub use snapshots::*; diff --git a/api/crates/presentation/src/http/documents/handlers/search.rs b/api/crates/presentation/src/http/documents/handlers/search.rs new file mode 100644 index 00000000..a140d5a0 --- /dev/null +++ b/api/crates/presentation/src/http/documents/handlers/search.rs @@ -0,0 +1,40 @@ +use axum::{ + Json, + extract::{Query, State}, +}; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use domain::access::permissions::PERM_DOC_VIEW; + +use crate::http::documents::types::{SearchQuery, SearchResult, map_service_error}; + +#[utoipa::path(get, path = "/api/documents/search", tag = "Documents", + params(("q" = Option, Query, description = "Query")), + responses((status = 200, body = [SearchResult])))] +pub async fn search_documents( + State(ctx): State, + auth: WorkspaceAuth, + q: Option>, +) -> Result>, ApiError> { + auth.ensure_permission(PERM_DOC_VIEW)?; + let query_text = q.and_then(|Query(v)| v.q); + + let service = ctx.document_service(); + let hits = service + .search_for_user(auth.workspace_id, query_text, 20) + .await + .map_err(map_service_error)?; + let items = hits + .into_iter() + .map(|h| SearchResult { + id: h.id, + title: h.title.into_string(), + document_type: h.doc_type.to_string(), + path: h.path, + updated_at: h.updated_at, + }) + .collect(); + Ok(Json(items)) +} diff --git a/api/crates/presentation/src/http/documents/handlers/snapshots.rs b/api/crates/presentation/src/http/documents/handlers/snapshots.rs new file mode 100644 index 00000000..4966c011 --- /dev/null +++ b/api/crates/presentation/src/http/documents/handlers/snapshots.rs @@ -0,0 +1,182 @@ +use axum::{ + Json, + extract::{Path, Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::{IntoResponse, Response}, +}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::security::token::{self, Bearer}; + +#[allow(unused_imports)] +use crate::http::documents::types::{ + DocumentArchiveBinary, SnapshotDiffBaseParam, SnapshotDiffQuery, SnapshotDiffResponse, + SnapshotListResponse, SnapshotRestoreResponse, SnapshotTokenQuery, map_service_error, + snapshot_diff_side_response_from, snapshot_summary_from, +}; + +#[utoipa::path( + get, + path = "/api/documents/{id}/snapshots", + tag = "Documents", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("token" = Option, Query, description = "Share token (optional)"), + ("limit" = Option, Query, description = "Maximum number of snapshots to return"), + ("offset" = Option, Query, description = "Offset for pagination") + ), + responses((status = 200, body = SnapshotListResponse)) +)] +pub async fn list_document_snapshots( + State(ctx): State, + bearer: Option, + Path(id): Path, + q: Option>, +) -> Result, ApiError> { + let params = q.map(|Query(v)| v).unwrap_or_default(); + let token = params.token.as_deref(); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? 
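+        // Snapshot history is never served anonymously; an unresolved actor is a 401.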
+ .ok_or(ApiError::unauthorized("unauthorized"))?; + + let limit = params.limit.unwrap_or(50).clamp(1, 200); + let offset = params.offset.unwrap_or(0).max(0); + + let service = ctx.document_service(); + let records = service + .list_snapshots(&actor, id, limit, offset) + .await + .map_err(map_service_error)?; + let items = records.into_iter().map(snapshot_summary_from).collect(); + + Ok(Json(SnapshotListResponse { items })) +} + +#[utoipa::path( + get, + path = "/api/documents/{id}/snapshots/{snapshot_id}/diff", + tag = "Documents", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("snapshot_id" = Uuid, Path, description = "Snapshot ID"), + ("token" = Option, Query, description = "Share token (optional)"), + ("compare" = Option, Query, description = "Snapshot ID to compare against (defaults to current document state)"), + ("base" = Option, Query, description = "Base comparison to use when compare is not provided (auto|current|previous)") + ), + responses((status = 200, body = SnapshotDiffResponse)) +)] +pub async fn get_document_snapshot_diff( + State(ctx): State, + bearer: Option, + Path((id, snapshot_id)): Path<(Uuid, Uuid)>, + q: Option>, +) -> Result, ApiError> { + let params = q.map(|Query(v)| v).unwrap_or_default(); + let token = params.token.as_deref(); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? + .ok_or(ApiError::unauthorized("unauthorized"))?; + + let base_mode = params + .base + .map(SnapshotDiffBaseParam::into) + .unwrap_or(SnapshotDiffBaseParam::Auto.into()); + + let service = ctx.document_service(); + let result = service + .snapshot_diff(&actor, id, snapshot_id, params.compare, base_mode) + .await + .map_err(map_service_error)?; + + let diff = result.diff; + let base = snapshot_diff_side_response_from(result.base); + let target = snapshot_diff_side_response_from(result.target); + + Ok(Json(SnapshotDiffResponse { base, target, diff })) +} + +#[utoipa::path( + post, + path = "/api/documents/{id}/snapshots/{snapshot_id}/restore", + tag = "Documents", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("snapshot_id" = Uuid, Path, description = "Snapshot ID"), + ("token" = Option, Query, description = "Share token (optional)") + ), + responses((status = 200, body = SnapshotRestoreResponse)) +)] +pub async fn restore_document_snapshot( + State(ctx): State, + bearer: Option, + Path((id, snapshot_id)): Path<(Uuid, Uuid)>, + q: Option>, +) -> Result, ApiError> { + let params = q.map(|Query(v)| v).unwrap_or_default(); + let token = params.token.as_deref(); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? 
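+        // Share tokens are accepted here, but a fully anonymous request is rejected.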
+ .ok_or(ApiError::unauthorized("unauthorized"))?; + + let service = ctx.document_service(); + let restored = service + .restore_snapshot(&actor, id, snapshot_id) + .await + .map_err(map_service_error)?; + + Ok(Json(SnapshotRestoreResponse { + snapshot: snapshot_summary_from(restored), + })) +} + +#[utoipa::path( + get, + path = "/api/documents/{id}/snapshots/{snapshot_id}/download", + tag = "Documents", + params( + ("id" = Uuid, Path, description = "Document ID"), + ("snapshot_id" = Uuid, Path, description = "Snapshot ID"), + ("token" = Option, Query, description = "Share token (optional)") + ), + responses( + (status = 200, description = "Snapshot archive", body = DocumentArchiveBinary, content_type = "application/zip"), + (status = 401, description = "Unauthorized"), + (status = 404, description = "Snapshot not found") + ) +)] +pub async fn download_document_snapshot( + State(ctx): State, + bearer: Option, + Path((id, snapshot_id)): Path<(Uuid, Uuid)>, + q: Option>, +) -> Result { + let params = q.map(|Query(v)| v).unwrap_or_default(); + let token = params.token.as_deref(); + let actor = token::resolve_actor_from_parts(&ctx, bearer, token) + .await + .map_err(token::map_actor_error)? + .ok_or(ApiError::unauthorized("unauthorized"))?; + + let service = ctx.document_service(); + let download = service + .download_snapshot(&actor, id, snapshot_id) + .await + .map_err(map_service_error)?; + + let mut headers = HeaderMap::new(); + headers.insert( + axum::http::header::CONTENT_TYPE, + HeaderValue::from_static("application/zip"), + ); + let disposition = format!("attachment; filename=\"{}\"", download.filename); + let content_disposition = HeaderValue::from_str(&disposition) + .map_err(|_| ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error"))?; + headers.insert(axum::http::header::CONTENT_DISPOSITION, content_disposition); + + Ok((headers, download.bytes).into_response()) +} diff --git a/api/crates/presentation/src/http/documents/mod.rs b/api/crates/presentation/src/http/documents/mod.rs new file mode 100644 index 00000000..e465b03c --- /dev/null +++ b/api/crates/presentation/src/http/documents/mod.rs @@ -0,0 +1,64 @@ +pub mod files; +mod handlers; +pub mod publishing; +pub mod sharing; +pub mod tagging; +pub mod types; + +use axum::{ + Router, + routing::{get, post}, +}; + +use crate::context::AppContext; + +pub use handlers::{ + archive_document, create_document, delete_document, download_document, + download_document_snapshot, duplicate_document, get_backlinks, get_document, + get_document_content, get_document_snapshot_diff, get_outgoing_links, list_document_snapshots, + list_documents, patch_document_content, restore_document_snapshot, search_documents, + unarchive_document, update_document, update_document_content, +}; +pub use types::*; + +pub mod openapi { + pub use super::handlers::*; +} + +pub fn routes(ctx: AppContext) -> Router { + Router::new() + .route("/documents", get(list_documents).post(create_document)) + .route( + "/documents/:id", + get(get_document) + .delete(delete_document) + .patch(update_document), + ) + .route( + "/documents/:id/content", + get(get_document_content) + .put(update_document_content) + .patch(patch_document_content), + ) + .route("/documents/:id/duplicate", post(duplicate_document)) + .route("/documents/:id/archive", post(archive_document)) + .route("/documents/:id/unarchive", post(unarchive_document)) + .route("/documents/:id/snapshots", get(list_document_snapshots)) + .route( + "/documents/:id/snapshots/:snapshot_id/diff", + 
diff --git a/api/crates/presentation/src/http/documents/mod.rs b/api/crates/presentation/src/http/documents/mod.rs
new file mode 100644
index 00000000..e465b03c
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/mod.rs
@@ -0,0 +1,64 @@
+pub mod files;
+mod handlers;
+pub mod publishing;
+pub mod sharing;
+pub mod tagging;
+pub mod types;
+
+use axum::{
+    Router,
+    routing::{get, post},
+};
+
+use crate::context::AppContext;
+
+pub use handlers::{
+    archive_document, create_document, delete_document, download_document,
+    download_document_snapshot, duplicate_document, get_backlinks, get_document,
+    get_document_content, get_document_snapshot_diff, get_outgoing_links, list_document_snapshots,
+    list_documents, patch_document_content, restore_document_snapshot, search_documents,
+    unarchive_document, update_document, update_document_content,
+};
+pub use types::*;
+
+pub mod openapi {
+    pub use super::handlers::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route("/documents", get(list_documents).post(create_document))
+        .route(
+            "/documents/:id",
+            get(get_document)
+                .delete(delete_document)
+                .patch(update_document),
+        )
+        .route(
+            "/documents/:id/content",
+            get(get_document_content)
+                .put(update_document_content)
+                .patch(patch_document_content),
+        )
+        .route("/documents/:id/duplicate", post(duplicate_document))
+        .route("/documents/:id/archive", post(archive_document))
+        .route("/documents/:id/unarchive", post(unarchive_document))
+        .route("/documents/:id/snapshots", get(list_document_snapshots))
+        .route(
+            "/documents/:id/snapshots/:snapshot_id/diff",
+            get(get_document_snapshot_diff),
+        )
+        .route(
+            "/documents/:id/snapshots/:snapshot_id/restore",
+            post(restore_document_snapshot),
+        )
+        .route(
+            "/documents/:id/snapshots/:snapshot_id/download",
+            get(download_document_snapshot),
+        )
+        .route("/documents/:id/download", get(download_document))
+        .route("/documents/:id/backlinks", get(get_backlinks))
+        .route("/documents/:id/links", get(get_outgoing_links))
+        .route("/documents/search", get(search_documents))
+        .with_state(ctx)
+}
diff --git a/api/crates/presentation/src/http/documents/publishing/handlers/mod.rs b/api/crates/presentation/src/http/documents/publishing/handlers/mod.rs
new file mode 100644
index 00000000..1624d4ca
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/publishing/handlers/mod.rs
@@ -0,0 +1,149 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::StatusCode,
+};
+use uuid::Uuid;
+
+use crate::context::DocumentsContext;
+use crate::http::documents::{Document, to_http_document};
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::errors::ServiceError;
+
+use super::types::{PublicDocumentSummary, PublishResponse};
+
+fn map_public_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "public_service_error")
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/public/documents/{id}",
+    tag = "Public Documents",
+    params(("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, description = "Published", body = PublishResponse))
+)]
+pub async fn publish_document(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<Json<PublishResponse>, ApiError> {
+    let service = ctx.public_service();
+    let out = service
+        .publish_document(auth.workspace_id, &auth.permissions, id)
+        .await
+        .map_err(map_public_error)?;
+    Ok(Json(PublishResponse {
+        slug: out.slug,
+        public_url: out.public_url,
+    }))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/public/documents/{id}",
+    tag = "Public Documents",
+    params(("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 204, description = "Unpublished"))
+)]
+pub async fn unpublish_document(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<StatusCode, ApiError> {
+    let ok = ctx
+        .public_service()
+        .unpublish_document(auth.workspace_id, &auth.permissions, id)
+        .await
+        .map_err(map_public_error)?;
+    if ok {
+        Ok(StatusCode::NO_CONTENT)
+    } else {
+        Err(ApiError::forbidden("forbidden"))
+    }
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/public/documents/{id}",
+    tag = "Public Documents",
+    params(("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, description = "Published status", body = PublishResponse))
+)]
+pub async fn get_publish_status(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<Json<PublishResponse>, ApiError> {
+    let out = ctx
+        .public_service()
+        .get_publish_status(auth.workspace_id, &auth.permissions, id)
+        .await
+        .map_err(map_public_error)?;
+    Ok(Json(PublishResponse {
+        slug: out.slug,
+        public_url: out.public_url,
+    }))
+}
+
+// Slug-based endpoints are intentionally omitted to simplify routing and match the legacy pattern strictly.
+
+#[utoipa::path(
+    get,
+    path = "/api/public/workspaces/{slug}",
+    tag = "Public Documents",
+    params(("slug" = String, Path, description = "Workspace slug")),
+    responses((status = 200, description = "Public documents for workspace", body = [PublicDocumentSummary]))
+)]
+pub async fn list_workspace_public_documents(
+    State(ctx): State<DocumentsContext>,
+    Path(slug): Path<String>,
+) -> Result<Json<Vec<PublicDocumentSummary>>, ApiError> {
+    let items = ctx
+        .public_service()
+        .list_workspace_public_documents(&slug)
+        .await
+        .map_err(map_public_error)?;
+    Ok(Json(
+        items.into_iter().map(PublicDocumentSummary::from).collect(),
+    ))
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/public/workspaces/{slug}/{id}",
+    tag = "Public Documents",
+    params(("slug" = String, Path, description = "Workspace slug"), ("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, description = "Document metadata", body = Document))
+)]
+pub async fn get_public_by_workspace_and_id(
+    State(ctx): State<DocumentsContext>,
+    Path((slug, id)): Path<(String, Uuid)>,
+) -> Result<Json<Document>, ApiError> {
+    let doc = ctx
+        .public_service()
+        .get_public_by_workspace_and_id(&slug, id)
+        .await
+        .map_err(map_public_error)?;
+    Ok(Json(to_http_document(doc)))
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/public/workspaces/{slug}/{id}/content",
+    tag = "Public Documents",
+    params(("slug" = String, Path, description = "Workspace slug"), ("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, description = "Document content"))
+)]
+pub async fn get_public_content_by_workspace_and_id(
+    State(ctx): State<DocumentsContext>,
+    Path((slug, id)): Path<(String, Uuid)>,
+) -> Result<Json<serde_json::Value>, ApiError> {
+    let content = ctx
+        .public_service()
+        .get_public_content_by_workspace_and_id(&slug, id)
+        .await
+        .map_err(map_public_error)?;
+    Ok(Json(serde_json::json!({"content": content, "id": id})))
+}
diff --git a/api/crates/presentation/src/http/documents/publishing/mod.rs b/api/crates/presentation/src/http/documents/publishing/mod.rs
new file mode 100644
index 00000000..9feac42c
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/publishing/mod.rs
@@ -0,0 +1,41 @@
+mod handlers;
+pub mod types;
+
+use axum::Router;
+use axum::routing::{get, post};
+
+use crate::context::AppContext;
+
+pub use handlers::{
+    get_public_by_workspace_and_id, get_public_content_by_workspace_and_id, get_publish_status,
+    list_workspace_public_documents, publish_document, unpublish_document,
+};
+pub use types::*;
+
+pub mod openapi {
+    pub use super::handlers::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route(
+            "/documents/:id",
+            post(publish_document)
+                .delete(unpublish_document)
+                .get(get_publish_status),
+        )
+        .route("/workspaces/:slug", get(list_workspace_public_documents))
+        .route("/workspaces/:slug/:id", get(get_public_by_workspace_and_id))
+        .route(
+            "/workspaces/:slug/:id/content",
+            get(get_public_content_by_workspace_and_id),
+        )
+        // legacy aliases
+        .route("/users/:slug", get(list_workspace_public_documents))
+        .route("/users/:slug/:id", get(get_public_by_workspace_and_id))
+        .route(
+            "/users/:slug/:id/content",
+            get(get_public_content_by_workspace_and_id),
+        )
+        .with_state(ctx)
+}
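// Aside (illustration, not part of the diff): these routers register bare
// paths like "/documents/:id"; the "/api/public" prefix in the #[utoipa::path]
// annotations implies the bootstrap crate nests them roughly like this
// (assumed wiring, shown for orientation):
fn _nest_public_router(ctx: AppContext) -> axum::Router {
    axum::Router::new().nest("/api/public", routes(ctx))
    // POST /api/public/documents/{id}    -> publish_document
    // GET  /api/public/workspaces/{slug} -> list_workspace_public_documents
    // GET  /api/public/users/{slug}      -> same handler (legacy alias)
}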
diff --git a/api/crates/presentation/src/http/documents/publishing/types.rs b/api/crates/presentation/src/http/documents/publishing/types.rs
new file mode 100644
index 00000000..690fac73
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/publishing/types.rs
@@ -0,0 +1,30 @@
+use serde::Serialize;
+use utoipa::ToSchema;
+use uuid::Uuid;
+
+use application::documents::dtos::PublicDocumentSummaryDto;
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct PublishResponse {
+    pub slug: String,
+    pub public_url: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct PublicDocumentSummary {
+    pub id: Uuid,
+    pub title: String,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+    pub published_at: chrono::DateTime<chrono::Utc>,
+}
+
+impl From<PublicDocumentSummaryDto> for PublicDocumentSummary {
+    fn from(value: PublicDocumentSummaryDto) -> Self {
+        Self {
+            id: value.id,
+            title: value.title,
+            updated_at: value.updated_at,
+            published_at: value.published_at,
+        }
+    }
+}
diff --git a/api/crates/presentation/src/http/documents/sharing/active.rs b/api/crates/presentation/src/http/documents/sharing/active.rs
new file mode 100644
index 00000000..f1b64118
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/sharing/active.rs
@@ -0,0 +1,31 @@
+use axum::{Json, extract::State};
+
+use crate::context::DocumentsContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+
+use super::types::{ActiveShareItem, frontend_base, map_share_error};
+use application::documents::dtos::ActiveShareItemDto;
+
+#[utoipa::path(
+    get,
+    path = "/api/shares/active",
+    tag = "Sharing",
+    responses((status = 200, description = "Active shares", body = [ActiveShareItem]))
+)]
+pub async fn list_active_shares(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+) -> Result<Json<Vec<ActiveShareItem>>, ApiError> {
+    let service = ctx.share_service();
+    let items: Vec<ActiveShareItemDto> = service
+        .list_active(auth.workspace_id, &auth.permissions)
+        .await
+        .map_err(map_share_error)?;
+    let base = frontend_base(&ctx.cfg);
+    let out: Vec<ActiveShareItem> = items
+        .into_iter()
+        .map(|dto| ActiveShareItem::from((dto, base.clone())))
+        .collect();
+    Ok(Json(out))
+}
diff --git a/api/crates/presentation/src/http/documents/sharing/applicable.rs b/api/crates/presentation/src/http/documents/sharing/applicable.rs
new file mode 100644
index 00000000..1fb0e8e4
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/sharing/applicable.rs
@@ -0,0 +1,34 @@
+use axum::{
+    Json,
+    extract::{Query, State},
+};
+
+use crate::context::DocumentsContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::access;
+
+use super::types::{ApplicableQuery, ApplicableShareItem, map_share_error};
+
+#[utoipa::path(get, path = "/api/shares/applicable", tag = "Sharing",
+    params(("doc_id" = Uuid, Query, description = "Document ID")),
+    responses((status = 200, description = "Shares that include the document", body = [ApplicableShareItem])))]
+pub async fn list_applicable_shares(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Query(q): Query<ApplicableQuery>,
+) -> Result<Json<Vec<ApplicableShareItem>>, ApiError> {
+    let actor = access::Actor::User(auth.user_id);
+    ctx.authorization()
+        .require_view(&actor, q.doc_id)
+        .await
+        .map_err(|err| crate::http::error::map_service_error(err, "authorization_error"))?;
+
+    let service = ctx.share_service();
+    let rows = service
+        .list_applicable(auth.workspace_id, &auth.permissions, q.doc_id)
+        .await
+        .map_err(map_share_error)?;
+    let items: Vec<ApplicableShareItem> = rows.into_iter().map(Into::into).collect();
+    Ok(Json(items))
+}
diff --git a/api/crates/presentation/src/http/documents/sharing/mod.rs b/api/crates/presentation/src/http/documents/sharing/mod.rs
new file mode 100644
index 00000000..85543b7e
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/sharing/mod.rs
@@ -0,0 +1,51 @@
+mod active;
+mod applicable;
+mod mounts;
+mod shares;
+pub mod types;
+mod validation;
+
+use axum::{
+    Router,
+    routing::{delete, get, post},
+};
+
+use crate::context::AppContext;
+
+pub use active::list_active_shares;
+pub use applicable::list_applicable_shares;
+pub use mounts::{
+    create_share_mount, delete_share_mount, list_share_mounts, materialize_folder_share,
+};
+pub use shares::{create_share, delete_share, list_document_shares};
+pub use types::*;
+pub use validation::{browse_share, validate_share_token};
+
+pub mod openapi {
+    pub use super::active::*;
+    pub use super::applicable::*;
+    pub use super::mounts::*;
+    pub use super::shares::*;
+    pub use super::validation::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route("/shares", post(create_share))
+        .route(
+            "/shares/mounts",
+            post(create_share_mount).get(list_share_mounts),
+        )
+        .route("/shares/browse", get(browse_share))
+        .route("/shares/validate", get(validate_share_token))
+        .route("/shares/documents/:id", get(list_document_shares))
+        .route("/shares/applicable", get(list_applicable_shares))
+        .route(
+            "/shares/folders/:token/materialize",
+            post(materialize_folder_share),
+        )
+        .route("/shares/active", get(list_active_shares))
+        .route("/shares/mounts/:id", delete(delete_share_mount))
+        .route("/shares/:token", delete(delete_share))
+        .with_state(ctx)
+}
diff --git a/api/crates/presentation/src/http/documents/sharing/mounts.rs b/api/crates/presentation/src/http/documents/sharing/mounts.rs
new file mode 100644
index 00000000..717dbeef
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/sharing/mounts.rs
@@ -0,0 +1,110 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::StatusCode,
+};
+use uuid::Uuid;
+
+use crate::context::DocumentsContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+
+use super::types::{CreateShareMountRequest, MaterializeResponse, ShareMountItem, map_share_error};
+
+#[utoipa::path(
+    post,
+    path = "/api/shares/mounts",
+    tag = "Sharing",
+    request_body = CreateShareMountRequest,
+    responses((status = 200, description = "Saved share mount", body = ShareMountItem))
+)]
+pub async fn create_share_mount(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Json(req): Json<CreateShareMountRequest>,
+) -> Result<Json<ShareMountItem>, ApiError> {
+    let service = ctx.share_service();
+    let item = service
+        .save_share_mount(
+            auth.workspace_id,
+            auth.user_id,
+            &auth.permissions,
+            &req.token,
+            req.parent_folder_id,
+        )
+        .await
+        .map_err(map_share_error)?;
+    Ok(Json(item.into()))
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/shares/mounts",
+    tag = "Sharing",
+    responses((status = 200, description = "Share mounts", body = [ShareMountItem]))
+)]
+pub async fn list_share_mounts(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+) -> Result<Json<Vec<ShareMountItem>>, ApiError> {
+    let service = ctx.share_service();
+    let items = service
+        .list_share_mounts(auth.workspace_id, &auth.permissions)
+        .await
+        .map_err(map_share_error)?;
+    Ok(Json(items.into_iter().map(Into::into).collect()))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/shares/mounts/{id}",
+    tag = "Sharing",
+    params(("id" = Uuid, Path, description = "Share mount ID")),
+    responses((status = 204, description = "Share mount removed"))
+)]
+pub async fn delete_share_mount(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<StatusCode, ApiError> {
+    let service = ctx.share_service();
+    let deleted = service
+        .delete_share_mount(auth.workspace_id, &auth.permissions, id)
+        .await
+        .map_err(map_share_error)?;
+    if deleted {
+        Ok(StatusCode::NO_CONTENT)
+    } else {
+        Err(ApiError::not_found("not_found"))
+    }
+}
"/api/shares/folders/{token}/materialize", tag = "Sharing", + params(("token" = String, Path, description = "Folder share token")), + responses((status = 200, description = "Created doc shares", body = MaterializeResponse)) +)] +pub async fn materialize_folder_share( + State(ctx): State, + auth: WorkspaceAuth, + Path(token): Path, +) -> Result, ApiError> { + let service = ctx.share_service(); + let meta = service + .share_document_meta(&token) + .await + .map_err(map_share_error)? + .ok_or(ApiError::not_found("not_found"))?; + if meta.workspace_id != auth.workspace_id { + return Err(ApiError::forbidden("forbidden")); + } + let actor = application::core::services::access::Actor::User(auth.user_id); + ctx.authorization() + .require_edit(&actor, meta.document_id) + .await + .map_err(|err| crate::http::error::map_service_error(err, "authorization_error"))?; + let created = service + .materialize_folder_share(auth.workspace_id, auth.user_id, &auth.permissions, &token) + .await + .map_err(map_share_error)?; + Ok(Json(MaterializeResponse { created })) +} diff --git a/api/crates/presentation/src/http/documents/sharing/shares.rs b/api/crates/presentation/src/http/documents/sharing/shares.rs new file mode 100644 index 00000000..476bba3e --- /dev/null +++ b/api/crates/presentation/src/http/documents/sharing/shares.rs @@ -0,0 +1,124 @@ +use axum::{ + Json, + extract::{Path, State}, + http::StatusCode, +}; +use uuid::Uuid; + +use crate::context::DocumentsContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use application::core::services::access; +use domain::documents::share::SHARE_PERMISSION_VIEW; + +use application::documents::dtos::ShareItemDto; + +use super::types::{ + CreateShareRequest, CreateShareResponse, ShareItem, build_share_url, frontend_base, + map_share_error, +}; + +#[utoipa::path( + post, + path = "/api/shares", + tag = "Sharing", + request_body = CreateShareRequest, + responses((status = 200, description = "Share link created", body = CreateShareResponse)) +)] +pub async fn create_share( + State(ctx): State, + auth: WorkspaceAuth, + Json(req): Json, +) -> Result, ApiError> { + let actor = access::Actor::User(auth.user_id); + ctx.authorization() + .require_edit(&actor, req.document_id) + .await + .map_err(|err| crate::http::error::map_service_error(err, "authorization_error"))?; + let permission = req.permission.as_deref().unwrap_or(SHARE_PERMISSION_VIEW); + let service = ctx.share_service(); + let res = service + .create_share( + auth.workspace_id, + auth.user_id, + &auth.permissions, + req.document_id, + permission, + req.expires_at, + ) + .await + .map_err(map_share_error)?; + let base = frontend_base(&ctx.cfg); + let url = build_share_url(&base, &res.document_type, res.document_id, &res.token); + Ok(Json(CreateShareResponse { + token: res.token, + url, + })) +} + +#[utoipa::path( + get, + path = "/api/shares/documents/{id}", + tag = "Sharing", + params(("id" = Uuid, Path, description = "Document ID")), + responses((status = 200, description = "OK", body = [ShareItem])) +)] +pub async fn list_document_shares( + State(ctx): State, + auth: WorkspaceAuth, + Path(id): Path, +) -> Result>, ApiError> { + let actor = access::Actor::User(auth.user_id); + ctx.authorization() + .require_edit(&actor, id) + .await + .map_err(|err| crate::http::error::map_service_error(err, "authorization_error"))?; + let service = ctx.share_service(); + let rows: Vec = service + .list_document_shares(auth.workspace_id, &auth.permissions, id) + .await + 
+
+#[utoipa::path(
+    get,
+    path = "/api/shares/documents/{id}",
+    tag = "Sharing",
+    params(("id" = Uuid, Path, description = "Document ID")),
+    responses((status = 200, description = "OK", body = [ShareItem]))
+)]
+pub async fn list_document_shares(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<Json<Vec<ShareItem>>, ApiError> {
+    let actor = access::Actor::User(auth.user_id);
+    ctx.authorization()
+        .require_edit(&actor, id)
+        .await
+        .map_err(|err| crate::http::error::map_service_error(err, "authorization_error"))?;
+    let service = ctx.share_service();
+    let rows: Vec<ShareItemDto> = service
+        .list_document_shares(auth.workspace_id, &auth.permissions, id)
+        .await
+        .map_err(map_share_error)?;
+    let base = frontend_base(&ctx.cfg);
+    let items: Vec<ShareItem> = rows
+        .into_iter()
+        .map(|r| ShareItem::from_dto(&base, r))
+        .collect();
+    Ok(Json(items))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/shares/{token}",
+    tag = "Sharing",
+    params(("token" = String, Path, description = "Share token")),
+    responses((status = 204, description = "Share link deleted"))
+)]
+pub async fn delete_share(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    Path(token): Path<String>,
+) -> Result<StatusCode, ApiError> {
+    let service = ctx.share_service();
+    let meta = service
+        .share_document_meta(&token)
+        .await
+        .map_err(map_share_error)?
+        .ok_or(ApiError::not_found("not_found"))?;
+    if meta.workspace_id != auth.workspace_id {
+        return Err(ApiError::forbidden("forbidden"));
+    }
+    let actor = access::Actor::User(auth.user_id);
+    ctx.authorization()
+        .require_edit(&actor, meta.document_id)
+        .await
+        .map_err(|err| crate::http::error::map_service_error(err, "authorization_error"))?;
+    let ok = service
+        .delete_share(auth.workspace_id, &auth.permissions, &token)
+        .await
+        .map_err(map_share_error)?;
+    if ok {
+        Ok(StatusCode::NO_CONTENT)
+    } else {
+        Err(ApiError::not_found("not_found"))
+    }
+}
diff --git a/api/crates/presentation/src/http/documents/sharing/types.rs b/api/crates/presentation/src/http/documents/sharing/types.rs
new file mode 100644
index 00000000..caf43507
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/sharing/types.rs
@@ -0,0 +1,244 @@
+use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+use uuid::Uuid;
+
+use crate::context::PresentationConfig;
+use application::core::services::errors::ServiceError;
+use application::documents::dtos::{
+    ActiveShareItemDto, ApplicableShareDto, ShareBrowseResponseDto, ShareBrowseTreeItemDto,
+    ShareDocumentDto, ShareItemDto, ShareMountDto,
+};
+use domain::documents::doc_type::{DOC_TYPE_DOCUMENT, DOC_TYPE_FOLDER};
+
+pub fn frontend_base(cfg: &PresentationConfig) -> String {
+    cfg.frontend_url
+        .clone()
+        .unwrap_or_else(|| "http://localhost:3000".into())
+}
+
+pub fn build_share_url(base: &str, document_type: &str, document_id: Uuid, token: &str) -> String {
+    let base = base.trim_end_matches('/');
+    if document_type == DOC_TYPE_FOLDER {
+        format!("{}/share/{}", base, token)
+    } else {
+        format!("{}/document/{}?token={}", base, document_id, token)
+    }
+}
+
+pub fn share_scope(document_type: &str) -> String {
+    if document_type == DOC_TYPE_FOLDER {
+        DOC_TYPE_FOLDER.to_string()
+    } else {
+        DOC_TYPE_DOCUMENT.to_string()
+    }
+}
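// Aside (illustration, not part of the diff): the two URL shapes produced by
// build_share_url above, pinned by a quick check (host, token, and UUID are
// made-up values):
#[cfg(test)]
mod _share_url_shape {
    use super::*;
    #[test]
    fn folder_vs_document_urls() {
        let id = Uuid::nil();
        assert_eq!(
            build_share_url("https://app.example.com/", DOC_TYPE_FOLDER, id, "tok"),
            "https://app.example.com/share/tok"
        );
        assert_eq!(
            build_share_url("https://app.example.com", DOC_TYPE_DOCUMENT, id, "tok"),
            format!("https://app.example.com/document/{}?token=tok", id)
        );
    }
}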
+
+pub fn map_share_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "share_service_error")
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct CreateShareRequest {
+    pub document_id: Uuid,
+    pub permission: Option<String>,
+    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct CreateShareResponse {
+    pub token: String,
+    pub url: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ShareItem {
+    pub id: Uuid,
+    pub token: String,
+    pub permission: String,
+    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
+    pub url: String,
+    pub scope: String,
+    pub parent_share_id: Option<Uuid>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct ApplicableQuery {
+    pub doc_id: Uuid,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ApplicableShareItem {
+    pub token: String,
+    pub permission: String,
+    pub scope: String,
+    pub excluded: bool,
+}
+
+impl From<ApplicableShareDto> for ApplicableShareItem {
+    fn from(d: ApplicableShareDto) -> Self {
+        ApplicableShareItem {
+            token: d.token,
+            permission: d.permission,
+            scope: d.scope,
+            excluded: d.excluded,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ShareDocumentResponse {
+    pub id: Uuid,
+    pub title: String,
+    pub permission: String,
+    pub content: Option<String>,
+}
+
+impl From<ShareDocumentDto> for ShareDocumentResponse {
+    fn from(d: ShareDocumentDto) -> Self {
+        ShareDocumentResponse {
+            id: d.id,
+            title: d.title,
+            permission: d.permission,
+            content: d.content,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct ShareTokenQuery {
+    pub token: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ActiveShareItem {
+    pub id: Uuid,
+    pub token: String,
+    pub permission: String,
+    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub document_id: Uuid,
+    pub document_title: String,
+    pub document_type: String,
+    pub url: String,
+    pub parent_share_id: Option<Uuid>,
+}
+
+impl From<(ActiveShareItemDto, String)> for ActiveShareItem {
+    fn from((dto, base): (ActiveShareItemDto, String)) -> Self {
+        let url = build_share_url(&base, &dto.document_type, dto.document_id, &dto.token);
+        ActiveShareItem {
+            id: dto.id,
+            token: dto.token,
+            permission: dto.permission,
+            expires_at: dto.expires_at,
+            created_at: dto.created_at,
+            document_id: dto.document_id,
+            document_title: dto.document_title,
+            document_type: dto.document_type,
+            url,
+            parent_share_id: dto.parent_share_id,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct CreateShareMountRequest {
+    pub token: String,
+    pub parent_folder_id: Option<Uuid>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ShareMountItem {
+    pub id: Uuid,
+    pub token: String,
+    pub target_document_id: Uuid,
+    pub target_document_type: String,
+    pub target_title: String,
+    pub permission: String,
+    pub parent_folder_id: Option<Uuid>,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+}
+
+impl From<ShareMountDto> for ShareMountItem {
+    fn from(d: ShareMountDto) -> Self {
+        ShareMountItem {
+            id: d.id,
+            token: d.token,
+            target_document_id: d.target_document_id,
+            target_document_type: d.target_document_type,
+            target_title: d.target_title,
+            permission: d.permission,
+            parent_folder_id: d.parent_folder_id,
+            created_at: d.created_at,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ShareBrowseTreeItem {
+    pub id: Uuid,
+    pub title: String,
+    pub parent_id: Option<Uuid>,
+    #[schema(example = "document")]
+    pub r#type: String,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ShareBrowseResponse {
+    pub tree: Vec<ShareBrowseTreeItem>,
+}
+
+impl From<ShareBrowseTreeItemDto> for ShareBrowseTreeItem {
+    fn from(t: ShareBrowseTreeItemDto) -> Self {
+        ShareBrowseTreeItem {
+            id: t.id,
+            title: t.title,
+            parent_id: t.parent_id,
+            r#type: t.r#type,
+            created_at: t.created_at,
+            updated_at: t.updated_at,
+        }
+    }
+}
+
+impl From<ShareBrowseResponseDto> for ShareBrowseResponse {
+    fn from(d: ShareBrowseResponseDto) -> Self {
+        ShareBrowseResponse {
+            tree: d.tree.into_iter().map(Into::into).collect(),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct MaterializeResponse {
+    pub created: i64,
+}
+
+impl ShareItem {
+    pub fn from_dto(base: &str, dto: ShareItemDto) -> Self {
+        let ShareItemDto {
+            id,
+            token,
+            permission,
+            expires_at,
+            document_id,
+            document_type,
+            parent_share_id,
+            ..
+        } = dto;
+        let url = build_share_url(base, &document_type, document_id, &token);
+        ShareItem {
+            id,
+            token,
+            permission,
+            expires_at,
+            url,
+            scope: share_scope(&document_type),
+            parent_share_id,
+        }
+    }
+}
+
+pub use axum::http::StatusCode;
diff --git a/api/crates/presentation/src/http/documents/sharing/validation.rs b/api/crates/presentation/src/http/documents/sharing/validation.rs
new file mode 100644
index 00000000..dcd98ef8
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/sharing/validation.rs
@@ -0,0 +1,49 @@
+use axum::{
+    Json,
+    extract::{Query, State},
+};
+
+use crate::context::DocumentsContext;
+use crate::http::error::ApiError;
+
+use super::types::{ShareBrowseResponse, ShareDocumentResponse, ShareTokenQuery, map_share_error};
+
+#[utoipa::path(
+    get,
+    path = "/api/shares/validate",
+    tag = "Sharing",
+    params(("token" = String, Query, description = "Share token")),
+    responses((status = 200, description = "Document info", body = ShareDocumentResponse))
+)]
+pub async fn validate_share_token(
+    State(ctx): State<DocumentsContext>,
+    Query(query): Query<ShareTokenQuery>,
+) -> Result<Json<ShareDocumentResponse>, ApiError> {
+    let service = ctx.share_service();
+    let res = service
+        .validate_token(&query.token)
+        .await
+        .map_err(map_share_error)?;
+    let out: ShareDocumentResponse = res
+        .map(Into::into)
+        .ok_or(ApiError::not_found("not_found"))?;
+    Ok(Json(out))
+}
+
+#[utoipa::path(get, path = "/api/shares/browse", tag = "Sharing",
+    params(("token" = String, Query, description = "Share token")),
+    responses((status = 200, description = "Share tree", body = ShareBrowseResponse)))]
+pub async fn browse_share(
+    State(ctx): State<DocumentsContext>,
+    Query(query): Query<ShareTokenQuery>,
+) -> Result<Json<ShareBrowseResponse>, ApiError> {
+    let service = ctx.share_service();
+    let res = service
+        .browse_share(&query.token)
+        .await
+        .map_err(map_share_error)?;
+    let out: ShareBrowseResponse = res
+        .map(Into::into)
+        .ok_or(ApiError::not_found("not_found"))?;
+    Ok(Json(out))
+}
diff --git a/api/crates/presentation/src/http/documents/tagging/handlers/mod.rs b/api/crates/presentation/src/http/documents/tagging/handlers/mod.rs
new file mode 100644
index 00000000..0ffbf918
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/tagging/handlers/mod.rs
@@ -0,0 +1,35 @@
+use axum::{
+    Json,
+    extract::{Query, State},
+};
+
+use crate::context::DocumentsContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::errors::ServiceError;
+use domain::access::permissions::PERM_DOC_VIEW;
+
+use super::types::TagItem;
+
+fn map_tag_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "tag_service_error")
+}
+
+#[utoipa::path(get, path = "/api/tags", tag = "Tags",
+    params(("q" = Option<String>, Query, description = "Filter contains")),
+    responses((status = 200, body = [TagItem])))]
+pub async fn list_tags(
+    State(ctx): State<DocumentsContext>,
+    auth: WorkspaceAuth,
+    q: Option<Query<std::collections::HashMap<String, String>>>,
+) -> Result<Json<Vec<TagItem>>, ApiError> {
+    auth.ensure_permission(PERM_DOC_VIEW)?;
+    let filter = q.and_then(|Query(m)| m.get("q").cloned());
+    let service = ctx.tag_service();
+    let items = service
+        .list(auth.workspace_id, filter)
+        .await
+        .map_err(map_tag_error)?;
+    let out: Vec<TagItem> = items.into_iter().map(Into::into).collect();
+    Ok(Json(out))
+}
diff --git a/api/crates/presentation/src/http/documents/tagging/mod.rs b/api/crates/presentation/src/http/documents/tagging/mod.rs
new file mode 100644
index 00000000..575f2868
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/tagging/mod.rs
@@ -0,0 +1,17 @@
+mod handlers;
+pub mod types;
+
+use axum::{Router, routing::get};
+
+use crate::context::AppContext;
+
+pub use handlers::list_tags;
+pub use types::*;
+
+pub mod openapi {
+    pub use super::handlers::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new().route("/tags", get(list_tags)).with_state(ctx)
+}
diff --git a/api/crates/presentation/src/http/documents/tagging/types.rs b/api/crates/presentation/src/http/documents/tagging/types.rs
new file mode 100644
index 00000000..69bf44a6
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/tagging/types.rs
@@ -0,0 +1,19 @@
+use serde::Serialize;
+use utoipa::ToSchema;
+
+use application::documents::dtos::TagItemDto;
+
+#[derive(Serialize, ToSchema)]
+pub struct TagItem {
+    pub name: String,
+    pub count: i64,
+}
+
+impl From<TagItemDto> for TagItem {
+    fn from(d: TagItemDto) -> Self {
+        TagItem {
+            name: d.name,
+            count: d.count,
+        }
+    }
+}
diff --git a/api/crates/presentation/src/http/documents/types.rs b/api/crates/presentation/src/http/documents/types.rs
new file mode 100644
index 00000000..9c1e5436
--- /dev/null
+++ b/api/crates/presentation/src/http/documents/types.rs
@@ -0,0 +1,472 @@
+use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+use uuid::Uuid;
+
+use crate::http::error::ApiError;
+use application::core::services::errors::ServiceError;
+use application::documents::dtos::DocumentDownloadFormat;
+use application::documents::dtos::{
+    DocumentListFilter, SnapshotDiffBaseMode, SnapshotDiffSideDto, SnapshotSummaryDto,
+};
+use application::documents::services::DocumentPatchOperation;
+use contracts::core::dtos::TextDiffResult;
+use domain::documents::document as domain;
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct Document {
+    pub id: Uuid,
+    /// Legacy alias for `workspace_id` kept for backward compatibility with older clients.
+    pub owner_id: Uuid,
+    pub workspace_id: Uuid,
+    pub title: String,
+    pub parent_id: Option<Uuid>,
+    pub r#type: String,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub created_by_plugin: Option<String>,
+    pub slug: String,
+    pub desired_path: String,
+    pub path: Option<String>,
+    pub created_by: Option<Uuid>,
+    pub archived_at: Option<chrono::DateTime<chrono::Utc>>,
+    pub archived_by: Option<Uuid>,
+    pub archived_parent_id: Option<Uuid>,
+}
+
+pub fn to_http_document(doc: domain::Document) -> Document {
+    Document {
+        id: doc.id(),
+        // NOTE: Older clients used `owner_id` to identify the workspace.
+        owner_id: doc.workspace_id(),
+        workspace_id: doc.workspace_id(),
+        title: doc.title().as_str().to_string(),
+        parent_id: doc.parent_id(),
+        r#type: doc.doc_type().to_string(),
+        created_at: doc.created_at(),
+        updated_at: doc.updated_at(),
+        created_by_plugin: doc.created_by_plugin().map(str::to_string),
+        slug: doc.slug().as_str().to_string(),
+        desired_path: doc.desired_path().as_str().to_string(),
+        path: doc.path().map(str::to_string),
+        created_by: doc.created_by(),
+        archived_at: doc.archived_at(),
+        archived_by: doc.archived_by(),
+        archived_parent_id: doc.archived_parent_id(),
+    }
+}
+
+pub fn map_service_error(err: ServiceError) -> ApiError {
+    crate::http::error::map_service_error(err, "document_service_error")
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct DocumentListResponse {
+    pub items: Vec<Document>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SnapshotSummary {
+    pub id: Uuid,
+    pub document_id: Uuid,
+    pub label: String,
+    pub notes: Option<String>,
+    pub kind: String,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub created_by: Option<Uuid>,
+    pub byte_size: i64,
+    pub content_hash: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SnapshotListResponse {
+    pub items: Vec<SnapshotSummary>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+#[serde(rename_all = "lowercase")]
+pub enum SnapshotDiffKind {
+    Current,
+    Snapshot,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SnapshotDiffSideResponse {
+    pub kind: SnapshotDiffKind,
+    pub markdown: String,
+    pub snapshot: Option<SnapshotSummary>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SnapshotDiffResponse {
+    pub base: SnapshotDiffSideResponse,
+    pub target: SnapshotDiffSideResponse,
+    pub diff: TextDiffResult,
+}
+
+#[derive(Debug, Clone, Copy, Deserialize, ToSchema, Default)]
+#[serde(rename_all = "snake_case")]
+pub enum SnapshotDiffBaseParam {
+    #[default]
+    Auto,
+    Current,
+    Previous,
+}
+
+impl From<SnapshotDiffBaseParam> for SnapshotDiffBaseMode {
+    fn from(value: SnapshotDiffBaseParam) -> Self {
+        match value {
+            SnapshotDiffBaseParam::Auto => SnapshotDiffBaseMode::Auto,
+            SnapshotDiffBaseParam::Current => SnapshotDiffBaseMode::ForceCurrent,
+            SnapshotDiffBaseParam::Previous => SnapshotDiffBaseMode::ForcePrevious,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SnapshotRestoreResponse {
+    pub snapshot: SnapshotSummary,
+}
+
+pub fn snapshot_summary_from(record: SnapshotSummaryDto) -> SnapshotSummary {
+    SnapshotSummary {
+        id: record.id,
+        document_id: record.document_id,
+        label: record.label,
+        notes: record.notes,
+        kind: record.kind,
+        created_at: record.created_at,
+        created_by: record.created_by,
+        byte_size: record.byte_size,
+        content_hash: record.content_hash,
+    }
+}
+
+pub fn snapshot_diff_side_response_from(side: SnapshotDiffSideDto) -> SnapshotDiffSideResponse {
+    match side {
+        SnapshotDiffSideDto::Current { markdown } => SnapshotDiffSideResponse {
+            kind: SnapshotDiffKind::Current,
+            markdown,
+            snapshot: None,
+        },
+        SnapshotDiffSideDto::Snapshot { snapshot, markdown } => SnapshotDiffSideResponse {
+            kind: SnapshotDiffKind::Snapshot,
+            markdown,
+            snapshot: Some(snapshot_summary_from(snapshot)),
+        },
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct CreateDocumentRequest {
+    pub title: Option<String>,
+    pub parent_id: Option<Uuid>,
+    pub r#type: Option<String>,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct UpdateDocumentRequest {
+    pub title: Option<String>,
+    #[serde(default, deserialize_with = "deserialize_double_option")]
+    #[schema(value_type = Option<Uuid>)]
+    pub parent_id: DoubleOption<Uuid>,
+}
+
+impl Default for UpdateDocumentRequest {
+    fn default() -> Self {
+        Self {
+            title: None,
+            parent_id: DoubleOption::NotProvided,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct DuplicateDocumentRequest {
+    pub title: Option<String>,
+    #[serde(default, deserialize_with = "deserialize_double_option")]
+    #[schema(value_type = Option<Uuid>)]
+    pub parent_id: DoubleOption<Uuid>,
+}
+
+impl Default for DuplicateDocumentRequest {
+    fn default() -> Self {
+        Self {
+            title: None,
+            parent_id: DoubleOption::NotProvided,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Default)]
+pub enum DoubleOption<T> {
+    #[default]
+    NotProvided,
+    Null,
+    Some(T),
+}
+
+fn deserialize_double_option<'de, D, T>(deserializer: D) -> Result<DoubleOption<T>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+    T: serde::Deserialize<'de>,
+{
+    Option::<T>::deserialize(deserializer).map(|opt| match opt {
+        None => DoubleOption::Null,
+        Some(value) => DoubleOption::Some(value),
+    })
+}
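// Aside (illustration, not part of the diff): the double-Option pattern lets
// PATCH-style requests distinguish "field absent" from "field explicitly
// null". Because deserialize_double_option only runs when the key is present,
// #[serde(default)] covers the absent case:
fn _double_option_semantics() {
    let absent: UpdateDocumentRequest = serde_json::from_str(r#"{}"#).unwrap();
    assert!(matches!(absent.parent_id, DoubleOption::NotProvided)); // leave as-is
    let cleared: UpdateDocumentRequest =
        serde_json::from_str(r#"{"parent_id":null}"#).unwrap();
    assert!(matches!(cleared.parent_id, DoubleOption::Null)); // explicit null
    let moved: UpdateDocumentRequest =
        serde_json::from_str(r#"{"parent_id":"00000000-0000-0000-0000-000000000000"}"#).unwrap();
    assert!(matches!(moved.parent_id, DoubleOption::Some(_))); // new parent
}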
+
+#[derive(Debug, Deserialize)]
+pub struct ListDocumentsQuery {
+    pub query: Option<String>,
+    pub tag: Option<String>,
+    #[serde(default)]
+    pub state: Option<DocumentStateFilter>,
+}
+
+#[derive(Debug, Clone, Copy, Deserialize, ToSchema)]
+#[serde(rename_all = "lowercase")]
+pub enum DocumentStateFilter {
+    Active,
+    Archived,
+    All,
+}
+
+impl From<DocumentStateFilter> for DocumentListFilter {
+    fn from(value: DocumentStateFilter) -> Self {
+        match value {
+            DocumentStateFilter::Active => DocumentListFilter::Active,
+            DocumentStateFilter::Archived => DocumentListFilter::Archived,
+            DocumentStateFilter::All => DocumentListFilter::All,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct UpdateDocumentContentRequest {
+    pub content: String,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+#[serde(tag = "op", rename_all = "snake_case")]
+pub enum DocumentPatchOperationRequest {
+    Insert {
+        offset: usize,
+        text: String,
+    },
+    Delete {
+        offset: usize,
+        length: usize,
+    },
+    Replace {
+        offset: usize,
+        length: usize,
+        text: String,
+    },
+}
+
+impl From<DocumentPatchOperationRequest> for DocumentPatchOperation {
+    fn from(value: DocumentPatchOperationRequest) -> Self {
+        match value {
+            DocumentPatchOperationRequest::Insert { offset, text } => {
+                DocumentPatchOperation::Insert { offset, text }
+            }
+            DocumentPatchOperationRequest::Delete { offset, length } => {
+                DocumentPatchOperation::Delete { offset, length }
+            }
+            DocumentPatchOperationRequest::Replace {
+                offset,
+                length,
+                text,
+            } => DocumentPatchOperation::Replace {
+                offset,
+                length,
+                text,
+            },
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct PatchDocumentContentRequest {
+    pub operations: Vec<DocumentPatchOperationRequest>,
+}
+
+#[allow(dead_code)]
+#[derive(ToSchema)]
+pub struct DocumentDownloadBinary(#[schema(value_type = String, format = Binary)] pub Vec<u8>);
+
+#[allow(dead_code)]
+#[derive(ToSchema)]
+pub struct DocumentArchiveBinary(#[schema(value_type = String, format = Binary)] pub Vec<u8>);
+
+#[derive(Debug, Clone, Copy, Deserialize, ToSchema, Default)]
+#[serde(rename_all = "snake_case")]
+#[schema(rename_all = "snake_case")]
+pub enum DownloadFormat {
+    #[default]
+    Archive,
+    Markdown,
+    Html,
+    Html5,
+    Pdf,
+    Docx,
+    Latex,
+    Beamer,
+    Context,
+    Man,
+    Mediawiki,
+    Dokuwiki,
+    Textile,
+    Org,
+    Texinfo,
+    Opml,
+    Docbook,
+    Opendocument,
+    Odt,
+    Rtf,
+    Epub,
+    Epub3,
+    Fb2,
+    Asciidoc,
+    Icml,
+    Slidy,
+    Slideous,
+    Dzslides,
+    Revealjs,
+    S5,
+    Json,
+    Plain,
+    Commonmark,
+    CommonmarkX,
+    MarkdownStrict,
+    MarkdownPhpextra,
+    MarkdownGithub,
+    Rst,
+    Native,
+    Haddock,
+}
+
+impl From<DownloadFormat> for DocumentDownloadFormat {
+    fn from(value: DownloadFormat) -> Self {
+        match value {
+            DownloadFormat::Archive => DocumentDownloadFormat::Archive,
+            DownloadFormat::Markdown => DocumentDownloadFormat::Markdown,
+            DownloadFormat::Html => DocumentDownloadFormat::Html,
+            DownloadFormat::Html5 => DocumentDownloadFormat::Html5,
+            DownloadFormat::Pdf => DocumentDownloadFormat::Pdf,
+            DownloadFormat::Docx => DocumentDownloadFormat::Docx,
+            DownloadFormat::Latex => DocumentDownloadFormat::Latex,
+            DownloadFormat::Beamer => DocumentDownloadFormat::Beamer,
+            DownloadFormat::Context => DocumentDownloadFormat::Context,
+            DownloadFormat::Man => DocumentDownloadFormat::Man,
+            DownloadFormat::Mediawiki => DocumentDownloadFormat::MediaWiki,
+            DownloadFormat::Dokuwiki => DocumentDownloadFormat::Dokuwiki,
+            DownloadFormat::Textile => DocumentDownloadFormat::Textile,
+            DownloadFormat::Org => DocumentDownloadFormat::Org,
+            DownloadFormat::Texinfo => DocumentDownloadFormat::Texinfo,
+            DownloadFormat::Opml => DocumentDownloadFormat::Opml,
+            DownloadFormat::Docbook => DocumentDownloadFormat::Docbook,
+            DownloadFormat::Opendocument => DocumentDownloadFormat::OpenDocument,
+            DownloadFormat::Odt => DocumentDownloadFormat::Odt,
+            DownloadFormat::Rtf => DocumentDownloadFormat::Rtf,
+            DownloadFormat::Epub => DocumentDownloadFormat::Epub,
+            DownloadFormat::Epub3 => DocumentDownloadFormat::Epub3,
+            DownloadFormat::Fb2 => DocumentDownloadFormat::Fb2,
+            DownloadFormat::Asciidoc => DocumentDownloadFormat::Asciidoc,
+            DownloadFormat::Icml => DocumentDownloadFormat::Icml,
+            DownloadFormat::Slidy => DocumentDownloadFormat::Slidy,
+            DownloadFormat::Slideous => DocumentDownloadFormat::Slideous,
+            DownloadFormat::Dzslides => DocumentDownloadFormat::Dzslides,
+            DownloadFormat::Revealjs => DocumentDownloadFormat::Revealjs,
+            DownloadFormat::S5 => DocumentDownloadFormat::S5,
+            DownloadFormat::Json => DocumentDownloadFormat::Json,
+            DownloadFormat::Plain => DocumentDownloadFormat::Plain,
+            DownloadFormat::Commonmark => DocumentDownloadFormat::Commonmark,
+            DownloadFormat::CommonmarkX => DocumentDownloadFormat::CommonmarkX,
+            DownloadFormat::MarkdownStrict => DocumentDownloadFormat::MarkdownStrict,
+            DownloadFormat::MarkdownPhpextra => DocumentDownloadFormat::MarkdownPhpextra,
+            DownloadFormat::MarkdownGithub => DocumentDownloadFormat::MarkdownGithub,
+            DownloadFormat::Rst => DocumentDownloadFormat::Rst,
+            DownloadFormat::Native => DocumentDownloadFormat::Native,
+            DownloadFormat::Haddock => DocumentDownloadFormat::Haddock,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema, Default)]
+pub struct DownloadDocumentQuery {
+    pub token: Option<String>,
+    #[serde(default)]
+    pub format: DownloadFormat,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SearchResult {
+    pub id: Uuid,
+    pub title: String,
+    pub document_type: String,
+    pub path: Option<String>,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct SearchQuery {
+    pub q: Option<String>,
+}
+
+#[derive(Debug, Default, Deserialize)]
+pub struct ListSnapshotsQuery {
+    pub token: Option<String>,
+    pub limit: Option<i64>,
+    pub offset: Option<i64>,
+}
+
+#[derive(Debug, Default, Deserialize)]
+pub struct SnapshotDiffQuery {
+    pub token: Option<String>,
+    pub compare: Option<Uuid>,
+    #[serde(default)]
+    pub base: Option<SnapshotDiffBaseParam>,
+}
+
+#[derive(Debug, Default, Deserialize)]
+pub struct SnapshotTokenQuery {
+    pub token: Option<String>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct BacklinkInfo {
+    pub document_id: String,
+    pub title: String,
+    pub document_type: String,
+    pub file_path: Option<String>,
+    pub link_type: String,
+    pub link_text: Option<String>,
+    pub link_count: i64,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct BacklinksResponse {
+    pub backlinks: Vec<BacklinkInfo>,
+    pub total_count: usize,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct OutgoingLink {
+    pub document_id: String,
+    pub title: String,
+    pub document_type: String,
+    pub file_path: Option<String>,
+    pub link_type: String,
+    pub link_text: Option<String>,
+    pub position_start: Option<i64>,
+    pub position_end: Option<i64>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct OutgoingLinksResponse {
+    pub links: Vec<OutgoingLink>,
+    pub total_count: usize,
+}
diff --git a/api/crates/presentation/src/http/error.rs b/api/crates/presentation/src/http/error.rs
new file mode 100644
index 00000000..d90cd22e
--- /dev/null
+++ b/api/crates/presentation/src/http/error.rs
@@ -0,0 +1,101 @@
+use axum::Json;
+use axum::http::StatusCode;
+use axum::response::{IntoResponse, Response};
+use serde::Serialize;
+
+use application::core::services::errors::ServiceError;
+
+#[derive(Debug, Serialize)]
+pub struct ApiErrorBody {
+    pub code: &'static str,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub message: Option<&'static str>,
+}
+
+#[derive(Debug)]
+pub struct ApiError {
+    status: StatusCode,
+    code: &'static str,
+    message: Option<&'static str>,
+}
+
+impl ApiError {
+    pub fn new(status: StatusCode, code: &'static str) -> Self {
+        Self {
+            status,
+            code,
+            message: None,
+        }
+    }
+
+    pub fn with_message(mut self, message: &'static str) -> Self {
+        self.message = Some(message);
+        self
+    }
+
+    pub fn bad_request(code: &'static str) -> Self {
+        Self::new(StatusCode::BAD_REQUEST, code)
+    }
+
+    pub fn unauthorized(code: &'static str) -> Self {
+        Self::new(StatusCode::UNAUTHORIZED, code)
+    }
+
+    pub fn forbidden(code: &'static str) -> Self {
+        Self::new(StatusCode::FORBIDDEN, code)
+    }
+
+    pub fn not_found(code: &'static str) -> Self {
+        Self::new(StatusCode::NOT_FOUND, code)
+    }
+
+    pub fn conflict(code: &'static str) -> Self {
+        Self::new(StatusCode::CONFLICT, code)
+    }
+
+    pub fn status(&self) -> StatusCode {
+        self.status
+    }
+}
+
+impl IntoResponse for ApiError {
+    fn into_response(self) -> Response {
+        (
+            self.status,
+            Json(ApiErrorBody {
+                code: self.code,
+                message: self.message,
+            }),
+        )
+            .into_response()
+    }
+}
+
+pub fn map_service_error(err: ServiceError, log_context: &'static str) -> ApiError {
+    match err {
+        ServiceError::Unauthorized => ApiError::unauthorized("unauthorized"),
+        ServiceError::TokenExpired => ApiError::unauthorized("token_expired"),
+        ServiceError::Forbidden => ApiError::forbidden("forbidden"),
+        ServiceError::Conflict => ApiError::conflict("conflict"),
+        ServiceError::NotFound => ApiError::not_found("not_found"),
+        ServiceError::BadRequest(code) => ApiError::bad_request(code).with_message(code),
+        ServiceError::Unexpected(inner) => {
+            tracing::error!(error = ?inner, context = log_context, "service_error");
+            ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error")
+        }
+    }
+}
+
+pub fn map_service_error_no_log(err: ServiceError) -> ApiError {
+    match err {
+        ServiceError::Unauthorized => ApiError::unauthorized("unauthorized"),
+        ServiceError::TokenExpired => ApiError::unauthorized("token_expired"),
+        ServiceError::Forbidden => ApiError::forbidden("forbidden"),
+        ServiceError::Conflict => ApiError::conflict("conflict"),
+        ServiceError::NotFound => ApiError::not_found("not_found"),
+        ServiceError::BadRequest(code) => ApiError::bad_request(code).with_message(code),
+        ServiceError::Unexpected(_) => {
+            ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error")
+        }
+    }
+}
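// Aside (illustration, not part of the diff): the wire shape ApiError
// produces. `message` is skipped when None, so most errors serialize to a
// single field ("invalid_request" is a made-up code for the example):
fn _api_error_body_shape() {
    let bare =
        serde_json::to_string(&ApiErrorBody { code: "not_found", message: None }).unwrap();
    assert_eq!(bare, r#"{"code":"not_found"}"#);
    let with_msg = serde_json::to_string(&ApiErrorBody {
        code: "invalid_request",
        message: Some("invalid_request"),
    })
    .unwrap();
    assert_eq!(with_msg, r#"{"code":"invalid_request","message":"invalid_request"}"#);
}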
diff --git a/api/crates/presentation/src/http/extractors.rs b/api/crates/presentation/src/http/extractors.rs
new file mode 100644
index 00000000..391a2260
--- /dev/null
+++ b/api/crates/presentation/src/http/extractors.rs
@@ -0,0 +1,119 @@
+use axum::extract::FromRequestParts;
+use axum::http::request::Parts;
+use domain::access::permissions::PermissionSet;
+use uuid::Uuid;
+
+use crate::context::AppContext;
+use crate::http::error::ApiError;
+use crate::http::workspaces::scope as workspace_scope;
+use crate::security::token::{self, Bearer};
+
+#[derive(Debug, Clone)]
+pub struct AuthedUser {
+    pub user_id: Uuid,
+    pub bearer_token: String,
+}
+
+#[axum::async_trait]
+impl FromRequestParts<AppContext> for AuthedUser {
+    type Rejection = ApiError;
+
+    async fn from_request_parts(
+        parts: &mut Parts,
+        state: &AppContext,
+    ) -> Result<Self, Self::Rejection> {
+        let bearer = Bearer::from_request_parts(parts, state).await?;
+        let bearer_token = bearer.0.clone();
+        let user_id = token::require_user_id(state, bearer)
+            .await
+            .map_err(token::map_actor_error)?;
+        Ok(Self {
+            user_id,
+            bearer_token,
+        })
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct WorkspaceAuth {
+    pub user_id: Uuid,
+    pub workspace_id: Uuid,
+    pub permissions: PermissionSet,
+    pub bearer_token: String,
+}
+
+impl WorkspaceAuth {
+    pub fn ensure_permission(&self, permission: &str) -> Result<(), ApiError> {
+        if self.permissions.allows(permission) {
+            Ok(())
+        } else {
+            Err(ApiError::forbidden("forbidden"))
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct WorkspaceUser {
+    pub user_id: Uuid,
+    pub workspace_id: Uuid,
+    pub bearer_token: String,
+}
+
+#[axum::async_trait]
+impl FromRequestParts<AppContext> for WorkspaceAuth {
+    type Rejection = ApiError;
+
+    async fn from_request_parts(
+        parts: &mut Parts,
+        state: &AppContext,
+    ) -> Result<Self, Self::Rejection> {
+        let bearer = Bearer::from_request_parts(parts, state).await?;
+        let bearer_token = bearer.0.clone();
+        let user_id = token::require_user_id(state, bearer)
+            .await
+            .map_err(token::map_actor_error)?;
+        let workspace_id = workspace_scope::resolve_active_workspace_id(
+            state,
+            &parts.headers,
+            Some(bearer_token.as_str()),
+            user_id,
+        )
+        .await?;
+        let permissions =
+            workspace_scope::resolve_workspace_permissions(state, workspace_id, user_id).await?;
+        Ok(Self {
+            user_id,
+            workspace_id,
+            permissions,
+            bearer_token,
+        })
+    }
+}
+
+#[axum::async_trait]
+impl FromRequestParts<AppContext> for WorkspaceUser {
+    type Rejection = ApiError;
+
+    async fn from_request_parts(
+        parts: &mut Parts,
+        state: &AppContext,
+    ) -> Result<Self, Self::Rejection> {
+        let bearer = Bearer::from_request_parts(parts, state).await?;
+        let bearer_token = bearer.0.clone();
+        let user_id = token::require_user_id(state, bearer)
+            .await
+            .map_err(token::map_actor_error)?;
+        let workspace_id = workspace_scope::resolve_active_workspace_id(
+            state,
+            &parts.headers,
+            Some(bearer_token.as_str()),
+            user_id,
+        )
+        .await?;
+        Ok(Self {
+            user_id,
+            workspace_id,
+            bearer_token,
+        })
+    }
+}
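// Aside (illustration, not part of the diff): a handler only needs to name
// the extractor; axum then runs the bearer -> user -> workspace -> permissions
// chain before the body executes (hypothetical endpoint):
async fn _whoami(auth: WorkspaceAuth) -> String {
    format!("user {} in workspace {}", auth.user_id, auth.workspace_id)
}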
diff --git a/api/crates/presentation/src/http/git/config.rs b/api/crates/presentation/src/http/git/config.rs
new file mode 100644
index 00000000..0d34963e
--- /dev/null
+++ b/api/crates/presentation/src/http/git/config.rs
@@ -0,0 +1,82 @@
+use axum::{Json, extract::State, http::StatusCode};
+
+use crate::context::GitContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::errors::ServiceError;
+use application::git::dtos::GitConfigDto;
+use application::git::dtos::UpsertGitConfigInput;
+use domain::access::permissions::{PERM_GIT_CONFIGURE, PERM_GIT_INIT, PERM_GIT_SYNC};
+
+use super::types::{
+    CreateGitConfigRequest, GitConfigResponse, GitRemoteCheckResponse, map_git_error,
+};
+
+#[utoipa::path(get, path = "/api/git/config", tag = "Git", responses((status = 200, body = Option<GitConfigResponse>)))]
+pub async fn get_config(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+) -> Result<Json<Option<GitConfigResponse>>, ApiError> {
+    auth.ensure_permission(PERM_GIT_INIT)?;
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+    auth.ensure_permission(PERM_GIT_CONFIGURE)?;
+    let service = ctx.git_service();
+    let resp: Option<GitConfigDto> = service
+        .get_config(auth.workspace_id)
+        .await
+        .map_err(map_git_error)?;
+    let mut out: Option<GitConfigResponse> = resp.map(Into::into);
+    if let Some(ref mut cfg) = out
+        && let Some(check) = service
+            .check_remote(auth.workspace_id)
+            .await
+            .map_err(map_git_error)?
+    {
+        cfg.remote_check = Some(GitRemoteCheckResponse::from(check));
+    }
+    Ok(Json(out))
+}
+
+#[utoipa::path(post, path = "/api/git/config", tag = "Git", request_body = CreateGitConfigRequest, responses((status = 200, body = GitConfigResponse)))]
+pub async fn create_or_update_config(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+    Json(req): Json<CreateGitConfigRequest>,
+) -> Result<Json<GitConfigResponse>, ApiError> {
+    auth.ensure_permission(PERM_GIT_INIT)?;
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+    auth.ensure_permission(PERM_GIT_CONFIGURE)?;
+    let input: UpsertGitConfigInput = req.into();
+    let service = ctx.git_service();
+    let resp: GitConfigDto = service
+        .upsert_config(auth.workspace_id, &input)
+        .await
+        .map_err(|err| match err {
+            ServiceError::BadRequest(code) => ApiError::bad_request(code).with_message(code),
+            other => map_git_error(other),
+        })?;
+    let mut out: GitConfigResponse = resp.into();
+    if let Some(check) = service
+        .check_remote(auth.workspace_id)
+        .await
+        .map_err(map_git_error)?
+    {
+        out.remote_check = Some(check.into());
+    }
+    Ok(Json(out))
+}
+
+#[utoipa::path(delete, path = "/api/git/config", tag = "Git", responses((status = 204, description = "Deleted")))]
+pub async fn delete_config(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+) -> Result<StatusCode, ApiError> {
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+    auth.ensure_permission(PERM_GIT_CONFIGURE)?;
+    let service = ctx.git_service();
+    service
+        .delete_config(auth.workspace_id)
+        .await
+        .map_err(map_git_error)?;
+    Ok(StatusCode::NO_CONTENT)
+}
diff --git a/api/crates/presentation/src/http/git/ignore.rs b/api/crates/presentation/src/http/git/ignore.rs
new file mode 100644
index 00000000..2a6ddd37
--- /dev/null
+++ b/api/crates/presentation/src/http/git/ignore.rs
@@ -0,0 +1,90 @@
+use axum::{Json, extract::State};
+use uuid::Uuid;
+
+use crate::context::GitContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceUser;
+use application::core::services::errors::ServiceError;
+
+use super::types::{
+    AddPatternsRequest, CheckIgnoredRequest, GitignoreUpdateResponse, map_git_error,
+};
+
+#[utoipa::path(post, path = "/api/git/ignore/doc/{id}", params(("id" = String, Path, description = "Document ID")), tag = "Git", responses((status = 200, description = "OK")))]
+pub async fn ignore_document(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceUser,
+    axum::extract::Path(id): axum::extract::Path<String>,
+) -> Result<Json<GitignoreUpdateResponse>, ApiError> {
+    let doc_id = Uuid::parse_str(&id).map_err(|_| ApiError::bad_request("invalid_document_id"))?;
+    let service = ctx.git_service();
+    let res = service
+        .ignore_document(auth.workspace_id, doc_id)
+        .await
+        .map_err(|err| match err {
+            ServiceError::NotFound => ApiError::not_found("not_found"),
+            other => map_git_error(other),
+        })?;
+    Ok(Json(res.into()))
+}
params(("id" = String, Path, description = "Folder ID")), tag = "Git", responses((status = 200, description = "OK")))] +pub async fn ignore_folder( + State(ctx): State, + auth: WorkspaceUser, + axum::extract::Path(id): axum::extract::Path, +) -> Result, ApiError> { + let folder_id = Uuid::parse_str(&id).map_err(|_| ApiError::bad_request("invalid_folder_id"))?; + let service = ctx.git_service(); + let res = service + .ignore_folder(auth.workspace_id, folder_id) + .await + .map_err(|err| match err { + ServiceError::NotFound => ApiError::not_found("not_found"), + other => map_git_error(other), + })?; + Ok(Json(res.into())) +} + +#[utoipa::path(post, path = "/api/git/gitignore/patterns", tag = "Git", request_body = AddPatternsRequest, responses((status = 200, description = "OK")))] +pub async fn add_gitignore_patterns( + State(ctx): State, + auth: WorkspaceUser, + Json(req): Json, +) -> Result, ApiError> { + let service = ctx.git_service(); + let added = service + .add_gitignore_patterns(auth.workspace_id, req.patterns) + .await + .map_err(map_git_error)?; + Ok(Json(serde_json::json!({"added": added}))) +} + +#[utoipa::path(get, path = "/api/git/gitignore/patterns", tag = "Git", responses((status = 200, description = "OK")))] +pub async fn get_gitignore_patterns( + State(ctx): State, + auth: WorkspaceUser, +) -> Result, ApiError> { + let service = ctx.git_service(); + let patterns = service + .get_gitignore_patterns(auth.workspace_id) + .await + .map_err(map_git_error)?; + Ok(Json(serde_json::json!({"patterns": patterns}))) +} + +#[utoipa::path(post, path = "/api/git/gitignore/check", tag = "Git", request_body = CheckIgnoredRequest, responses((status = 200, description = "OK")))] +pub async fn check_path_ignored( + State(ctx): State, + auth: WorkspaceUser, + Json(req): Json, +) -> Result, ApiError> { + let service = ctx.git_service(); + let is_ignored = service + .check_path_ignored(auth.workspace_id, &req.path) + .await + .map_err(map_git_error)?; + Ok(Json( + serde_json::json!({"path": req.path, "is_ignored": is_ignored}), + )) +} diff --git a/api/crates/presentation/src/http/git/mod.rs b/api/crates/presentation/src/http/git/mod.rs new file mode 100644 index 00000000..836a129e --- /dev/null +++ b/api/crates/presentation/src/http/git/mod.rs @@ -0,0 +1,69 @@ +mod config; +mod ignore; +mod pull; +mod status; +mod sync; +pub mod types; + +use axum::{ + Router, + routing::{get, post}, +}; + +use crate::context::AppContext; + +pub use config::{create_or_update_config, delete_config, get_config}; +pub use ignore::{ + add_gitignore_patterns, check_path_ignored, get_gitignore_patterns, ignore_document, + ignore_folder, +}; +pub use pull::{ + finalize_pull_session, get_pull_session, pull_repository, resolve_pull_session, + start_pull_session, +}; +pub use status::{get_changes, get_commit_diff, get_history, get_status, get_working_diff}; +pub use sync::{deinit_repository, import_repository, init_repository, sync_now}; +pub use types::*; + +pub mod openapi { + pub use super::config::*; + pub use super::ignore::*; + pub use super::pull::*; + pub use super::status::*; + pub use super::sync::*; +} + +pub fn routes(ctx: AppContext) -> Router { + Router::new() + .route( + "/git/config", + get(get_config) + .post(create_or_update_config) + .delete(delete_config), + ) + .route("/git/status", get(get_status)) + .route("/git/changes", get(get_changes)) + .route("/git/history", get(get_history)) + .route("/git/diff/working", get(get_working_diff)) + .route("/git/diff/commits/:from/:to", get(get_commit_diff)) + 
.route("/git/sync", post(sync_now)) + .route("/git/import", post(import_repository)) + .route("/git/pull", post(pull_repository)) + .route("/git/pull/start", post(start_pull_session)) + .route("/git/pull/session/:id", get(get_pull_session)) + .route("/git/pull/session/:id/resolve", post(resolve_pull_session)) + .route( + "/git/pull/session/:id/finalize", + post(finalize_pull_session), + ) + .route("/git/init", post(init_repository)) + .route("/git/deinit", post(deinit_repository)) + .route("/git/ignore/doc/:id", post(ignore_document)) + .route("/git/ignore/folder/:id", post(ignore_folder)) + .route( + "/git/gitignore/patterns", + get(get_gitignore_patterns).post(add_gitignore_patterns), + ) + .route("/git/gitignore/check", post(check_path_ignored)) + .with_state(ctx) +} diff --git a/api/crates/presentation/src/http/git/pull.rs b/api/crates/presentation/src/http/git/pull.rs new file mode 100644 index 00000000..9074c302 --- /dev/null +++ b/api/crates/presentation/src/http/git/pull.rs @@ -0,0 +1,415 @@ +use axum::{Json, extract::State, http::StatusCode}; +use uuid::Uuid; + +use crate::context::GitContext; +use crate::http::error::ApiError; +use crate::http::extractors::WorkspaceAuth; +use application::core::services::errors::ServiceError; +use application::git::dtos::{GitPullRequestDto, GitPullResolutionDto}; +use application::git::services::FinalizePullSessionResult; +use domain::access::permissions::PERM_GIT_SYNC; +use domain::git::pull_session::GitPullSessionStatus; + +use super::types::{ + GitPullConflictItem, GitPullRequest, GitPullResolution, GitPullResponse, + GitPullSessionResponse, map_git_error, +}; + +#[utoipa::path( + post, + path = "/api/git/pull", + tag = "Git", + request_body = GitPullRequest, + responses( + (status = 200, body = GitPullResponse), + (status = 409, body = GitPullResponse, description = "Conflicts detected") + ) +)] +pub async fn pull_repository( + State(ctx): State, + auth: WorkspaceAuth, + Json(req): Json, +) -> Result<(StatusCode, Json), ApiError> { + auth.ensure_permission(PERM_GIT_SYNC)?; + let service = ctx.git_service(); + let dto = service + .pull_repository( + auth.workspace_id, + auth.user_id, + GitPullRequestDto { + resolutions: req + .resolutions + .unwrap_or_default() + .into_iter() + .map(|r| GitPullResolutionDto { + path: r.path, + choice: r.choice, + content: r.content, + }) + .collect(), + }, + ) + .await + .map_err(|err| { + let message = match &err { + ServiceError::BadRequest("workspace_has_pending_changes") => { + "Workspace has pending changes. Commit, sync, or discard them before pulling." 
diff --git a/api/crates/presentation/src/http/git/pull.rs b/api/crates/presentation/src/http/git/pull.rs
new file mode 100644
index 00000000..9074c302
--- /dev/null
+++ b/api/crates/presentation/src/http/git/pull.rs
@@ -0,0 +1,415 @@
+use axum::{Json, extract::State, http::StatusCode};
+use uuid::Uuid;
+
+use crate::context::GitContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::errors::ServiceError;
+use application::git::dtos::{GitPullRequestDto, GitPullResolutionDto};
+use application::git::services::FinalizePullSessionResult;
+use domain::access::permissions::PERM_GIT_SYNC;
+use domain::git::pull_session::GitPullSessionStatus;
+
+use super::types::{
+    GitPullConflictItem, GitPullRequest, GitPullResolution, GitPullResponse,
+    GitPullSessionResponse, map_git_error,
+};
+
+#[utoipa::path(
+    post,
+    path = "/api/git/pull",
+    tag = "Git",
+    request_body = GitPullRequest,
+    responses(
+        (status = 200, body = GitPullResponse),
+        (status = 409, body = GitPullResponse, description = "Conflicts detected")
+    )
+)]
+pub async fn pull_repository(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+    Json(req): Json<GitPullRequest>,
+) -> Result<(StatusCode, Json<GitPullResponse>), ApiError> {
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+    let service = ctx.git_service();
+    let dto = service
+        .pull_repository(
+            auth.workspace_id,
+            auth.user_id,
+            GitPullRequestDto {
+                resolutions: req
+                    .resolutions
+                    .unwrap_or_default()
+                    .into_iter()
+                    .map(|r| GitPullResolutionDto {
+                        path: r.path,
+                        choice: r.choice,
+                        content: r.content,
+                    })
+                    .collect(),
+            },
+        )
+        .await
+        .map_err(|err| {
+            let message = match &err {
+                ServiceError::BadRequest("workspace_has_pending_changes") => {
+                    "Workspace has pending changes. Commit, sync, or discard them before pulling."
+                        .to_string()
+                }
+                _ => err.to_string(),
+            };
+            let status = map_git_error(err).status();
+            let body = GitPullResponse {
+                success: false,
+                message,
+                files_changed: 0,
+                commit_hash: None,
+                conflicts: None,
+                git_status: None,
+            };
+            (status, body)
+        });
+    let dto = match dto {
+        Ok(v) => v,
+        Err((status, body)) => return Ok((status, Json(body))),
+    };
+    let conflicts = dto
+        .conflicts
+        .map(|items| items.into_iter().map(Into::into).collect::<Vec<_>>())
+        .unwrap_or_default();
+    let has_conflicts = !conflicts.is_empty();
+    let status = if has_conflicts {
+        StatusCode::CONFLICT
+    } else {
+        StatusCode::OK
+    };
+    Ok((
+        status,
+        Json(GitPullResponse {
+            success: dto.success,
+            message: dto.message,
+            files_changed: dto.files_changed as i32,
+            commit_hash: dto.commit_hash,
+            conflicts: if has_conflicts { Some(conflicts) } else { None },
+            git_status: None,
+        }),
+    ))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/git/pull/start",
+    tag = "Git",
+    responses(
+        (status = 200, body = GitPullSessionResponse),
+        (status = 400, body = GitPullSessionResponse),
+        (status = 409, body = GitPullSessionResponse, description = "Conflicts detected")
+    )
+)]
+pub async fn start_pull_session(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+) -> Result<(StatusCode, Json<GitPullSessionResponse>), ApiError> {
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+
+    let service = ctx.git_service();
+    let session = match service
+        .start_pull_session_flow(auth.workspace_id, auth.user_id)
+        .await
+    {
+        Ok(v) => v,
+        Err(err) => {
+            let message = match &err {
+                ServiceError::BadRequest("workspace_has_pending_changes") => {
+                    "Workspace has pending changes. Commit, sync, or discard them before pulling."
+                        .to_string()
+                }
+                other => other.to_string(),
+            };
+            let status = map_git_error(err).status();
+            return Ok((
+                status,
+                Json(GitPullSessionResponse {
+                    session_id: Uuid::nil(),
+                    status: "error".to_string(),
+                    conflicts: Vec::new(),
+                    resolutions: Vec::new(),
+                    message: Some(message),
+                }),
+            ));
+        }
+    };
+    if session.status == GitPullSessionStatus::Error {
+        return Ok((
+            StatusCode::BAD_REQUEST,
+            Json(GitPullSessionResponse {
+                session_id: session.id,
+                status: session.status.as_str().to_string(),
+                conflicts: Vec::new(),
+                resolutions: Vec::new(),
+                message: session.message,
+            }),
+        ));
+    }
+    let conflicts = session
+        .conflicts
+        .clone()
+        .into_iter()
+        .map(Into::into)
+        .collect::<Vec<_>>();
+    let has_conflicts = !conflicts.is_empty();
+    let status = if has_conflicts {
+        StatusCode::CONFLICT
+    } else {
+        StatusCode::OK
+    };
+    Ok((
+        status,
+        Json(GitPullSessionResponse {
+            session_id: session.id,
+            status: session.status.as_str().to_string(),
+            conflicts,
+            resolutions: Vec::new(),
+            message: session.message,
+        }),
+    ))
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/git/pull/session/{id}",
+    tag = "Git",
+    responses((status = 200, body = GitPullSessionResponse))
+)]
+pub async fn get_pull_session(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+    axum::extract::Path(id): axum::extract::Path<Uuid>,
+) -> Result<Json<GitPullSessionResponse>, ApiError> {
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+
+    let service = ctx.git_service();
+    let state = service
+        .load_pull_session_with_stale_check(auth.workspace_id, id)
+        .await
+        .map_err(map_git_error)?
+        .ok_or(ApiError::not_found("not_found"))?;
+    Ok(Json(GitPullSessionResponse {
+        session_id: state.id,
+        status: state.status.as_str().to_string(),
+        conflicts: state.conflicts.into_iter().map(Into::into).collect(),
+        resolutions: state
+            .resolutions
+            .into_iter()
+            .map(|r| GitPullResolution {
+                path: r.path,
+                choice: r.choice,
+                content: r.content,
+            })
+            .collect(),
+        message: state.message,
+    }))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/git/pull/session/{id}/resolve",
+    tag = "Git",
+    request_body = GitPullRequest,
+    responses(
+        (status = 200, body = GitPullSessionResponse),
+        (status = 400, body = GitPullSessionResponse),
+        (status = 409, body = GitPullSessionResponse)
+    )
+)]
+pub async fn resolve_pull_session(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+    axum::extract::Path(id): axum::extract::Path<Uuid>,
+    Json(req): Json<GitPullRequest>,
+) -> Result<(StatusCode, Json<GitPullSessionResponse>), ApiError> {
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+
+    let service = ctx.git_service();
+    let existing_session = service
+        .load_pull_session_with_stale_check(auth.workspace_id, id)
+        .await
+        .map_err(map_git_error)?
+        .ok_or(ApiError::not_found("not_found"))?;
+    let resolutions = req.resolutions.unwrap_or_default();
+    let session = match service
+        .resolve_pull_session_flow(
+            auth.workspace_id,
+            auth.user_id,
+            id,
+            resolutions
+                .iter()
+                .cloned()
+                .map(|r| GitPullResolutionDto {
+                    path: r.path,
+                    choice: r.choice,
+                    content: r.content,
+                })
+                .collect(),
+        )
+        .await
+    {
+        Ok(v) => v,
+        Err(err) => {
+            let message = match &err {
+                ServiceError::BadRequest("workspace_has_pending_changes") => {
+                    "Workspace has pending changes. Commit, sync, or discard them before pulling."
+                        .to_string()
+                }
+                other => other.to_string(),
+            };
+            let status = map_git_error(err).status();
+            return Ok((
+                status,
+                Json(GitPullSessionResponse {
+                    session_id: id,
+                    status: "error".to_string(),
+                    conflicts: existing_session
+                        .conflicts
+                        .into_iter()
+                        .map(Into::into)
+                        .collect(),
+                    resolutions: existing_session
+                        .resolutions
+                        .into_iter()
+                        .map(|r| GitPullResolution {
+                            path: r.path,
+                            choice: r.choice,
+                            content: r.content,
+                        })
+                        .collect(),
+                    message: Some(message),
+                }),
+            ));
+        }
+    };
+
+    let mut status_code = StatusCode::OK;
+
+    let conflicts: Vec<GitPullConflictItem> = session
+        .conflicts
+        .clone()
+        .into_iter()
+        .map(Into::into)
+        .collect();
+    if !conflicts.is_empty() {
+        status_code = StatusCode::CONFLICT;
+    }
+    if session.status == GitPullSessionStatus::Stale {
+        status_code = StatusCode::CONFLICT;
+    }
+    if session.status == GitPullSessionStatus::Error {
+        status_code = StatusCode::BAD_REQUEST;
+    }
+    let session_status = session.status;
+
+    Ok((
+        status_code,
+        Json(GitPullSessionResponse {
+            session_id: id,
+            status: session_status.as_str().to_string(),
+            conflicts,
+            resolutions,
+            message: if session_status == GitPullSessionStatus::Error {
+                session.message
+            } else if status_code == StatusCode::CONFLICT
+                && session_status == GitPullSessionStatus::Stale
+            {
+                Some("Pull session is stale. Please start a new pull.".to_string())
+            } else {
+                session.message
+            },
+        }),
+    ))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/git/pull/session/{id}/finalize",
+    tag = "Git",
+    responses(
+        (status = 200, body = GitPullResponse),
+        (status = 400, body = GitPullResponse),
+        (status = 409, body = GitPullResponse)
+    )
+)]
+pub async fn finalize_pull_session(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+    axum::extract::Path(id): axum::extract::Path<Uuid>,
+) -> Result<(StatusCode, Json<GitPullResponse>), ApiError> {
+    auth.ensure_permission(PERM_GIT_SYNC)?;
+
+    let service = ctx.git_service();
+    let FinalizePullSessionResult {
+        session,
+        git_status,
+    } = service
+        .finalize_pull_session_flow(auth.workspace_id, id)
+        .await
+        .map_err(map_git_error)?;
+    if session.status == GitPullSessionStatus::Error {
+        return Ok((
+            StatusCode::BAD_REQUEST,
+            Json(GitPullResponse {
+                success: false,
+                message: session
+                    .message
+                    .clone()
+                    .unwrap_or_else(|| "pull failed".to_string()),
+                files_changed: 0,
+                commit_hash: None,
+                conflicts: Some(session.conflicts.into_iter().map(Into::into).collect()),
+                git_status: None,
+            }),
+        ));
+    }
+    if session.status == GitPullSessionStatus::Stale {
+        return Ok((
+            StatusCode::CONFLICT,
+            Json(GitPullResponse {
+                success: false,
+                message: session
+                    .message
+                    .clone()
+                    .unwrap_or_else(|| "pull session stale".to_string()),
+                files_changed: 0,
+                commit_hash: None,
+                conflicts: Some(session.conflicts.into_iter().map(Into::into).collect()),
+                git_status: None,
+            }),
+        ));
+    }
+    if !session.conflicts.is_empty() {
+        return Ok((
+            StatusCode::CONFLICT,
+            Json(GitPullResponse {
+                success: false,
+                message: "conflicts remaining".to_string(),
+                files_changed: 0,
+                commit_hash: None,
+                conflicts: Some(session.conflicts.into_iter().map(Into::into).collect()),
+                git_status: None,
+            }),
+        ));
+    }
+    Ok((
+        StatusCode::OK,
+        Json(GitPullResponse {
+            success: true,
+            message: session
+                .message
+                .clone()
+                .unwrap_or_else(|| "merge completed".to_string()),
+            files_changed: 0,
+            commit_hash: None,
+            conflicts: None,
+            git_status: git_status.map(Into::into),
+        }),
+    ))
+}
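All three session handlers derive the HTTP status from the session outcome in the same way; pulled out as a sketch for clarity (assumes `GitPullSessionStatus::as_str()` yields lowercase names, as the "error" literal above suggests):

use axum::http::StatusCode;

fn session_status_code(status: &str, has_conflicts: bool) -> StatusCode {
    match status {
        "error" => StatusCode::BAD_REQUEST,         // 400: pull failed outright
        "stale" => StatusCode::CONFLICT,            // 409: start a new pull session
        _ if has_conflicts => StatusCode::CONFLICT, // 409: unresolved conflicts remain
        _ => StatusCode::OK,
    }
}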
path = "/api/git/history", tag = "Git", responses((status = 200, body = GitHistoryResponse)))] +pub async fn get_history( + State(ctx): State, + auth: WorkspaceUser, +) -> Result, ApiError> { + let service = ctx.git_service(); + let commits: Vec = service + .get_history(auth.workspace_id) + .await + .map_err(map_git_error)?; + let out = commits.into_iter().map(Into::into).collect(); + Ok(Json(GitHistoryResponse { commits: out })) +} + +#[utoipa::path( + get, + path = "/api/git/diff/working", + tag = "Git", + responses((status = 200, body = [TextDiffResult])) +)] +pub async fn get_working_diff( + State(ctx): State, + auth: WorkspaceUser, +) -> Result>, ApiError> { + let service = ctx.git_service(); + let diffs = service + .get_working_diff(auth.workspace_id) + .await + .map_err(map_git_error)?; + Ok(Json(diffs)) +} + +#[utoipa::path( + get, + path = "/api/git/diff/commits/{from}/{to}", + params(("from" = String, Path, description = "From"), ("to" = String, Path, description = "To")), + tag = "Git", + responses((status = 200, body = [TextDiffResult])) +)] +pub async fn get_commit_diff( + State(ctx): State, + auth: WorkspaceUser, + axum::extract::Path((from, to)): axum::extract::Path<(String, String)>, +) -> Result>, ApiError> { + let service = ctx.git_service(); + let diffs = service + .get_commit_diff(auth.workspace_id, &from, &to) + .await + .map_err(map_git_error)?; + Ok(Json(diffs)) +} diff --git a/api/crates/presentation/src/http/git/sync.rs b/api/crates/presentation/src/http/git/sync.rs new file mode 100644 index 00000000..9d0b2857 --- /dev/null +++ b/api/crates/presentation/src/http/git/sync.rs @@ -0,0 +1,100 @@ +use axum::{Json, extract::State}; + +use crate::context::GitContext; +use crate::http::error::ApiError; +use crate::http::extractors::{WorkspaceAuth, WorkspaceUser}; +use application::git::dtos::{GitSyncRequestDto, UpsertGitConfigInput}; +use domain::access::permissions::PERM_GIT_INIT; + +use super::types::{ + CreateGitConfigRequest, GitImportResponse, GitSyncRequest, GitSyncResponse, map_git_error, +}; + +#[utoipa::path(post, path = "/api/git/sync", tag = "Git", request_body = GitSyncRequest, responses((status = 200, body = GitSyncResponse), (status = 409, description = "Conflicts during rebase/pull")))] +pub async fn sync_now( + State(ctx): State, + auth: WorkspaceUser, + Json(req): Json, +) -> Result, ApiError> { + let service = ctx.git_service(); + let out = service + .sync_now( + auth.workspace_id, + GitSyncRequestDto { + message: req.message.clone(), + force: req.force, + full_scan: req.full_scan, + skip_push: req.skip_push, + }, + ) + .await + .map_err(map_git_error)?; + Ok(Json(GitSyncResponse { + success: out.success, + message: out.message, + commit_hash: out.commit_hash, + files_changed: out.files_changed, + })) +} + +#[utoipa::path( + post, + path = "/api/git/import", + tag = "Git", + request_body = CreateGitConfigRequest, + responses((status = 200, body = GitImportResponse)) +)] +pub async fn import_repository( + State(ctx): State, + auth: WorkspaceAuth, + Json(req): Json, +) -> Result, ApiError> { + if req.repository_url.trim().is_empty() { + return Err(ApiError::bad_request("invalid_repository_url")); + } + auth.ensure_permission(PERM_GIT_INIT)?; + + let service = ctx.git_service(); + let dto = service + .import_repository( + auth.workspace_id, + auth.user_id, + &UpsertGitConfigInput::from(req), + ) + .await + .map_err(map_git_error)?; + Ok(Json(GitImportResponse { + success: true, + message: dto.message, + files_changed: dto.files_changed as i32, + 
diff --git a/api/crates/presentation/src/http/git/sync.rs b/api/crates/presentation/src/http/git/sync.rs
new file mode 100644
index 00000000..9d0b2857
--- /dev/null
+++ b/api/crates/presentation/src/http/git/sync.rs
@@ -0,0 +1,100 @@
+use axum::{Json, extract::State};
+
+use crate::context::GitContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::{WorkspaceAuth, WorkspaceUser};
+use application::git::dtos::{GitSyncRequestDto, UpsertGitConfigInput};
+use domain::access::permissions::PERM_GIT_INIT;
+
+use super::types::{
+    CreateGitConfigRequest, GitImportResponse, GitSyncRequest, GitSyncResponse, map_git_error,
+};
+
+#[utoipa::path(post, path = "/api/git/sync", tag = "Git", request_body = GitSyncRequest, responses((status = 200, body = GitSyncResponse), (status = 409, description = "Conflicts during rebase/pull")))]
+pub async fn sync_now(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceUser,
+    Json(req): Json<GitSyncRequest>,
+) -> Result<Json<GitSyncResponse>, ApiError> {
+    let service = ctx.git_service();
+    let out = service
+        .sync_now(
+            auth.workspace_id,
+            GitSyncRequestDto {
+                message: req.message.clone(),
+                force: req.force,
+                full_scan: req.full_scan,
+                skip_push: req.skip_push,
+            },
+        )
+        .await
+        .map_err(map_git_error)?;
+    Ok(Json(GitSyncResponse {
+        success: out.success,
+        message: out.message,
+        commit_hash: out.commit_hash,
+        files_changed: out.files_changed,
+    }))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/git/import",
+    tag = "Git",
+    request_body = CreateGitConfigRequest,
+    responses((status = 200, body = GitImportResponse))
+)]
+pub async fn import_repository(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceAuth,
+    Json(req): Json<CreateGitConfigRequest>,
+) -> Result<Json<GitImportResponse>, ApiError> {
+    if req.repository_url.trim().is_empty() {
+        return Err(ApiError::bad_request("invalid_repository_url"));
+    }
+    auth.ensure_permission(PERM_GIT_INIT)?;
+
+    let service = ctx.git_service();
+    let dto = service
+        .import_repository(
+            auth.workspace_id,
+            auth.user_id,
+            &UpsertGitConfigInput::from(req),
+        )
+        .await
+        .map_err(map_git_error)?;
+    Ok(Json(GitImportResponse {
+        success: true,
+        message: dto.message,
+        files_changed: dto.files_changed as i32,
+        commit_hash: dto.commit_hash,
+        docs_created: dto.docs_created as i32,
+        attachments_created: dto.attachments_created as i32,
+    }))
+}
+
+#[utoipa::path(post, path = "/api/git/init", tag = "Git", responses((status = 200, description = "OK")))]
+pub async fn init_repository(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceUser,
+) -> Result<Json<serde_json::Value>, ApiError> {
+    let service = ctx.git_service();
+    service
+        .init_repository(auth.workspace_id)
+        .await
+        .map_err(map_git_error)?;
+    Ok(Json(serde_json::json!({"success":true})))
+}
+
+#[utoipa::path(post, path = "/api/git/deinit", tag = "Git", responses((status = 200, description = "OK")))]
+pub async fn deinit_repository(
+    State(ctx): State<GitContext>,
+    auth: WorkspaceUser,
+) -> Result<Json<serde_json::Value>, ApiError> {
+    let service = ctx.git_service();
+    service
+        .deinit_repository(auth.workspace_id)
+        .await
+        .map_err(map_git_error)?;
+    Ok(Json(serde_json::json!({"success":true})))
+}
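Every field of `GitSyncRequest` is optional, so the smallest valid body for POST /api/git/sync is `{}`. A fuller example (sketch; values are illustrative):

fn example_sync_body() -> serde_json::Value {
    serde_json::json!({
        "message": "manual sync", // optional commit message
        "force": false,
        "full_scan": true,        // rescan the whole workspace
        "skip_push": false
    })
}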
diff --git a/api/crates/presentation/src/http/git/types.rs b/api/crates/presentation/src/http/git/types.rs
new file mode 100644
index 00000000..d79e1513
--- /dev/null
+++ b/api/crates/presentation/src/http/git/types.rs
@@ -0,0 +1,298 @@
+use application::core::services::errors::ServiceError;
+use application::git::dtos::UpsertGitConfigInput;
+use application::git::dtos::{
+    GitChangeItem as GitChangeDto, GitCommitInfo, GitConfigDto, GitPullConflictItemDto,
+    GitPullResolutionDto, GitPullSessionDto, GitStatusDto, GitignoreUpdateDto,
+};
+use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+
+pub fn map_git_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "git_service_error")
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct GitignoreUpdateResponse {
+    pub added: usize,
+    pub patterns: Vec<String>,
+}
+
+impl From<GitignoreUpdateDto> for GitignoreUpdateResponse {
+    fn from(value: GitignoreUpdateDto) -> Self {
+        Self {
+            added: value.added,
+            patterns: value.patterns,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitConfigResponse {
+    pub id: uuid::Uuid,
+    pub repository_url: String,
+    pub branch_name: String,
+    pub auth_type: String,
+    pub auto_sync: bool,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+    pub remote_check: Option<GitRemoteCheckResponse>,
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitRemoteCheckResponse {
+    pub ok: bool,
+    pub message: String,
+    pub reason: Option<String>,
+}
+
+impl From<application::git::dtos::GitRemoteCheckDto> for GitRemoteCheckResponse {
+    fn from(value: application::git::dtos::GitRemoteCheckDto) -> Self {
+        Self {
+            ok: value.ok,
+            message: value.message,
+            reason: value.reason,
+        }
+    }
+}
+
+impl From<GitConfigDto> for GitConfigResponse {
+    fn from(d: GitConfigDto) -> Self {
+        GitConfigResponse {
+            id: d.id,
+            repository_url: d.repository_url,
+            branch_name: d.branch_name,
+            auth_type: d.auth_type,
+            auto_sync: d.auto_sync,
+            created_at: d.created_at,
+            updated_at: d.updated_at,
+            remote_check: None,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema)]
+pub struct CreateGitConfigRequest {
+    pub repository_url: String,
+    pub branch_name: Option<String>,
+    pub auth_type: String,
+    pub auth_data: serde_json::Value,
+    pub auto_sync: Option<bool>,
+}
+
+impl From<CreateGitConfigRequest> for UpsertGitConfigInput {
+    fn from(r: CreateGitConfigRequest) -> Self {
+        UpsertGitConfigInput {
+            repository_url: r.repository_url,
+            branch_name: r.branch_name,
+            auth_type: r.auth_type,
+            auth_data: r.auth_data,
+            auto_sync: r.auto_sync,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema)]
+pub struct UpdateGitConfigRequest {
+    pub repository_url: Option<String>,
+    pub branch_name: Option<String>,
+    pub auth_type: Option<String>,
+    pub auth_data: Option<serde_json::Value>,
+    pub auto_sync: Option<bool>,
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitPullResolution {
+    pub path: String,
+    pub choice: String,
+    pub content: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema)]
+pub struct GitPullRequest {
+    pub resolutions: Option<Vec<GitPullResolution>>,
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitPullConflictItem {
+    pub path: String,
+    pub is_binary: bool,
+    pub ours: Option<String>,
+    pub theirs: Option<String>,
+    pub base: Option<String>,
+    pub document_id: Option<uuid::Uuid>,
+}
+
+impl From<GitPullConflictItemDto> for GitPullConflictItem {
+    fn from(value: GitPullConflictItemDto) -> Self {
+        Self {
+            path: value.path,
+            is_binary: value.is_binary,
+            ours: value.ours,
+            theirs: value.theirs,
+            base: value.base,
+            document_id: value.document_id,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitPullResponse {
+    pub success: bool,
+    pub message: String,
+    pub files_changed: i32,
+    pub commit_hash: Option<String>,
+    pub conflicts: Option<Vec<GitPullConflictItem>>,
+    pub git_status: Option<GitStatus>,
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitImportResponse {
+    pub success: bool,
+    pub message: String,
+    pub files_changed: i32,
+    pub commit_hash: Option<String>,
+    pub docs_created: i32,
+    pub attachments_created: i32,
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitPullSessionResponse {
+    pub session_id: uuid::Uuid,
+    pub status: String,
+    pub conflicts: Vec<GitPullConflictItem>,
+    pub resolutions: Vec<GitPullResolution>,
+    pub message: Option<String>,
+}
+
+impl From<GitPullSessionDto> for GitPullSessionResponse {
+    fn from(value: GitPullSessionDto) -> Self {
+        Self {
+            session_id: value.id,
+            status: value.status.as_str().to_string(),
+            conflicts: value.conflicts.into_iter().map(Into::into).collect(),
+            resolutions: value
+                .resolutions
+                .into_iter()
+                .map(|r| GitPullResolution {
+                    path: r.path,
+                    choice: r.choice,
+                    content: r.content,
+                })
+                .collect(),
+            message: value.message,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
+pub struct GitStatus {
+    pub repository_initialized: bool,
+    pub has_remote: bool,
+    pub current_branch: Option<String>,
+    pub uncommitted_changes: u32,
+    pub untracked_files: u32,
+    pub last_sync: Option<chrono::DateTime<chrono::Utc>>,
+    pub last_sync_status: Option<String>,
+    pub last_sync_message: Option<String>,
+    pub last_sync_commit_hash: Option<String>,
+    pub sync_enabled: bool,
+}
+
+impl From<GitStatusDto> for GitStatus {
+    fn from(d: GitStatusDto) -> Self {
+        GitStatus {
+            repository_initialized: d.repository_initialized,
+            has_remote: d.has_remote,
+            current_branch: d.current_branch,
+            uncommitted_changes: d.uncommitted_changes,
+            untracked_files: d.untracked_files,
+            last_sync: d.last_sync,
+            last_sync_status: d.last_sync_status,
+            last_sync_message: d.last_sync_message,
+            last_sync_commit_hash: d.last_sync_commit_hash,
+            sync_enabled: d.sync_enabled,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct GitSyncRequest {
+    pub message: Option<String>,
+    pub force: Option<bool>,
+    pub full_scan: Option<bool>,
+    pub skip_push: Option<bool>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct GitSyncResponse {
+    pub success: bool,
+    pub message: String,
+    pub commit_hash: Option<String>,
+    pub files_changed: u32,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct GitChangeItem {
+    pub path: String,
+    pub status: String,
+}
+
+impl From<GitChangeDto> for GitChangeItem {
+    fn from(value: GitChangeDto) -> Self {
+        GitChangeItem {
+            path: value.path,
+            status: value.status,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct GitChangesResponse {
+    pub files: Vec<GitChangeItem>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct GitCommitItem {
+    pub hash: String,
+    pub message: String,
+    pub author_name: String,
+    pub author_email: String,
+    pub time: chrono::DateTime<chrono::Utc>,
+}
+
+impl From<GitCommitInfo> for GitCommitItem {
+    fn from(value: GitCommitInfo) -> Self {
+        GitCommitItem {
+            hash: value.hash,
+            message: value.message,
+            author_name: value.author_name,
+            author_email: value.author_email,
+            time: value.time,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct GitHistoryResponse {
+    pub commits: Vec<GitCommitItem>,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct AddPatternsRequest {
+    pub patterns: Vec<String>,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct CheckIgnoredRequest {
+    pub path: String,
+}
+
+impl From<GitPullResolution> for GitPullResolutionDto {
+    fn from(value: GitPullResolution) -> Self {
+        GitPullResolutionDto {
+            path: value.path,
+            choice: value.choice,
+            content: value.content,
+        }
+    }
+}
diff --git a/api/crates/presentation/src/http/identity/api_tokens/handlers/mod.rs b/api/crates/presentation/src/http/identity/api_tokens/handlers/mod.rs
new file mode 100644
index 00000000..4e13d9d0
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/api_tokens/handlers/mod.rs
@@ -0,0 +1,84 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::StatusCode,
+};
+use uuid::Uuid;
+
+use crate::context::IdentityContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::errors::ServiceError;
+
+use super::types::{ApiTokenCreateRequest, ApiTokenCreateResponse, ApiTokenItem};
+
+fn map_token_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "api_token_service_error")
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/me/api-tokens",
+    tag = "Auth",
+    responses((status = 200, body = [ApiTokenItem]))
+)]
+pub async fn list_api_tokens(
+    State(ctx): State<IdentityContext>,
+    auth: WorkspaceAuth,
+) -> Result<Json<Vec<ApiTokenItem>>, ApiError> {
+    let service = ctx.api_token_service();
+    let items = service
+        .list(auth.workspace_id, &auth.permissions)
+        .await
+        .map_err(map_token_error)?;
+    Ok(Json(items.into_iter().map(ApiTokenItem::from).collect()))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/me/api-tokens",
+    tag = "Auth",
+    request_body = ApiTokenCreateRequest,
+    responses((status = 200, body = ApiTokenCreateResponse))
+)]
+pub async fn create_api_token(
+    State(ctx): State<IdentityContext>,
+    auth: WorkspaceAuth,
+    Json(payload): Json<ApiTokenCreateRequest>,
+) -> Result<Json<ApiTokenCreateResponse>, ApiError> {
+    let service = ctx.api_token_service();
+    let created = service
+        .create(
+            auth.workspace_id,
+            auth.user_id,
+            &auth.permissions,
+            payload.name.as_deref(),
+        )
+        .await
+        .map_err(map_token_error)?;
+    Ok(Json(ApiTokenCreateResponse::from(created)))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/me/api-tokens/{id}",
+    tag = "Auth",
+    params(("id" = Uuid, Path, description = "Token ID")),
+    responses((status = 204))
+)]
+pub async fn revoke_api_token(
+    State(ctx): State<IdentityContext>,
+    auth: WorkspaceAuth,
+    Path(id): Path<Uuid>,
+) -> Result<StatusCode, ApiError> {
+    let service = ctx.api_token_service();
+    let revoked = service
+        .revoke(auth.workspace_id, id, &auth.permissions)
+        .await
+        .map_err(map_token_error)?;
+    if revoked {
+        Ok(StatusCode::NO_CONTENT)
+    } else {
+        Err(ApiError::not_found("not_found"))
+    }
+}
diff --git a/api/crates/presentation/src/http/identity/api_tokens/mod.rs b/api/crates/presentation/src/http/identity/api_tokens/mod.rs
new file mode 100644
index 00000000..18d151b4
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/api_tokens/mod.rs
@@ -0,0 +1,24 @@
+mod handlers;
+pub mod types;
+
+use axum::Router;
+use axum::routing::{delete, get};
+
+use crate::context::AppContext;
+
+pub use handlers::{create_api_token, list_api_tokens, revoke_api_token};
+pub use types::*;
+
+pub mod openapi {
+    pub use super::handlers::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route(
+            "/me/api-tokens",
+            get(list_api_tokens).post(create_api_token),
+        )
+        .route("/me/api-tokens/:id", delete(revoke_api_token))
+        .with_state(ctx)
+}
diff --git a/api/crates/presentation/src/http/identity/api_tokens/types.rs b/api/crates/presentation/src/http/identity/api_tokens/types.rs
new file mode 100644
index 00000000..db8415c2
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/api_tokens/types.rs
@@ -0,0 +1,51 @@
+use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+use uuid::Uuid;
+
+use application::identity::dtos::{ApiTokenDto, CreatedApiTokenDto};
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ApiTokenItem {
+    pub id: Uuid,
+    pub name: String,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub last_used_at: Option<chrono::DateTime<chrono::Utc>>,
+    pub revoked_at: Option<chrono::DateTime<chrono::Utc>>,
+}
+
+impl From<ApiTokenDto> for ApiTokenItem {
+    fn from(value: ApiTokenDto) -> Self {
+        Self {
+            id: value.id,
+            name: value.name,
+            created_at: value.created_at,
+            last_used_at: value.last_used_at,
+            revoked_at: value.revoked_at,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ApiTokenCreateResponse {
+    pub id: Uuid,
+    pub name: String,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub token: String,
+}
+
+impl From<CreatedApiTokenDto> for ApiTokenCreateResponse {
+    fn from(value: CreatedApiTokenDto) -> Self {
+        Self {
+            id: value.token.id,
+            name: value.token.name,
+            created_at: value.token.created_at,
+            token: value.plaintext,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct ApiTokenCreateRequest {
+    #[schema(example = "Deploy token")]
+    pub name: Option<String>,
+}
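Note that `ApiTokenCreateResponse` is the only type carrying the plaintext token; `ApiTokenItem`, used by the list endpoint, has no token field. A client therefore has to persist the secret from the create response (sketch; `save_secret` is a hypothetical callback):

fn on_token_created(resp: &ApiTokenCreateResponse, save_secret: impl Fn(&str)) {
    // The plaintext is returned exactly once, at creation time;
    // later GET /api/me/api-tokens calls never include it.
    save_secret(&resp.token);
}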
"; Secure" } else { "" }; + append_cookie( + headers, + format!( + "{}=; HttpOnly{}; Path=/; Max-Age=0; SameSite=Lax", + OAUTH_STATE_COOKIE_NAME, secure_attr + ), + ); +} + +pub(super) fn validate_oauth_state_cookie( + headers: &HeaderMap, + provider: ExternalAuthProviderKind, + provided_state: &str, +) -> Result<(), ()> { + let cookie_value = extract_cookie_from_headers(headers, OAUTH_STATE_COOKIE_NAME).ok_or(())?; + let mut segments = cookie_value.splitn(3, ':'); + let provider_raw = segments.next().ok_or(())?; + let stored_state = segments.next().ok_or(())?; + let issued_raw = segments.next().ok_or(())?; + let parsed_provider = ExternalAuthProviderKind::try_from(provider_raw).map_err(|_| ())?; + if parsed_provider != provider || stored_state != provided_state { + return Err(()); + } + let issued_ts: i64 = issued_raw.parse().map_err(|_| ())?; + let issued_at = DateTime::::from_timestamp(issued_ts, 0).ok_or(())?; + if Utc::now() - issued_at > Duration::seconds(OAUTH_STATE_TTL_SECS) { + return Err(()); + } + Ok(()) +} + +pub(super) fn get_cookie(cookie_header: &str, name: &str) -> Option { + for part in cookie_header.split(';') { + let kv = part.trim(); + if let Some((k, v)) = kv.split_once('=') + && k.trim() == name + { + return Some(v.trim().to_string()); + } + } + None +} + +fn extract_cookie_from_headers(headers: &HeaderMap, name: &str) -> Option { + headers + .get(header::COOKIE) + .and_then(|v| v.to_str().ok()) + .and_then(|cookie| get_cookie(cookie, name)) +} + +pub(crate) fn extract_refresh_token(headers: &HeaderMap) -> Option { + extract_cookie_from_headers(headers, REFRESH_COOKIE_NAME) +} + +pub(crate) fn extract_user_agent(headers: &HeaderMap) -> Option<&str> { + headers + .get(header::USER_AGENT) + .and_then(|v| v.to_str().ok()) +} + +pub(crate) fn extract_client_ip(headers: &HeaderMap) -> Option { + if let Some(value) = headers.get("x-forwarded-for").and_then(|v| v.to_str().ok()) + && let Some(first) = value.split(',').next() + && let trimmed = first.trim() + && !trimmed.is_empty() + { + return Some(trimmed.to_string()); + } + headers + .get("x-real-ip") + .or_else(|| headers.get("cf-connecting-ip")) + .and_then(|v| v.to_str().ok()) + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) +} + +pub(crate) fn build_session_cookie(token: &str, max_age_secs: usize, secure: bool) -> String { + let secure_attr = if secure { "; Secure" } else { "" }; + format!( + "{}={}; HttpOnly{}; Path=/; Max-Age={}; SameSite=Lax", + SESSION_COOKIE_NAME, token, secure_attr, max_age_secs + ) +} + +fn build_refresh_cookie(token: &str, max_age_secs: usize, secure: bool) -> String { + let secure_attr = if secure { "; Secure" } else { "" }; + format!( + "{}={}; HttpOnly{}; Path=/; Max-Age={}; SameSite=Lax", + REFRESH_COOKIE_NAME, token, secure_attr, max_age_secs + ) +} + +fn clear_session_cookie(secure: bool) -> String { + let secure_attr = if secure { "; Secure" } else { "" }; + format!( + "{}=; HttpOnly{}; Path=/; Max-Age=0; SameSite=Lax", + SESSION_COOKIE_NAME, secure_attr + ) +} + +fn clear_refresh_cookie(secure: bool) -> String { + let secure_attr = if secure { "; Secure" } else { "" }; + format!( + "{}=; HttpOnly{}; Path=/; Max-Age=0; SameSite=Lax", + REFRESH_COOKIE_NAME, secure_attr + ) +} + +pub(super) fn append_cookie(headers: &mut HeaderMap, value: String) { + if let Ok(header_value) = HeaderValue::from_str(&value) { + headers.append(header::SET_COOKIE, header_value); + } +} + +fn refresh_cookie_max_age(expires_at: DateTime) -> usize { + let now = Utc::now(); + if expires_at <= now { + 0 
+    } else {
+        (expires_at - now).num_seconds().max(0) as usize
+    }
+}
+
+pub(crate) fn apply_session_cookies(
+    auth_service: &dyn AuthServiceFacade,
+    session_cookie_secure: bool,
+    headers: &mut HeaderMap,
+    issued: &IssuedSessionBundle,
+) {
+    append_cookie(
+        headers,
+        build_session_cookie(
+            &issued.access.token,
+            auth_service.session_ttl_secs(),
+            session_cookie_secure,
+        ),
+    );
+    append_cookie(
+        headers,
+        build_refresh_cookie(
+            &issued.refresh_token,
+            refresh_cookie_max_age(issued.refresh_expires_at),
+            session_cookie_secure,
+        ),
+    );
+}
+
+pub(crate) fn clear_auth_cookies(headers: &mut HeaderMap, secure: bool) {
+    append_cookie(headers, clear_session_cookie(secure));
+    append_cookie(headers, clear_refresh_cookie(secure));
+}
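A quick round-trip of the cookie helpers above (hypothetical test; `get_cookie` does an exact name match on each `;`-separated pair, so partial names don't match):

#[test]
fn get_cookie_picks_exact_name() {
    let header = "access_token=abc; refresh_token=def";
    assert_eq!(get_cookie(header, "refresh_token").as_deref(), Some("def"));
    assert_eq!(get_cookie(header, "token"), None);
}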
diff --git a/api/crates/presentation/src/http/identity/auth/handlers/mod.rs b/api/crates/presentation/src/http/identity/auth/handlers/mod.rs
new file mode 100644
index 00000000..37acd4cf
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/handlers/mod.rs
@@ -0,0 +1,511 @@
+use application::core::services::errors::ServiceError;
+use application::identity::dtos::UserDto;
+use application::identity::ports::user_session_repository::UserSessionRecord;
+use application::identity::services::auth::external::{
+    ExternalAuthPayload, ExternalAuthProviderKind,
+};
+use application::identity::services::auth::user_sessions::SessionMetadata;
+use application::workspaces::ports::workspace_repository::WorkspaceListItem;
+use axum::{
+    Json,
+    extract::{Extension, Path, State},
+    http::{HeaderMap, StatusCode},
+    response::IntoResponse,
+};
+use chrono::Utc;
+use tracing::warn;
+use uuid::Uuid;
+
+use crate::context::HasWorkspaceService;
+use crate::context::IdentityContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::AuthedUser;
+use crate::http::workspaces::scope as workspace_scope;
+
+use super::cookies::{
+    append_cookie, build_oauth_state_cookie, clear_oauth_state_cookie, extract_client_ip,
+    extract_refresh_token, extract_user_agent, generate_oauth_state, validate_oauth_state_cookie,
+};
+use super::security::{RefreshedSession, map_auth_error};
+use super::{
+    AuthProviderInfoResponse, AuthProvidersResponse, LoginRequest, LoginResponse,
+    OAuthLoginRequest, OAuthStateResponse, RefreshResponse, RegisterRequest, SessionResponse,
+    UserResponse, apply_session_cookies, clear_auth_cookies,
+};
+
+#[utoipa::path(
+    post,
+    path = "/api/auth/oauth/{provider}/state",
+    tag = "Auth",
+    params(("provider" = String, Path, description = "OAuth provider identifier")),
+    security(()),
+    responses((status = 200, body = OAuthStateResponse))
+)]
+pub async fn oauth_state(
+    Path(provider): Path<String>,
+    State(ctx): State<IdentityContext>,
+) -> Result<(HeaderMap, Json<OAuthStateResponse>), ApiError> {
+    let provider_kind = ExternalAuthProviderKind::try_from(provider.as_str())
+        .map_err(|_| ApiError::not_found("oauth_provider_not_found"))?;
+    if ctx.external_auth().get(provider_kind).is_none() {
+        return Err(ApiError::new(
+            StatusCode::NOT_IMPLEMENTED,
+            "oauth_provider_not_implemented",
+        ));
+    }
+    let state = generate_oauth_state();
+    let mut headers = HeaderMap::new();
+    append_cookie(
+        &mut headers,
+        build_oauth_state_cookie(provider_kind, &state, ctx.cfg.session_cookie_secure),
+    );
+    Ok((headers, Json(OAuthStateResponse { state })))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/auth/oauth/{provider}",
+    tag = "Auth",
+    params(("provider" = String, Path, description = "OAuth provider identifier (e.g., google)")),
+    request_body = OAuthLoginRequest,
+    security(()),
+    responses((status = 200, body = LoginResponse))
+)]
+pub async fn oauth_login(
+    Path(provider): Path<String>,
+    State(ctx): State<IdentityContext>,
+    headers: HeaderMap,
+    Json(req): Json<OAuthLoginRequest>,
+) -> Result<(HeaderMap, Json<LoginResponse>), ApiError> {
+    let provider_kind = ExternalAuthProviderKind::try_from(provider.as_str())
+        .map_err(|_| ApiError::not_found("oauth_provider_not_found"))?;
+    let registry = ctx.external_auth();
+    let verifier = registry.get(provider_kind).ok_or(ApiError::new(
+        StatusCode::NOT_IMPLEMENTED,
+        "oauth_provider_not_implemented",
+    ))?;
+    let mut response_headers = HeaderMap::new();
+    if provider_kind.requires_state() {
+        let provided_state = req
+            .state
+            .as_deref()
+            .ok_or(ApiError::bad_request("missing_state"))?;
+        validate_oauth_state_cookie(&headers, provider_kind, provided_state)
+            .map_err(|_| ApiError::unauthorized("invalid_oauth_state"))?;
+        clear_oauth_state_cookie(&mut response_headers, ctx.cfg.session_cookie_secure);
+    }
+    let payload = ExternalAuthPayload {
+        credential: req.credential.clone(),
+        code: req.code.clone(),
+        redirect_uri: req.redirect_uri.clone(),
+    };
+    let identity = verifier.verify(&payload).await.map_err(map_auth_error)?;
+    let account_service = ctx.account_service();
+    let user_dto = account_service
+        .sign_in_with_external(identity)
+        .await
+        .map_err(map_account_error)?;
+    let user = build_user_response(&ctx, user_dto, None).await?;
+    let active_workspace_id = user
+        .active_workspace_id
+        .or_else(|| user.workspaces.iter().find(|w| w.is_default).map(|w| w.id))
+        .ok_or(ApiError::new(
+            StatusCode::INTERNAL_SERVER_ERROR,
+            "workspace_missing",
+        ))?;
+    let client_ip = extract_client_ip(&headers);
+    let user_agent = extract_user_agent(&headers);
+    let issued = ctx
+        .session_service()
+        .issue_new_session(
+            user.id,
+            active_workspace_id,
+            req.remember_me,
+            SessionMetadata {
+                user_agent,
+                ip_address: client_ip.as_deref(),
+            },
+        )
+        .await
+        .map_err(map_auth_error)?;
+    apply_session_cookies(
+        ctx.auth_service().as_ref(),
+        ctx.cfg.session_cookie_secure,
+        &mut response_headers,
+        &issued,
+    );
+    Ok((
+        response_headers,
+        Json(LoginResponse {
+            access_token: issued.access.token,
+            user,
+        }),
+    ))
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/auth/providers",
+    tag = "Auth",
+    security(()),
+    responses((status = 200, body = AuthProvidersResponse))
+)]
+pub async fn list_oauth_providers(
+    State(ctx): State<IdentityContext>,
+) -> Result<Json<AuthProvidersResponse>, ApiError> {
+    let providers = ctx
+        .external_auth()
+        .list_descriptors()
+        .into_iter()
+        .map(|descriptor| AuthProviderInfoResponse {
+            id: descriptor.kind.as_str().to_string(),
+            requires_state: descriptor.requires_state,
+            client_ids: descriptor.client_ids,
+            redirect_uri: descriptor.redirect_uri,
+            name: descriptor.display_name,
+            authorization_url: descriptor.authorization_url,
+            scopes: descriptor.scopes,
+        })
+        .collect();
+    Ok(Json(AuthProvidersResponse { providers }))
+}
+
+fn map_account_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "account_service_error")
+}
+
+fn map_workspace_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "workspace_service_error")
+}
+
+fn workspace_response_from(item: WorkspaceListItem) -> super::WorkspaceMembershipResponse {
+    super::WorkspaceMembershipResponse {
+        id: item.id,
+        name: item.name,
+        slug: item.slug,
+        icon: item.icon,
+        description: item.description,
+        is_personal: item.is_personal,
+        role_kind: item.role_kind.as_str().to_string(),
+        system_role: item.system_role.map(|role| role.as_str().to_string()),
+        custom_role_id: item.custom_role_id,
+        is_default: item.is_default,
+    }
+}
+
+fn session_response_from(
+    record: UserSessionRecord,
+    current_session_id: Option<Uuid>,
+) -> SessionResponse {
+    SessionResponse {
+        id: record.id,
+        workspace_id: record.workspace_id,
+        user_agent: record.user_agent,
+        ip_address: record.ip_address,
+        remember_me: record.remember_me,
+        created_at: record.created_at,
+        last_seen_at: record.last_seen_at,
+        expires_at: record.expires_at,
+        current: current_session_id.is_some_and(|id| id == record.id),
+    }
+}
+
+async fn build_user_response(
+    ctx: &impl HasWorkspaceService,
+    user: UserDto,
+    preferred_workspace_id: Option<Uuid>,
+) -> Result<UserResponse, ApiError> {
+    let workspaces = ctx
+        .workspace_service()
+        .list_for_user(user.id)
+        .await
+        .map_err(map_workspace_error)?
+        .into_iter()
+        .map(workspace_response_from)
+        .collect::<Vec<_>>();
+    let mut active_workspace_id =
+        preferred_workspace_id.and_then(|id| workspaces.iter().find(|w| w.id == id).map(|w| w.id));
+    if active_workspace_id.is_none() {
+        active_workspace_id = workspaces.iter().find(|w| w.is_default).map(|w| w.id);
+    }
+    if active_workspace_id.is_none() {
+        active_workspace_id = workspaces.first().map(|w| w.id);
+    }
+    let active_workspace =
+        active_workspace_id.and_then(|id| workspaces.iter().find(|w| w.id == id).cloned());
+    let mut active_workspace_permissions = Vec::new();
+    if let Some(active_ws_id) = active_workspace_id {
+        match ctx
+            .workspace_service()
+            .resolve_permission_set(active_ws_id, user.id)
+            .await
+        {
+            Ok(Some(set)) => active_workspace_permissions = set.to_vec(),
+            Ok(None) => {}
+            Err(err) => {
+                let mapped = map_workspace_error(err);
+                if mapped.status() != StatusCode::FORBIDDEN {
+                    return Err(mapped);
+                }
+            }
+        }
+    }
+    Ok(UserResponse {
+        id: user.id,
+        email: user.email,
+        name: user.name,
+        workspaces,
+        active_workspace_id,
+        active_workspace,
+        active_workspace_permissions,
+    })
+}
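+
+// Resolution order above: explicit preference (if the user is still a member)
+// -> default workspace -> first membership. The same fallback in miniature
+// (sketch over (id, is_default) pairs; not called anywhere):
+//
+//   fn pick_active(preferred: Option<u32>, ws: &[(u32, bool)]) -> Option<u32> {
+//       preferred
+//           .filter(|id| ws.iter().any(|(w, _)| w == id))
+//           .or_else(|| ws.iter().find(|(_, d)| *d).map(|(w, _)| *w))
+//           .or_else(|| ws.first().map(|(w, _)| *w))
+//   }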
+
+#[utoipa::path(
+    post,
+    path = "/api/auth/register",
+    tag = "Auth",
+    request_body = RegisterRequest,
+    security(()),
+    responses((status = 200, body = UserResponse))
+)]
+pub async fn register(
+    State(ctx): State<IdentityContext>,
+    Json(req): Json<RegisterRequest>,
+) -> Result<Json<UserResponse>, ApiError> {
+    let service = ctx.account_service();
+    let user = service
+        .register(&req.email, &req.name, &req.password)
+        .await
+        .map_err(map_account_error)?;
+    let response = build_user_response(&ctx, user, None).await?;
+    Ok(Json(response))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/auth/login",
+    tag = "Auth",
+    request_body = LoginRequest,
+    security(()),
+    responses((status = 200, body = LoginResponse))
+)]
+pub async fn login(
+    State(ctx): State<IdentityContext>,
+    headers: HeaderMap,
+    Json(req): Json<LoginRequest>,
+) -> Result<(HeaderMap, Json<LoginResponse>), ApiError> {
+    let service = ctx.account_service();
+    let user = service
+        .login(&req.email, &req.password)
+        .await
+        .map_err(map_account_error)?
+        .ok_or(ApiError::unauthorized("invalid_credentials"))?;
+    let user = build_user_response(&ctx, user, None).await?;
+    let active_workspace_id = user
+        .active_workspace_id
+        .or_else(|| user.workspaces.iter().find(|w| w.is_default).map(|w| w.id))
+        .ok_or(ApiError::new(
+            StatusCode::INTERNAL_SERVER_ERROR,
+            "workspace_missing",
+        ))?;
+    let client_ip = extract_client_ip(&headers);
+    let user_agent = extract_user_agent(&headers);
+    let issued = ctx
+        .session_service()
+        .issue_new_session(
+            user.id,
+            active_workspace_id,
+            req.remember_me,
+            SessionMetadata {
+                user_agent,
+                ip_address: client_ip.as_deref(),
+            },
+        )
+        .await
+        .map_err(map_auth_error)?;
+
+    let mut response_headers = HeaderMap::new();
+    apply_session_cookies(
+        ctx.auth_service().as_ref(),
+        ctx.cfg.session_cookie_secure,
+        &mut response_headers,
+        &issued,
+    );
+
+    Ok((
+        response_headers,
+        Json(LoginResponse {
+            access_token: issued.access.token,
+            user,
+        }),
+    ))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/auth/refresh",
+    tag = "Auth",
+    responses((status = 200, body = RefreshResponse))
+)]
+pub async fn refresh_session(
+    State(ctx): State<IdentityContext>,
+    refreshed: Option<Extension<RefreshedSession>>,
+) -> Result<axum::response::Response, ApiError> {
+    if let Some(Extension(bundle)) = refreshed {
+        return Ok(Json(RefreshResponse {
+            access_token: bundle.0.access.token.clone(),
+        })
+        .into_response());
+    }
+
+    let mut response_headers = HeaderMap::new();
+    clear_auth_cookies(&mut response_headers, ctx.cfg.session_cookie_secure);
+    Ok((response_headers, StatusCode::UNAUTHORIZED).into_response())
+}
+
+#[utoipa::path(get, path = "/api/auth/me", tag = "Auth", responses((status = 200, body = UserResponse)))]
+pub async fn me(
+    State(ctx): State<IdentityContext>,
+    auth: AuthedUser,
+    headers: HeaderMap,
+) -> Result<Json<UserResponse>, crate::http::error::ApiError> {
+    let active_workspace_id = match workspace_scope::resolve_active_workspace_id(
+        &ctx,
+        &headers,
+        Some(auth.bearer_token.as_str()),
+        auth.user_id,
+    )
+    .await
+    {
+        Ok(id) => Some(id),
+        Err(err) if err.status() == StatusCode::FORBIDDEN => None,
+        Err(err) => return Err(err),
+    };
+
+    let service = ctx.account_service();
+    let row = service
+        .get_me(auth.user_id)
+        .await
+        .map_err(map_account_error)?
+        .ok_or(crate::http::error::ApiError::unauthorized("unauthorized"))?;
+    let resp = build_user_response(&ctx, row, active_workspace_id).await?;
+    Ok(Json(resp))
+}
+
+#[utoipa::path(delete, path = "/api/auth/me", tag = "Auth", responses((status = 204)))]
+pub async fn delete_account(
+    State(ctx): State<IdentityContext>,
+    auth: AuthedUser,
+) -> Result<(HeaderMap, StatusCode), crate::http::error::ApiError> {
+    let service = ctx.account_service();
+    service
+        .delete_account(auth.user_id)
+        .await
+        .map_err(map_account_error)?;
+    ctx.session_service()
+        .revoke_all_for_user(auth.user_id)
+        .await
+        .map_err(map_auth_error)?;
+
+    let mut headers = HeaderMap::new();
+    clear_auth_cookies(&mut headers, ctx.cfg.session_cookie_secure);
+
+    Ok((headers, StatusCode::NO_CONTENT))
+}
+
+#[utoipa::path(post, path = "/api/auth/logout", tag = "Auth", responses((status = 204)))]
+pub async fn logout(
+    State(ctx): State<IdentityContext>,
+    headers: HeaderMap,
+) -> Result<(HeaderMap, StatusCode), ApiError> {
+    if let Some(refresh_token) = extract_refresh_token(&headers)
+        && let Err(err) = ctx.session_service().revoke_by_token(&refresh_token).await
+    {
+        warn!(error = ?err, "logout_revoke_session_failed");
+    }
+    let mut response_headers = HeaderMap::new();
+    clear_auth_cookies(&mut response_headers, ctx.cfg.session_cookie_secure);
+    clear_oauth_state_cookie(&mut response_headers, ctx.cfg.session_cookie_secure);
+    Ok((response_headers, StatusCode::NO_CONTENT))
+}
+
+#[utoipa::path(get, path = "/api/auth/sessions", tag = "Auth", responses((status = 200, body = [SessionResponse])))]
+pub async fn list_sessions(
+    State(ctx): State<IdentityContext>,
+    auth: AuthedUser,
+    headers: HeaderMap,
+) -> Result<Json<Vec<SessionResponse>>, ApiError> {
+    let current_session_id = if let Some(refresh_token) = extract_refresh_token(&headers) {
+        match ctx
+            .session_service()
+            .find_session_by_token(&refresh_token)
+            .await
+        {
+            Ok(Some(session)) => Some(session.id),
+            Ok(None) => None,
+            Err(err) => {
+                warn!(error = ?err, "resolve_current_session_failed");
+                None
+            }
+        }
+    } else {
+        None
+    };
+    let sessions = ctx
+        .session_service()
+        .list_for_user(auth.user_id)
+        .await
+        .map_err(map_auth_error)?;
+    let now = Utc::now();
+    let payload = sessions
+        .into_iter()
+        .filter(|session| session.revoked_at.is_none() && session.expires_at > now)
+        .map(|session| session_response_from(session, current_session_id))
+        .collect();
+    Ok(Json(payload))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/auth/sessions/{id}",
+    tag = "Auth",
+    params(("id" = Uuid, Path, description = "Session ID")),
+    responses((status = 204))
+)]
+pub async fn revoke_session(
+    State(ctx): State<IdentityContext>,
+    auth: AuthedUser,
+    headers: HeaderMap,
+    Path(session_id): Path<Uuid>,
+) -> Result<(HeaderMap, StatusCode), ApiError> {
+    let current_session_id = if let Some(refresh_token) = extract_refresh_token(&headers) {
+        match ctx
+            .session_service()
+            .find_session_by_token(&refresh_token)
+            .await
+        {
+            Ok(Some(session)) => Some(session.id),
+            Ok(None) => None,
+            Err(err) => {
+                warn!(error = ?err, "resolve_current_session_failed");
+                None
+            }
+        }
+    } else {
+        None
+    };
+    ctx.session_service()
+        .revoke_session(auth.user_id, session_id)
+        .await
+        .map_err(|err| match err {
+            ServiceError::Forbidden => ApiError::forbidden("forbidden"),
+            ServiceError::NotFound => ApiError::not_found("not_found"),
+            other => map_auth_error(other),
+        })?;
+    let mut response_headers = HeaderMap::new();
+    if current_session_id == Some(session_id) {
+        clear_auth_cookies(&mut response_headers, ctx.cfg.session_cookie_secure);
+    }
+    Ok((response_headers, StatusCode::NO_CONTENT))
+}
diff --git a/api/crates/presentation/src/http/identity/auth/middleware.rs b/api/crates/presentation/src/http/identity/auth/middleware.rs
new file mode 100644
index 00000000..69084d3d
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/middleware.rs
@@ -0,0 +1 @@
+pub use crate::security::request_status::{mark_token_expired, middleware};
diff --git a/api/crates/presentation/src/http/identity/auth/mod.rs b/api/crates/presentation/src/http/identity/auth/mod.rs
new file mode 100644
index 00000000..c0db010a
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/mod.rs
@@ -0,0 +1,41 @@
+pub mod middleware;
+mod types;
+
+pub use types::*;
+
+mod cookies;
+mod handlers;
+mod routes;
+mod security;
+
+pub mod request_status {
+    pub use super::middleware::{mark_token_expired, middleware};
+}
+
+pub use crate::security::token::{AccessTokenOverride, Bearer};
+
+pub use handlers::{
+    delete_account, list_oauth_providers, list_sessions, login, logout, me, oauth_login,
+    oauth_state, refresh_session, register, revoke_session,
+};
+
+// `utoipa::OpenApi(paths(...))` needs the generated `__path_*` items to be visible from the module
+// path referenced in `paths(...)`. Keep those under `auth::openapi` so we don't leak `__path_*`
+// from the main `auth` module API.
+pub mod openapi {
+    pub use super::handlers::*;
+}
+pub use routes::routes;
+pub use security::{
+    refresh_middleware, resolve_actor_from_parts, resolve_actor_from_token_str,
+    validate_bearer_public, validate_bearer_str,
+};
+
+pub(crate) use cookies::{
+    apply_session_cookies, clear_auth_cookies, extract_client_ip, extract_refresh_token,
+    extract_user_agent,
+};
+pub(crate) use security::map_auth_error;
+
+#[cfg(test)]
+mod tests;
diff --git a/api/crates/presentation/src/http/identity/auth/routes.rs b/api/crates/presentation/src/http/identity/auth/routes.rs
new file mode 100644
index 00000000..e2339cfa
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/routes.rs
@@ -0,0 +1,26 @@
+use axum::{
+    Router,
+    routing::{delete, get, post},
+};
+
+use crate::context::AppContext;
+
+use super::handlers::{
+    delete_account, list_oauth_providers, list_sessions, login, logout, me, oauth_login,
+    oauth_state, refresh_session, register, revoke_session,
+};
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route("/register", post(register))
+        .route("/login", post(login))
+        .route("/oauth/:provider/state", post(oauth_state))
+        .route("/oauth/:provider", post(oauth_login))
+        .route("/providers", get(list_oauth_providers))
+        .route("/logout", post(logout))
+        .route("/refresh", post(refresh_session))
+        .route("/sessions", get(list_sessions))
+        .route("/sessions/:id", delete(revoke_session))
+        .route("/me", get(me).delete(delete_account))
+        .with_state(ctx)
+}
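The handler annotations use absolute `/api/auth/...` paths while `routes()` registers relative ones, so the bootstrap crate presumably nests this router under `/api/auth` (sketch; the actual mounting lives elsewhere in this PR):

fn mount_auth(api: axum::Router, ctx: AppContext) -> axum::Router {
    // "/login" inside routes() becomes "/api/auth/login" once nested.
    api.nest("/api/auth", routes(ctx))
}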
diff --git a/api/crates/presentation/src/http/identity/auth/security.rs b/api/crates/presentation/src/http/identity/auth/security.rs
new file mode 100644
index 00000000..2f6e5ec7
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/security.rs
@@ -0,0 +1,203 @@
+use std::sync::Arc;
+
+use application::core::services::access;
+use application::core::services::errors::ServiceError;
+use application::identity::services::auth::user_sessions::{IssuedSessionBundle, SessionMetadata};
+use axum::{
+    body::Body,
+    extract::State,
+    http::{HeaderMap, HeaderValue, Request, StatusCode, header},
+    middleware::Next,
+    response::IntoResponse,
+};
+
+use crate::context::{HasAuthServices, IdentityContext};
+use crate::http::error::ApiError;
+use crate::security::token::{AccessTokenOverride, Bearer};
+
+use super::cookies::{
+    SESSION_COOKIE_NAME, apply_session_cookies, clear_auth_cookies, extract_client_ip,
+    extract_refresh_token, extract_user_agent, get_cookie,
+};
+use super::request_status;
+
+#[derive(Clone)]
+pub struct RefreshedSession(pub Arc<IssuedSessionBundle>);
+
+fn unauthorized_token_expired(ctx: &IdentityContext) -> axum::response::Response {
+    let mut headers = HeaderMap::new();
+    clear_auth_cookies(&mut headers, ctx.cfg.session_cookie_secure);
+    let _ = headers.insert(
+        header::WWW_AUTHENTICATE,
+        HeaderValue::from_static("Bearer error=\"token_expired\""),
+    );
+    (headers, StatusCode::UNAUTHORIZED).into_response()
+}
+
+fn extract_bearer_token(headers: &HeaderMap) -> Option<String> {
+    if let Some(cookie) = headers
+        .get(axum::http::header::COOKIE)
+        .and_then(|v| v.to_str().ok())
+        && let Some(token) = get_cookie(cookie, SESSION_COOKIE_NAME)
+        && !token.trim().is_empty()
+    {
+        return Some(token);
+    }
+
+    if let Some(auth) = headers
+        .get(axum::http::header::AUTHORIZATION)
+        .and_then(|v| v.to_str().ok())
+        && let Some(t) = auth.strip_prefix("Bearer ")
+        && let trimmed = t.trim()
+        && !trimmed.is_empty()
+    {
+        return Some(trimmed.to_string());
+    }
+    None
+}
+
+fn should_skip_refresh(path: &str) -> bool {
+    path.starts_with("/api/public") || path.starts_with("/api/health") || path == "/metrics"
+}
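+
+// Note the extraction precedence above: the session cookie wins over the
+// "Authorization: Bearer" header. A hypothetical test pinning down the skip
+// list used by refresh_middleware further down this file (sketch only):
+//
+//   #[test]
+//   fn skips_public_health_and_metrics() {
+//       assert!(should_skip_refresh("/api/public/docs/abc"));
+//       assert!(should_skip_refresh("/api/health"));
+//       assert!(should_skip_refresh("/metrics"));
+//       assert!(!should_skip_refresh("/api/auth/refresh"));
+//   }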
+
+pub(crate) async fn validate_bearer(
+    ctx: &impl HasAuthServices,
+    bearer: Bearer,
+) -> Result {
+    validate_bearer_str(ctx, &bearer.0).await
+}
+
+pub async fn validate_bearer_public(
+    ctx: &impl HasAuthServices,
+    bearer: Bearer,
+) -> Result {
+    validate_bearer(ctx, bearer).await
+}
+
+pub async fn validate_bearer_str(
+    ctx: &impl HasAuthServices,
+    token: &str,
+) -> Result {
+    let service = ctx.auth_service();
+    let session_service = ctx.session_service();
+    let subject = match service.subject_from_token(token).await {
+        Ok(Some(sub)) => sub,
+        Ok(None) => return Err(ApiError::unauthorized("unauthorized")),
+        Err(ServiceError::TokenExpired) => {
+            request_status::mark_token_expired();
+            return Err(ApiError::unauthorized("token_expired"));
+        }
+        Err(err) => return Err(map_auth_error(err)),
+    };
+    if let Some(session_id) = service.session_id_from_token_claim(token) {
+        session_service
+            .ensure_session_active(session_id)
+            .await
+            .map_err(map_auth_error)?;
+    }
+    Ok(subject)
+}
+
+pub async fn resolve_actor_from_parts(
+    ctx: &impl HasAuthServices,
+    bearer: Option<Bearer>,
+    share_token: Option<&str>,
+) -> Option {
+    crate::security::token::resolve_actor_from_parts(ctx, bearer, share_token)
+        .await
+        .ok()
+        .flatten()
+}
+
+pub async fn refresh_middleware(
+    State(ctx): State<IdentityContext>,
+    mut req: Request<Body>,
+    next: Next,
+) -> axum::response::Response {
+    let path = req.uri().path().to_owned();
+    if should_skip_refresh(&path) {
+        return next.run(req).await;
+    }
+
+    let mut refreshed: Option<Arc<IssuedSessionBundle>> = None;
+    let force_refresh = path == "/api/auth/refresh";
+    let access_token = extract_bearer_token(req.headers());
+    let refresh_token = extract_refresh_token(req.headers());
+
+    if force_refresh || access_token.is_some() || refresh_token.is_some() {
+        let auth = ctx.auth_service();
+        let session_service = ctx.session_service();
+
+        let token_expired_or_missing = if force_refresh {
+            true
+        } else if let Some(access_token) = access_token.as_deref() {
+            matches!(
+                auth.subject_from_token(access_token).await,
+                Err(ServiceError::TokenExpired)
+            )
+        } else {
+            refresh_token.is_some()
+        };
+
+        if token_expired_or_missing {
+            if let Some(refresh_token) = refresh_token {
+                let client_ip = extract_client_ip(req.headers());
+                let meta = SessionMetadata {
+                    user_agent: extract_user_agent(req.headers()),
+                    ip_address: client_ip.as_deref(),
+                };
+                match session_service
+                    .refresh_session(&refresh_token, None, meta)
+                    .await
+                {
+                    Ok(bundle) => {
+                        let shared = Arc::new(bundle);
+                        req.extensions_mut()
+                            .insert(AccessTokenOverride(shared.access.token.clone()));
+                        req.extensions_mut()
+                            .insert(RefreshedSession(shared.clone()));
+                        refreshed = Some(shared);
+                    }
+                    Err(ServiceError::Unauthorized) => return unauthorized_token_expired(&ctx),
+                    Err(err) => return map_auth_error(err).into_response(),
+                }
+            } else {
+                return unauthorized_token_expired(&ctx);
+            }
+        }
+    }
+
+    let mut response = next.run(req).await;
+    if let Some(bundle) = refreshed {
+        apply_session_cookies(
+            ctx.auth_service().as_ref(),
+            ctx.cfg.session_cookie_secure,
+            response.headers_mut(),
+            bundle.as_ref(),
+        );
+    }
+    response
+}
+
+pub async fn resolve_actor_from_token_str(
+    ctx: &impl HasAuthServices,
+    token: &str,
+) -> Option {
+    crate::security::token::resolve_actor_from_token_str(ctx, token)
+        .await
+        .ok()
+}
+
+pub(crate) fn map_auth_error(err: ServiceError) -> ApiError {
+    match err {
+        ServiceError::Unauthorized => ApiError::unauthorized("unauthorized"),
+        ServiceError::TokenExpired => ApiError::unauthorized("token_expired"),
+        ServiceError::Forbidden => ApiError::forbidden("forbidden"),
+        ServiceError::NotFound => ApiError::not_found("not_found"),
+        ServiceError::Conflict => ApiError::conflict("conflict"),
+        ServiceError::BadRequest(code) => ApiError::bad_request(code).with_message(code),
+        ServiceError::Unexpected(_) => {
+            ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error")
+        }
+    }
+}
diff --git a/api/crates/presentation/src/http/identity/auth/tests.rs b/api/crates/presentation/src/http/identity/auth/tests.rs
new file mode 100644
index 00000000..323ce76d
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/tests.rs
@@ -0,0 +1,39 @@
+use axum::http::{HeaderMap, HeaderValue, header};
+use chrono::Utc;
+
+use super::cookies::{OAUTH_STATE_COOKIE_NAME, OAUTH_STATE_TTL_SECS, validate_oauth_state_cookie};
+use application::identity::services::auth::external::ExternalAuthProviderKind;
+
+fn cookie_headers(provider: ExternalAuthProviderKind, state: &str, issued_at: i64) -> HeaderMap {
+    let mut headers = HeaderMap::new();
+    let raw_value = format!(
+        "{}={}:{}:{}",
+        OAUTH_STATE_COOKIE_NAME,
+        provider.as_str(),
+        state,
+        issued_at
+    );
+    headers.insert(
+        header::COOKIE,
+        HeaderValue::from_str(&raw_value).expect("header"),
+    );
+    headers
+}
+
+#[test]
+fn oauth_state_cookie_roundtrip() {
+    let provider = ExternalAuthProviderKind::Github;
+    let state = "state-token";
+    let issued = Utc::now().timestamp();
+    let headers = cookie_headers(provider, state, issued);
+    assert!(validate_oauth_state_cookie(&headers, provider, state).is_ok());
+}
+
+#[test]
+fn oauth_state_cookie_rejects_expired() {
+    let provider = ExternalAuthProviderKind::Github;
+    let state = "expired";
+    let issued = Utc::now().timestamp() - (OAUTH_STATE_TTL_SECS + 10);
+    let headers = cookie_headers(provider, state, issued);
+    assert!(validate_oauth_state_cookie(&headers, provider, state).is_err());
+}
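One case the suite above leaves untested: a state issued for one provider must not validate for another. A sketch (assumes a `Google` variant exists alongside `Github`, as the "e.g., google" handler docs suggest):

#[test]
fn oauth_state_cookie_rejects_provider_mismatch() {
    let state = "state-token";
    let issued = Utc::now().timestamp();
    // Cookie minted for Github must fail validation against Google.
    let headers = cookie_headers(ExternalAuthProviderKind::Github, state, issued);
    assert!(validate_oauth_state_cookie(&headers, ExternalAuthProviderKind::Google, state).is_err());
}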
diff --git a/api/crates/presentation/src/http/identity/auth/types.rs b/api/crates/presentation/src/http/identity/auth/types.rs
new file mode 100644
index 00000000..36a8b6c0
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/auth/types.rs
@@ -0,0 +1,115 @@
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+use uuid::Uuid;
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct RegisterRequest {
+    pub email: String,
+    pub name: String,
+    pub password: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct UserResponse {
+    pub id: Uuid,
+    pub email: String,
+    pub name: String,
+    pub workspaces: Vec<WorkspaceMembershipResponse>,
+    pub active_workspace_id: Option<Uuid>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub active_workspace: Option<WorkspaceMembershipResponse>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub active_workspace_permissions: Vec<String>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct SessionResponse {
+    pub id: Uuid,
+    pub workspace_id: Uuid,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub user_agent: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ip_address: Option<String>,
+    pub remember_me: bool,
+    pub created_at: DateTime<Utc>,
+    pub last_seen_at: DateTime<Utc>,
+    pub expires_at: DateTime<Utc>,
+    pub current: bool,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct RefreshResponse {
+    pub access_token: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct AuthProviderInfoResponse {
+    pub id: String,
+    pub requires_state: bool,
+    pub client_ids: Vec<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub redirect_uri: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub authorization_url: Option<String>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub scopes: Vec<String>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct AuthProvidersResponse {
+    pub providers: Vec<AuthProviderInfoResponse>,
+}
+
+#[derive(Debug, Serialize, ToSchema, Clone)]
+pub struct WorkspaceMembershipResponse {
+    pub id: Uuid,
+    pub name: String,
+    pub slug: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub icon: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    pub is_personal: bool,
+    pub role_kind: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub system_role: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub custom_role_id: Option<Uuid>,
+    pub is_default: bool,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct LoginRequest {
+    pub email: String,
+    pub password: String,
+    #[serde(default)]
+    pub remember_me: bool,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct LoginResponse {
+    pub access_token: String,
+    pub user: UserResponse,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct OAuthLoginRequest {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub credential: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub code: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub redirect_uri: Option<String>,
+    #[serde(default)]
+    pub remember_me: bool,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub state: Option<String>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct OAuthStateResponse {
+    pub state: String,
+}
diff --git a/api/crates/presentation/src/http/identity/mod.rs b/api/crates/presentation/src/http/identity/mod.rs
new file mode 100644
index 00000000..f221d7e6
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/mod.rs
@@ -0,0 +1,3 @@
+pub mod api_tokens;
+pub mod auth;
+pub mod shortcuts;
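Worth noting how the serde attributes on `UserResponse` shape the JSON: `None` options and empty vectors vanish from the payload entirely. A minimal reproduction:

```rust
use serde::Serialize;

// Same attribute pattern as UserResponse: None/empty fields are omitted.
#[derive(Serialize)]
struct Mini {
    id: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    active_workspace: Option<String>,
    #[serde(skip_serializing_if = "Vec::is_empty")]
    permissions: Vec<String>,
}

fn main() {
    let json = serde_json::to_string(&Mini {
        id: 1,
        active_workspace: None,
        permissions: vec![],
    })
    .unwrap();
    assert_eq!(json, r#"{"id":1}"#);
}
```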
diff --git a/api/crates/presentation/src/http/identity/shortcuts/handlers/mod.rs b/api/crates/presentation/src/http/identity/shortcuts/handlers/mod.rs
new file mode 100644
index 00000000..8571e6f1
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/shortcuts/handlers/mod.rs
@@ -0,0 +1,59 @@
+use axum::{Json, extract::State};
+
+use crate::context::IdentityContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::WorkspaceAuth;
+use application::core::services::errors::ServiceError;
+
+use super::types::{UpdateUserShortcutRequest, UserShortcutResponse};
+
+fn map_shortcut_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error(err, "user_shortcut_service_error")
+}
+
+#[utoipa::path(
+    get,
+    path = "/api/me/shortcuts",
+    tag = "Auth",
+    responses((status = 200, body = UserShortcutResponse))
+)]
+pub async fn get_user_shortcuts(
+    State(ctx): State<IdentityContext>,
+    auth: WorkspaceAuth,
+) -> Result<Json<UserShortcutResponse>, ApiError> {
+    let service = ctx.user_shortcut_service();
+    let profile = service
+        .get_profile(auth.workspace_id, auth.user_id, &auth.permissions)
+        .await
+        .map_err(map_shortcut_error)?;
+    let response = profile
+        .map(UserShortcutResponse::from)
+        .unwrap_or_else(UserShortcutResponse::empty);
+    Ok(Json(response))
+}
+
+#[utoipa::path(
+    put,
+    path = "/api/me/shortcuts",
+    tag = "Auth",
+    request_body = UpdateUserShortcutRequest,
+    responses((status = 200, body = UserShortcutResponse))
+)]
+pub async fn update_user_shortcuts(
+    State(ctx): State<IdentityContext>,
+    auth: WorkspaceAuth,
+    Json(payload): Json<UpdateUserShortcutRequest>,
+) -> Result<Json<UserShortcutResponse>, ApiError> {
+    let service = ctx.user_shortcut_service();
+    let result = service
+        .update_profile(
+            auth.workspace_id,
+            auth.user_id,
+            &auth.permissions,
+            payload.bindings,
+            payload.leader_key,
+        )
+        .await
+        .map_err(map_shortcut_error)?;
+    Ok(Json(UserShortcutResponse::from(result)))
+}
diff --git a/api/crates/presentation/src/http/identity/shortcuts/mod.rs b/api/crates/presentation/src/http/identity/shortcuts/mod.rs
new file mode 100644
index 00000000..415a6459
--- /dev/null
+++ b/api/crates/presentation/src/http/identity/shortcuts/mod.rs
@@ -0,0 +1,22 @@
+mod handlers;
+pub mod types;
+
+use axum::{Router, routing::get};
+
+use crate::context::AppContext;
+
+pub use handlers::{get_user_shortcuts, update_user_shortcuts};
+pub use types::*;
+
+pub mod openapi {
+    pub use super::handlers::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route(
+            "/me/shortcuts",
+            get(get_user_shortcuts).put(update_user_shortcuts),
+        )
+        .with_state(ctx)
+}
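The router registers `/me/shortcuts` while the utoipa annotations document `/api/me/shortcuts`, so the `/api` prefix is presumably added when these routers are merged in the bootstrap crate. A sketch of that wiring with a stub handler:

```rust
use axum::{Router, routing::get};

// Hypothetical composition: a feature router registers "/me/shortcuts" and a
// parent router (bootstrap, in this workspace) nests it under "/api".
fn api_router() -> Router {
    let shortcuts = Router::new().route("/me/shortcuts", get(|| async { "shortcuts" }));
    Router::new().nest("/api", shortcuts)
}

fn main() {
    let _ = api_router(); // type-checks the composition; no server started
}
```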
"Value::default")] + pub bindings: Value, + #[schema(example = "")] + pub leader_key: Option, +} diff --git a/api/crates/presentation/src/http/mod.rs b/api/crates/presentation/src/http/mod.rs new file mode 100644 index 00000000..3822990b --- /dev/null +++ b/api/crates/presentation/src/http/mod.rs @@ -0,0 +1,8 @@ +pub mod core; +pub mod documents; +pub mod error; +pub mod extractors; +pub mod git; +pub mod identity; +pub mod plugins; +pub mod workspaces; diff --git a/api/crates/presentation/src/http/plugins/assets.rs b/api/crates/presentation/src/http/plugins/assets.rs new file mode 100644 index 00000000..73974b57 --- /dev/null +++ b/api/crates/presentation/src/http/plugins/assets.rs @@ -0,0 +1,105 @@ +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, header}, + response::{IntoResponse, Response}, +}; +use uuid::Uuid; + +use crate::context::PluginsContext; +use crate::http::error::ApiError; +use application::plugins::services::management::{AssetRequestScope, PluginAssetRequest}; + +use super::util::map_plugin_service_error; + +#[utoipa::path( + get, + path = "/api/plugin-assets", + params(("token" = Option, Query, description = "Share token (optional)")), + responses((status = 200, description = "Plugin asset")), + tag = "Plugins", + operation_id = "pluginsGetAsset" +)] +pub async fn get_plugin_asset( + State(ctx): State, + Query(params): Query>, +) -> Result { + let scope_raw = params + .get("scope") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_scope"))?; + let plugin_id = params + .get("plugin") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_plugin"))?; + let version = params + .get("version") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_version"))?; + let path = params + .get("path") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_path"))?; + let exp = params + .get("exp") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_exp"))?; + let expires_at = exp + .parse::() + .map_err(|_| ApiError::bad_request("invalid_exp"))?; + let sig = params + .get("sig") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_sig"))?; + let share_owned = params + .get("share") + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()); + + let scope = match scope_raw { + "global" => AssetRequestScope::Global, + "user" => { + let owner_str = params + .get("owner") + .map(|s| s.as_str()) + .ok_or(ApiError::bad_request("missing_owner"))?; + let owner_id = + Uuid::parse_str(owner_str).map_err(|_| ApiError::bad_request("invalid_owner"))?; + AssetRequestScope::User { + owner_id, + share_token: share_owned.as_deref(), + } + } + _ => return Err(ApiError::bad_request("invalid_scope")), + }; + + let payload = ctx + .plugin_management() + .fetch_asset(PluginAssetRequest { + scope, + plugin_id, + version, + path, + expires_at, + signature: sig, + }) + .await + .map_err(map_plugin_service_error)?; + + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + HeaderValue::from_str(&payload.content_type) + .unwrap_or_else(|_| HeaderValue::from_static("application/octet-stream")), + ); + headers.insert( + header::CACHE_CONTROL, + HeaderValue::from_static("public, max-age=60"), + ); + headers.insert( + header::HeaderName::from_static("x-content-type-options"), + HeaderValue::from_static("nosniff"), + ); + + Ok((headers, payload.bytes).into_response()) +} diff --git a/api/crates/presentation/src/http/plugins/exec.rs b/api/crates/presentation/src/http/plugins/exec.rs new file 
diff --git a/api/crates/presentation/src/http/plugins/exec.rs b/api/crates/presentation/src/http/plugins/exec.rs
new file mode 100644
index 00000000..66baf252
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/exec.rs
@@ -0,0 +1,103 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::HeaderMap,
+};
+use serde_json::json;
+
+use crate::context::PluginsContext;
+use crate::http::error::ApiError;
+use crate::http::identity::auth::Bearer;
+use application::core::services::access;
+use domain::access::permissions::PERM_PLUGIN_RUN;
+use domain::documents::doc_type::DocumentType;
+
+use super::types::{ExecBody, ExecResultResponse, ensure_valid_plugin_id, extract_doc_id};
+use super::util::{map_plugin_service_error, resolve_plugin_user_context};
+
+#[utoipa::path(
+    post,
+    path = "/api/plugins/{plugin}/exec/{action}",
+    request_body = ExecBody,
+    params(
+        ("plugin" = String, Path, description = "Plugin ID"),
+        ("action" = String, Path, description = "Action")
+    ),
+    responses((status = 200, body = ExecResultResponse)),
+    tag = "Plugins",
+    operation_id = "pluginsExecAction"
+)]
+pub async fn exec_action(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Path((plugin, action)): Path<(String, String)>,
+    Json(body): Json<ExecBody>,
+) -> Result<Json<ExecResultResponse>, ApiError> {
+    ensure_valid_plugin_id(&plugin)?;
+    let bearer_token = bearer.0;
+    let plugin_ctx =
+        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
+            .await?;
+    let actor = plugin_ctx.actor.clone();
+    let doc_id_from_payload = body.payload.as_ref().and_then(extract_doc_id);
+    let doc_id_from_share = if doc_id_from_payload.is_none() {
+        if let access::Actor::ShareToken(token) = &actor {
+            ctx.share_service()
+                .resolve_share_context(token)
+                .await
+                .map_err(map_plugin_service_error)?
+                .and_then(|ctx| {
+                    if ctx.shared_type == DocumentType::Document {
+                        Some(ctx.shared_id)
+                    } else {
+                        None
+                    }
+                })
+        } else {
+            None
+        }
+    } else {
+        None
+    };
+    let effective_doc_id = doc_id_from_payload.or(doc_id_from_share);
+    if let Some(doc_id) = effective_doc_id {
+        let auth = ctx.authorization();
+        if let access::Actor::ShareToken(_) = &actor {
+            auth.require_view(&actor, doc_id)
+                .await
+                .map_err(|_| ApiError::forbidden("forbidden"))?;
+        } else {
+            auth.require_edit(&actor, doc_id)
+                .await
+                .map_err(|_| ApiError::forbidden("forbidden"))?;
+        }
+    }
+    let allowed_doc_id = match &actor {
+        access::Actor::ShareToken(_) => effective_doc_id,
+        _ => None,
+    };
+    let exec_service = ctx.plugin_execution_service();
+    match exec_service
+        .execute_action(
+            plugin_ctx.workspace_id,
+            plugin_ctx.user_id,
+            &plugin_ctx.permissions,
+            &plugin,
+            &action,
+            body.payload.clone(),
+            allowed_doc_id,
+            &actor,
+        )
+        .await
+        .map_err(map_plugin_service_error)?
+    {
+        Some(result) => Ok(Json(ExecResultResponse::from(result))),
+        None => Ok(Json(ExecResultResponse {
+            ok: false,
+            data: None,
+            effects: vec![],
+            error: Some(json!({ "code": "UNKNOWN_ACTION" })),
+        })),
+    }
+}
diff --git a/api/crates/presentation/src/http/plugins/install.rs b/api/crates/presentation/src/http/plugins/install.rs
new file mode 100644
index 00000000..3daad4f1
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/install.rs
@@ -0,0 +1,116 @@
+use axum::{
+    Json,
+    extract::State,
+    http::{HeaderMap, StatusCode},
+};
+
+use crate::context::PluginsContext;
+use crate::http::error::ApiError;
+use crate::http::identity::auth::Bearer;
+use application::plugins::use_cases::install_from_url::InstallPluginError;
+use domain::access::permissions::{PERM_PLUGIN_INSTALL, PERM_PLUGIN_UNINSTALL};
+
+use super::types::{InstallFromUrlBody, InstallResponse, UninstallBody, ensure_valid_plugin_id};
+use super::util::{map_plugin_service_error, resolve_plugin_user_context};
+
+#[utoipa::path(
+    post,
+    path = "/api/me/plugins/install-from-url",
+    request_body = InstallFromUrlBody,
+    responses((status = 200, body = InstallResponse)),
+    tag = "Plugins",
+    operation_id = "pluginsInstallFromUrl"
+)]
+pub async fn install_from_url(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Json(body): Json<InstallFromUrlBody>,
+) -> Result<Json<InstallResponse>, ApiError> {
+    let bearer_token_raw = bearer.0;
+    let plugin_ctx = resolve_plugin_user_context(
+        &ctx,
+        &headers,
+        bearer_token_raw.as_str(),
+        Some(PERM_PLUGIN_INSTALL),
+    )
+    .await?;
+
+    let management = ctx.plugin_management();
+
+    match management
+        .install_from_url(
+            plugin_ctx.workspace_id,
+            plugin_ctx.user_id,
+            &plugin_ctx.permissions,
+            &body.url,
+            body.token.as_deref(),
+        )
+        .await
+    {
+        Ok(installed) => Ok(Json(InstallResponse {
+            id: installed.id,
+            version: installed.version,
+        })),
+        Err(err) => {
+            tracing::error!(error = ?err, "failed to install plugin from url");
+            match err {
+                InstallPluginError::Download(_) => Err(ApiError::new(
+                    StatusCode::BAD_GATEWAY,
+                    "plugin_download_failed",
+                )),
+                InstallPluginError::Install(inner) => match inner {
+                    application::plugins::ports::plugin_installer::PluginInstallError::InvalidPackage(_) => {
+                        Err(ApiError::bad_request("invalid_plugin_package"))
+                    }
+                    application::plugins::ports::plugin_installer::PluginInstallError::Storage(_) => {
+                        Err(ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error"))
+                    }
+                },
+                InstallPluginError::Persist(_) => {
+                    Err(ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error"))
+                }
+                InstallPluginError::Event(_) => {
+                    Err(ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error"))
+                }
+            }
+        }
+    }
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/me/plugins/uninstall",
+    request_body = UninstallBody,
+    responses((status = 204)),
+    tag = "Plugins",
+    operation_id = "pluginsUninstall"
+)]
+pub async fn uninstall(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Json(body): Json<UninstallBody>,
+) -> Result<StatusCode, ApiError> {
+    let bearer_token_raw = bearer.0;
+    let plugin_ctx = resolve_plugin_user_context(
+        &ctx,
+        &headers,
+        bearer_token_raw.as_str(),
+        Some(PERM_PLUGIN_UNINSTALL),
+    )
+    .await?;
+    let UninstallBody { id } = body;
+    let trimmed_id = id.trim();
+    ensure_valid_plugin_id(trimmed_id)?;
+    ctx.plugin_management()
+        .uninstall(
+            plugin_ctx.workspace_id,
+            plugin_ctx.user_id,
+            &plugin_ctx.permissions,
+            trimmed_id,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+    Ok(StatusCode::NO_CONTENT)
+}
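Request shape for the install endpoint, derived from `InstallFromUrlBody` above (`url` required, `token` optional):

```rust
// Body for POST /api/me/plugins/install-from-url. The token field may be
// omitted entirely for public packages; the values here are illustrative.
fn main() {
    let body = serde_json::json!({
        "url": "https://example.com/my-plugin.zip",
        "token": "example-access-token"
    });
    assert!(body["url"].is_string());
    println!("{body}");
}
```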
diff --git a/api/crates/presentation/src/http/plugins/kv.rs b/api/crates/presentation/src/http/plugins/kv.rs
new file mode 100644
index 00000000..8a3443d0
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/kv.rs
@@ -0,0 +1,109 @@
+use crate::context::PluginsContext;
+use crate::http::error::ApiError;
+use crate::http::identity::auth::Bearer;
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::{HeaderMap, StatusCode},
+};
+use domain::access::permissions::PERM_PLUGIN_RUN;
+
+use super::types::{KvPath, KvValueBody, KvValueResponse, ensure_valid_plugin_id};
+use super::util::{
+    PERMISSION_DOC_READ, PERMISSION_DOC_WRITE, map_plugin_service_error,
+    resolve_plugin_user_context,
+};
+use domain::plugins::scope::PluginScope;
+
+#[utoipa::path(
+    get,
+    path = "/api/plugins/{plugin}/docs/{doc_id}/kv/{key}",
+    params(("plugin" = String, Path, description = "Plugin ID"), ("doc_id" = Uuid, Path, description = "Document ID"), ("key" = String, Path, description = "Key")),
+    responses((status = 200, body = KvValueResponse)),
+    tag = "Plugins",
+    operation_id = "pluginsGetKv"
+)]
+pub async fn get_kv_value(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Path(p): Path<KvPath>,
+) -> Result<Json<KvValueResponse>, ApiError> {
+    ensure_valid_plugin_id(&p.plugin)?;
+    let bearer_token = bearer.0;
+    let plugin_ctx =
+        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
+            .await?;
+    let actor = plugin_ctx.actor.clone();
+    ctx.authorization()
+        .require_view(&actor, p.doc_id)
+        .await
+        .map_err(|_| ApiError::forbidden("forbidden"))?;
+
+    ctx.plugin_permissions()
+        .ensure(
+            Some(plugin_ctx.workspace_id),
+            &p.plugin,
+            PERMISSION_DOC_READ,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+
+    let plugin_data = ctx.plugin_data_service();
+    let val = plugin_data
+        .get_kv(&p.plugin, PluginScope::Doc, Some(p.doc_id), &p.key)
+        .await
+        .map_err(map_plugin_service_error)?
+        .unwrap_or(serde_json::Value::Null);
+    Ok(Json(KvValueResponse { value: val }))
+}
+
+#[utoipa::path(
+    put,
+    path = "/api/plugins/{plugin}/docs/{doc_id}/kv/{key}",
+    request_body = KvValueBody,
+    params(("plugin" = String, Path, description = "Plugin ID"), ("doc_id" = Uuid, Path, description = "Document ID"), ("key" = String, Path, description = "Key")),
+    responses((status = 204)),
+    tag = "Plugins",
+    operation_id = "pluginsPutKv"
+)]
+pub async fn put_kv_value(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Path(p): Path<KvPath>,
+    Json(body): Json<KvValueBody>,
+) -> Result<StatusCode, ApiError> {
+    ensure_valid_plugin_id(&p.plugin)?;
+    let bearer_token = bearer.0;
+    let plugin_ctx =
+        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
+            .await?;
+    let actor = plugin_ctx.actor.clone();
+    ctx.authorization()
+        .require_edit(&actor, p.doc_id)
+        .await
+        .map_err(|_| ApiError::forbidden("forbidden"))?;
+
+    ctx.plugin_permissions()
+        .ensure(
+            Some(plugin_ctx.workspace_id),
+            &p.plugin,
+            PERMISSION_DOC_WRITE,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+
+    let plugin_data = ctx.plugin_data_service();
+    plugin_data
+        .put_kv(
+            &p.plugin,
+            PluginScope::Doc,
+            Some(p.doc_id),
+            &p.key,
+            &body.value,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+    Ok(StatusCode::NO_CONTENT)
+}
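Wire shapes for the two KV endpoints: `PUT` sends the `KvValueBody` envelope and `GET` returns the same envelope, with a never-written key surfacing as `{"value": null}` (the `unwrap_or` above) rather than a 404:

```rust
// PUT body (KvValueBody) and the GET response for a missing key
// (KvValueResponse). The nested value is illustrative.
fn main() {
    let put_body = serde_json::json!({ "value": { "collapsed": true } });
    let missing = serde_json::json!({ "value": null });
    assert_eq!(put_body["value"]["collapsed"], true);
    assert!(missing["value"].is_null());
}
```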
diff --git a/api/crates/presentation/src/http/plugins/manifest.rs b/api/crates/presentation/src/http/plugins/manifest.rs
new file mode 100644
index 00000000..7072840d
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/manifest.rs
@@ -0,0 +1,34 @@
+use axum::{Json, extract::State, http::HeaderMap};
+
+use crate::context::PluginsContext;
+use crate::http::error::ApiError;
+use crate::http::identity::auth::Bearer;
+
+use super::types::ManifestItem;
+use super::util::resolve_plugin_user_context;
+
+#[utoipa::path(
+    get,
+    path = "/api/me/plugins/manifest",
+    responses((status = 200, body = [ManifestItem])),
+    tag = "Plugins",
+    operation_id = "pluginsGetManifest"
+)]
+pub async fn get_manifest(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+) -> Result<Json<Vec<ManifestItem>>, ApiError> {
+    let bearer_token = bearer.0;
+    let plugin_ctx =
+        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), None).await?;
+    let manifests = ctx
+        .plugin_management()
+        .manifests_for_workspace(plugin_ctx.workspace_id, plugin_ctx.user_id)
+        .await
+        .map_err(super::util::map_plugin_service_error)?
+        .into_iter()
+        .map(ManifestItem::from)
+        .collect();
+    Ok(Json(manifests))
+}
diff --git a/api/crates/presentation/src/http/plugins/mod.rs b/api/crates/presentation/src/http/plugins/mod.rs
new file mode 100644
index 00000000..cd713a4d
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/mod.rs
@@ -0,0 +1,62 @@
+mod assets;
+mod exec;
+mod install;
+mod kv;
+mod manifest;
+mod records;
+pub mod types;
+mod updates;
+mod util;
+
+use axum::{
+    Router,
+    routing::{get, patch, post},
+};
+
+use crate::context::AppContext;
+
+pub use assets::get_plugin_asset;
+pub use exec::exec_action;
+pub use install::{install_from_url, uninstall};
+pub use kv::{get_kv_value, put_kv_value};
+pub use manifest::get_manifest;
+pub use records::{create_record, delete_record, list_records, update_record};
+pub use types::*;
+pub use updates::sse_updates;
+
+pub mod openapi {
+    pub use super::assets::*;
+    pub use super::exec::*;
+    pub use super::install::*;
+    pub use super::kv::*;
+    pub use super::manifest::*;
+    pub use super::records::*;
+    pub use super::updates::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        // Manifest for current user (stubbed)
+        .route("/me/plugins/manifest", get(get_manifest))
+        // SSE updates (stubbed)
+        .route("/me/plugins/updates", get(sse_updates))
+        // Generic exec endpoint
+        .route("/plugins/:plugin/exec/:action", post(exec_action))
+        .route("/me/plugins/install-from-url", post(install_from_url))
+        .route("/me/plugins/uninstall", post(uninstall))
+        // Generic records API
+        .route(
+            "/plugins/:plugin/docs/:doc_id/records/:kind",
+            get(list_records).post(create_record),
+        )
+        .route(
+            "/plugins/:plugin/records/:id",
+            patch(update_record).delete(delete_record),
+        )
+        .route(
+            "/plugins/:plugin/docs/:doc_id/kv/:key",
+            get(get_kv_value).put(put_kv_value),
+        )
+        .route("/plugin-assets", get(get_plugin_asset))
+        .with_state(ctx)
+}
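The `:param` captures above are axum 0.7 route syntax. A standalone sketch of the exec route shape with unit state and a stub handler:

```rust
use axum::{Router, extract::Path, routing::post};

// Stub showing the tuple capture used by the exec route above.
async fn exec(Path((plugin, action)): Path<(String, String)>) -> String {
    format!("would dispatch {action} on {plugin}")
}

fn main() {
    let _app: Router = Router::new().route("/plugins/:plugin/exec/:action", post(exec));
}
```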
diff --git a/api/crates/presentation/src/http/plugins/records.rs b/api/crates/presentation/src/http/plugins/records.rs
new file mode 100644
index 00000000..053af973
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/records.rs
@@ -0,0 +1,286 @@
+use axum::{
+    Json,
+    extract::{Path, Query, State},
+    http::{HeaderMap, StatusCode},
+};
+use serde_json::json;
+use std::collections::HashMap;
+
+use crate::context::PluginsContext;
+use crate::http::error::ApiError;
+use crate::http::identity::auth::Bearer;
+use domain::access::permissions::PERM_PLUGIN_RUN;
+
+use super::types::{
+    CreateRecordBody, RecordsPath, RecordsResponse, UpdateRecordBody, UpdateRecordPath,
+    ensure_valid_plugin_id,
+};
+use super::util::{
+    PERMISSION_DOC_READ, PERMISSION_DOC_WRITE, map_plugin_service_error,
+    resolve_plugin_user_context,
+};
+use domain::plugins::scope::PluginRecordScope;
+
+#[utoipa::path(
+    get,
+    path = "/api/plugins/{plugin}/docs/{doc_id}/records/{kind}",
+    params(
+        ("plugin" = String, Path, description = "Plugin ID"),
+        ("doc_id" = Uuid, Path, description = "Document ID"),
+        ("kind" = String, Path, description = "Record kind"),
+        ("limit" = Option<i64>, Query, description = "Limit"),
+        ("offset" = Option<i64>, Query, description = "Offset")
+    ),
+    responses((status = 200, body = RecordsResponse)),
+    tag = "Plugins"
+)]
+pub async fn list_records(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Query(params): Query<HashMap<String, String>>,
+    Path(p): Path<RecordsPath>,
+) -> Result<Json<RecordsResponse>, ApiError> {
+    ensure_valid_plugin_id(&p.plugin)?;
+    let bearer_token = bearer.0;
+    let plugin_ctx =
+        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
+            .await?;
+    let actor = plugin_ctx.actor;
+    ctx.authorization()
+        .require_view(&actor, p.doc_id)
+        .await
+        .map_err(|_| ApiError::forbidden("forbidden"))?;
+
+    let limit = params
+        .get("limit")
+        .and_then(|s| s.parse::<i64>().ok())
+        .unwrap_or(50)
+        .clamp(1, 200);
+    let offset = params
+        .get("offset")
+        .and_then(|s| s.parse::<i64>().ok())
+        .unwrap_or(0)
+        .max(0);
+
+    ctx.plugin_permissions()
+        .ensure(
+            Some(plugin_ctx.workspace_id),
+            &p.plugin,
+            PERMISSION_DOC_READ,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+
+    let plugin_data = ctx.plugin_data_service();
+    let rows = plugin_data
+        .list_records(
+            &p.plugin,
+            PluginRecordScope::Doc,
+            p.doc_id,
+            &p.kind,
+            limit,
+            offset,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+    let mut items = Vec::with_capacity(rows.len());
+    for r in rows {
+        items.push(json!({
+            "id": r.id,
+            "plugin": r.plugin,
+            "kind": r.kind,
+            "data": r.data,
+            "createdAt": r.created_at,
+            "updatedAt": r.updated_at,
+        }));
+    }
+    Ok(Json(RecordsResponse { items }))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/plugins/{plugin}/docs/{doc_id}/records/{kind}",
+    request_body = CreateRecordBody,
+    params(
+        ("plugin" = String, Path, description = "Plugin ID"),
+        ("doc_id" = Uuid, Path, description = "Document ID"),
+        ("kind" = String, Path, description = "Record kind")
+    ),
+    responses((status = 200, body = serde_json::Value)),
+    tag = "Plugins",
+    operation_id = "pluginsCreateRecord"
+)]
+pub async fn create_record(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Path(p): Path<RecordsPath>,
+    Json(body): Json<CreateRecordBody>,
+) -> Result<Json<serde_json::Value>, ApiError> {
+    ensure_valid_plugin_id(&p.plugin)?;
+    let bearer_token = bearer.0;
+    let plugin_ctx =
+        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
+            .await?;
+    let actor = plugin_ctx.actor.clone();
+    ctx.authorization()
+        .require_edit(&actor, p.doc_id)
+        .await
+        .map_err(|_| ApiError::forbidden("forbidden"))?;
+
+    ctx.plugin_permissions()
+        .ensure(
+            Some(plugin_ctx.workspace_id),
+            &p.plugin,
+            PERMISSION_DOC_WRITE,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+
+    let mut data = body.data;
+    data["authorId"] = json!(plugin_ctx.user_id);
+
+    let plugin_data = ctx.plugin_data_service();
+    let rec = plugin_data
+        .create_record(&p.plugin, PluginRecordScope::Doc, p.doc_id, &p.kind, &data)
+        .await
+        .map_err(map_plugin_service_error)?;
+    Ok(Json(json!({
+        "id": rec.id,
+        "data": rec.data,
+        "createdAt": rec.created_at,
+        "updatedAt": rec.updated_at,
+    })))
+}
+
+#[utoipa::path(
+    patch,
+    path = "/api/plugins/{plugin}/records/{id}",
+    request_body = UpdateRecordBody,
+    params(("plugin" = String, Path, description = "Plugin ID"), ("id" = Uuid, Path, description = "Record ID")),
+    responses((status = 200, body = serde_json::Value)),
+    tag = "Plugins",
+    operation_id = "pluginsUpdateRecord"
+)]
+pub async fn update_record(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Path(p): Path<UpdateRecordPath>,
+    Json(body): Json<UpdateRecordBody>,
+) -> Result<Json<serde_json::Value>, ApiError> {
+    ensure_valid_plugin_id(&p.plugin)?;
+    let bearer_token_raw = bearer.0;
+    let plugin_ctx = resolve_plugin_user_context(
+        &ctx,
+        &headers,
+        bearer_token_raw.as_str(),
+        Some(PERM_PLUGIN_RUN),
+    )
+    .await?;
+    let actor = plugin_ctx.actor.clone();
+
+    let plugin_data = ctx.plugin_data_service();
+    let rec = plugin_data
+        .get_record(p.id)
+        .await
+        .map_err(map_plugin_service_error)?
+        .ok_or(ApiError::not_found("record_not_found"))?;
+
+    if rec.plugin != p.plugin {
+        return Err(ApiError::not_found("record_not_found"));
+    }
+    if rec.scope != PluginRecordScope::Doc {
+        return Err(ApiError::not_found("record_not_found"));
+    }
+
+    ctx.authorization()
+        .require_edit(&actor, rec.scope_id)
+        .await
+        .map_err(|_| ApiError::forbidden("forbidden"))?;
+
+    ctx.plugin_permissions()
+        .ensure(
+            Some(plugin_ctx.workspace_id),
+            &p.plugin,
+            PERMISSION_DOC_WRITE,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+
+    let updated = plugin_data
+        .update_record(p.id, &body.patch)
+        .await
+        .map_err(map_plugin_service_error)?
+        .ok_or(ApiError::not_found("record_not_found"))?;
+
+    Ok(Json(json!({
+        "id": updated.id,
+        "data": updated.data,
+        "updatedAt": updated.updated_at,
+    })))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/plugins/{plugin}/records/{id}",
+    params(("plugin" = String, Path, description = "Plugin ID"), ("id" = Uuid, Path, description = "Record ID")),
+    responses((status = 204)),
+    tag = "Plugins",
+    operation_id = "pluginsDeleteRecord"
+)]
+pub async fn delete_record(
+    State(ctx): State<PluginsContext>,
+    bearer: Bearer,
+    headers: HeaderMap,
+    Path(p): Path<UpdateRecordPath>,
+) -> Result<StatusCode, ApiError> {
+    ensure_valid_plugin_id(&p.plugin)?;
+    let bearer_token_raw = bearer.0;
+    let plugin_ctx = resolve_plugin_user_context(
+        &ctx,
+        &headers,
+        bearer_token_raw.as_str(),
+        Some(PERM_PLUGIN_RUN),
+    )
+    .await?;
+    let actor = plugin_ctx.actor.clone();
+    let plugin_data = ctx.plugin_data_service();
+    let rec = plugin_data
+        .get_record(p.id)
+        .await
+        .map_err(map_plugin_service_error)?
+        .ok_or(ApiError::not_found("record_not_found"))?;
+
+    if rec.plugin != p.plugin {
+        return Err(ApiError::not_found("record_not_found"));
+    }
+    if rec.scope != PluginRecordScope::Doc {
+        return Err(ApiError::not_found("record_not_found"));
+    }
+
+    ctx.authorization()
+        .require_edit(&actor, rec.scope_id)
+        .await
+        .map_err(|_| ApiError::forbidden("forbidden"))?;
+
+    ctx.plugin_permissions()
+        .ensure(
+            Some(plugin_ctx.workspace_id),
+            &p.plugin,
+            PERMISSION_DOC_WRITE,
+        )
+        .await
+        .map_err(map_plugin_service_error)?;
+
+    let ok = plugin_data
+        .delete_record(p.id)
+        .await
+        .map_err(map_plugin_service_error)?;
+    if ok {
+        Ok(StatusCode::NO_CONTENT)
+    } else {
+        Err(ApiError::not_found("record_not_found"))
+    }
+}
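The records handlers build their JSON by hand with `json!`, which is where the camelCase keys come from — no serde rename rules are involved. One list item looks roughly like this (plugin and kind values illustrative; the ids are Uuids in the real payload, and create_record stamps `authorId` into `data`):

```rust
fn main() {
    let item = serde_json::json!({
        "id": "00000000-0000-0000-0000-000000000000",
        "plugin": "kanban",
        "kind": "card",
        "data": { "title": "Example", "authorId": "00000000-0000-0000-0000-000000000000" },
        "createdAt": "2024-01-01T00:00:00Z",
        "updatedAt": "2024-01-01T00:00:00Z"
    });
    assert_eq!(item["kind"], "card");
}
```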
diff --git a/api/crates/presentation/src/http/plugins/types.rs b/api/crates/presentation/src/http/plugins/types.rs
new file mode 100644
index 00000000..9cb7f588
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/types.rs
@@ -0,0 +1,143 @@
+use serde::{Deserialize, Serialize};
+use utoipa::ToSchema;
+use uuid::Uuid;
+
+use application::plugins::dtos::ExecResult;
+use application::plugins::services::management::PluginManifestItem;
+
+pub use super::util::ensure_valid_plugin_id;
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct RecordsPath {
+    pub plugin: String,
+    pub doc_id: Uuid,
+    pub kind: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct RecordsResponse {
+    pub items: Vec<serde_json::Value>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ExecResultResponse {
+    pub ok: bool,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub data: Option<serde_json::Value>,
+    pub effects: Vec<serde_json::Value>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub error: Option<serde_json::Value>,
+}
+
+impl From<ExecResult> for ExecResultResponse {
+    fn from(value: ExecResult) -> Self {
+        Self {
+            ok: value.ok,
+            data: value.data,
+            effects: value.effects,
+            error: value.error,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct ManifestItem {
+    pub id: String,
+    pub name: Option<String>,
+    pub version: String,
+    pub scope: String,
+    pub mounts: Vec<String>,
+    pub frontend: serde_json::Value,
+    pub permissions: Vec<String>,
+    pub config: serde_json::Value,
+    pub ui: serde_json::Value,
+    pub author: Option<String>,
+    pub repository: Option<String>,
+}
+
+impl From<PluginManifestItem> for ManifestItem {
+    fn from(value: PluginManifestItem) -> Self {
+        Self {
+            id: value.id,
+            name: value.name,
+            version: value.version,
+            scope: value.scope.as_str().to_string(),
+            mounts: value.mounts,
+            frontend: value.frontend,
+            permissions: value.permissions,
+            config: value.config,
+            ui: value.ui,
+            author: value.author,
+            repository: value.repository,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct CreateRecordBody {
+    pub data: serde_json::Value,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct UpdateRecordPath {
+    pub plugin: String,
+    pub id: Uuid,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct UpdateRecordBody {
+    pub patch: serde_json::Value,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct KvPath {
+    pub plugin: String,
+    pub doc_id: Uuid,
+    pub key: String,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct KvValueResponse {
+    pub value: serde_json::Value,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct KvValueBody {
+    pub value: serde_json::Value,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct ExecBody {
+    pub payload: Option<serde_json::Value>,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct InstallFromUrlBody {
+    pub url: String,
+    pub token: Option<String>,
+}
+
+#[derive(Debug, Serialize, ToSchema)]
+pub struct InstallResponse {
+    pub id: String,
+    pub version: String,
+}
+
+#[derive(Debug, Deserialize, ToSchema)]
+pub struct UninstallBody {
+    pub id: String,
+}
+
+pub fn extract_doc_id(value: &serde_json::Value) -> Option<Uuid> {
+    value
+        .get("docId")
+        .and_then(|v| v.as_str())
+        .and_then(|s| Uuid::parse_str(s).ok())
+        .or_else(|| {
+            value
+                .get("payload")
+                .and_then(|payload| payload.get("docId"))
+                .and_then(|v| v.as_str())
+                .and_then(|s| Uuid::parse_str(s).ok())
+        })
+}
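`extract_doc_id` accepts the document id either at the top level or nested under `payload`. The same lookup, reproduced standalone with both accepted shapes:

```rust
use uuid::Uuid;

// Same order as extract_doc_id above: top-level "docId" first, then "payload.docId".
fn extract_doc_id(value: &serde_json::Value) -> Option<Uuid> {
    value
        .get("docId")
        .and_then(|v| v.as_str())
        .and_then(|s| Uuid::parse_str(s).ok())
        .or_else(|| {
            value
                .get("payload")
                .and_then(|p| p.get("docId"))
                .and_then(|v| v.as_str())
                .and_then(|s| Uuid::parse_str(s).ok())
        })
}

fn main() {
    let id = "3fa85f64-5717-4562-b3fc-2c963f66afa6";
    let top = serde_json::json!({ "docId": id });
    let nested = serde_json::json!({ "payload": { "docId": id } });
    assert_eq!(extract_doc_id(&top), extract_doc_id(&nested));
    assert!(extract_doc_id(&serde_json::json!({})).is_none());
}
```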
diff --git a/api/crates/presentation/src/http/plugins/updates.rs b/api/crates/presentation/src/http/plugins/updates.rs
new file mode 100644
index 00000000..cd47798d
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/updates.rs
@@ -0,0 +1,38 @@
+use axum::extract::State;
+use axum::http::StatusCode;
+use axum::response::sse::{Event, KeepAlive, Sse};
+use futures_util::stream::{self, Stream, StreamExt};
+use std::time::Duration;
+
+use crate::context::PluginsContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::AuthedUser;
+
+#[utoipa::path(
+    get,
+    path = "/api/me/plugins/updates",
+    tag = "Plugins",
+    responses((status = 200, description = "Plugin event stream", content_type = "text/event-stream"))
+)]
+pub async fn sse_updates(
+    State(ctx): State<PluginsContext>,
+    auth: AuthedUser,
+) -> Result<Sse<impl Stream<Item = Result<Event, std::convert::Infallible>>>, ApiError> {
+    let initial = stream::iter(vec![Ok(Event::default().event("ready").data("{}\n"))]);
+    let event_stream = ctx
+        .subscribe_plugin_events()
+        .await
+        .map_err(|_| ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error"))?;
+    let broadcast = event_stream.filter_map(move |ev| async move {
+        if ev.user_id.is_some() && ev.user_id != Some(auth.user_id) {
+            return None;
+        }
+        let payload = ev.payload.to_string();
+        Some(Ok(Event::default().event("update").data(payload)))
+    });
+    let merged = initial.chain(broadcast);
+    let keepalive = KeepAlive::new()
+        .interval(Duration::from_secs(25))
+        .text(":\n");
+    Ok(Sse::new(merged).keep_alive(keepalive))
+}
diff --git a/api/crates/presentation/src/http/plugins/util.rs b/api/crates/presentation/src/http/plugins/util.rs
new file mode 100644
index 00000000..3d3f11e7
--- /dev/null
+++ b/api/crates/presentation/src/http/plugins/util.rs
@@ -0,0 +1,95 @@
+use axum::http::HeaderMap;
+use uuid::Uuid;
+
+use crate::context::{HasAuthServices, HasShareService, HasWorkspaceService};
+use crate::http::error::ApiError;
+use crate::http::workspaces::scope as workspace_scope;
+use crate::security::token;
+use application::core::services::access;
+use application::core::services::errors::ServiceError;
+use application::plugins::services::management;
+use domain::access::permissions::{PERM_DOC_EDIT, PERM_DOC_VIEW, PERM_PLUGIN_RUN, PermissionSet};
+use domain::documents::share;
+
+pub const PERMISSION_DOC_READ: &str = "doc.read";
+pub const PERMISSION_DOC_WRITE: &str = "doc.write";
+
+#[derive(Clone)]
+pub struct PluginUserContext {
+    pub workspace_id: Uuid,
+    pub user_id: Uuid,
+    pub permissions: PermissionSet,
+    pub actor: access::Actor,
+}
+
+pub async fn resolve_plugin_user_context(
+    ctx: &(impl HasAuthServices + HasShareService + HasWorkspaceService),
+    headers: &HeaderMap,
+    bearer_token: &str,
+    required_permission: Option<&str>,
+) -> Result<PluginUserContext, ApiError> {
+    let actor = token::resolve_actor_from_token_str(ctx, bearer_token)
+        .await
+        .map_err(token::map_actor_error)?;
+
+    match actor {
+        access::Actor::User(user_id) => {
+            let workspace_id = workspace_scope::resolve_active_workspace_id(
+                ctx,
+                headers,
+                Some(bearer_token),
+                user_id,
+            )
+            .await?;
+            let permissions =
+                workspace_scope::resolve_workspace_permissions(ctx, workspace_id, user_id).await?;
+            if let Some(permission) = required_permission
+                && !permissions.allows(permission)
+            {
+                return Err(ApiError::forbidden("forbidden"));
+            }
+            Ok(PluginUserContext {
+                workspace_id,
+                user_id,
+                permissions,
+                actor: access::Actor::User(user_id),
+            })
+        }
+        access::Actor::ShareToken(token) => {
+            let ctx_share = ctx
+                .share_service()
+                .resolve_share_context(&token)
+                .await
+                .map_err(|err| crate::http::error::map_service_error(err, "share_service_error"))?
+                .ok_or(ApiError::unauthorized("unauthorized"))?;
+            if share::is_expired(ctx_share.expires_at.as_ref(), chrono::Utc::now()) {
+                return Err(ApiError::unauthorized("share_expired"));
+            }
+            let mut permissions = PermissionSet::from_slice(&[PERM_PLUGIN_RUN, PERM_DOC_VIEW]);
+            if ctx_share.permission.allows_edit() {
+                permissions.insert(PERM_DOC_EDIT);
+            }
+            if let Some(permission) = required_permission
+                && !permissions.allows(permission)
+            {
+                return Err(ApiError::forbidden("forbidden"));
+            }
+            Ok(PluginUserContext {
+                workspace_id: ctx_share.workspace_id,
+                // Share tokens do not map to a user; use workspace_id as a stable placeholder
+                user_id: ctx_share.workspace_id,
+                permissions,
+                actor: access::Actor::ShareToken(token),
+            })
+        }
+        _ => Err(ApiError::unauthorized("unauthorized")),
+    }
+}
+
+pub fn ensure_valid_plugin_id(id: &str) -> Result<(), ApiError> {
+    management::validate_plugin_id(id).map_err(map_plugin_service_error)
+}
+
+pub fn map_plugin_service_error(err: ServiceError) -> crate::http::error::ApiError {
+    crate::http::error::map_service_error_no_log(err)
+}
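How `resolve_plugin_user_context` scopes a share token: plugin-run and doc-view always, doc-edit only when the share allows editing. Restated over a plain `HashSet` — the real `PermissionSet` lives in the domain crate, and the permission strings here are illustrative stand-ins for the `PERM_*` constants:

```rust
use std::collections::HashSet;

// Stand-in for PermissionSet; string values are assumed, the real code uses
// the PERM_PLUGIN_RUN / PERM_DOC_VIEW / PERM_DOC_EDIT constants.
fn share_permissions(allows_edit: bool) -> HashSet<&'static str> {
    let mut set: HashSet<&'static str> = ["plugin.run", "doc.view"].into_iter().collect();
    if allows_edit {
        set.insert("doc.edit"); // only edit-capable shares may write
    }
    set
}

fn main() {
    assert!(!share_permissions(false).contains("doc.edit"));
    assert!(share_permissions(true).contains("doc.edit"));
}
```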
diff --git a/api/crates/presentation/src/http/workspaces/invitations.rs b/api/crates/presentation/src/http/workspaces/invitations.rs
new file mode 100644
index 00000000..4fd70b52
--- /dev/null
+++ b/api/crates/presentation/src/http/workspaces/invitations.rs
@@ -0,0 +1,151 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::StatusCode,
+};
+use uuid::Uuid;
+
+use crate::context::WorkspacesContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::AuthedUser;
+use application::core::services::errors::ServiceError;
+use domain::access::permissions::PERM_MEMBER_INVITE;
+
+use super::types::{
+    CreateWorkspaceInvitationRequest, WorkspaceInvitationResponse, invitation_response_from,
+    map_service_error, parse_role_kind, parse_system_role, require_permission,
+};
+
+#[utoipa::path(
+    get,
+    path = "/api/workspaces/{id}/invitations",
+    tag = "Workspaces",
+    params(("id" = Uuid, Path, description = "Workspace ID")),
+    responses((status = 200, body = [WorkspaceInvitationResponse]))
+)]
+pub async fn list_invitations(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(id): Path<Uuid>,
+) -> Result<Json<Vec<WorkspaceInvitationResponse>>, ApiError> {
+    require_permission(&ctx, id, auth.user_id, PERM_MEMBER_INVITE).await?;
+    let invitations = ctx
+        .workspace_service()
+        .list_invitations(id)
+        .await
+        .map_err(map_service_error)?
+        .into_iter()
+        .map(invitation_response_from)
+        .collect();
+    Ok(Json(invitations))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/workspaces/{id}/invitations",
+    tag = "Workspaces",
+    params(("id" = Uuid, Path, description = "Workspace ID")),
+    request_body = CreateWorkspaceInvitationRequest,
+    responses((status = 200, body = WorkspaceInvitationResponse))
+)]
+pub async fn create_invitation(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(id): Path<Uuid>,
+    Json(body): Json<CreateWorkspaceInvitationRequest>,
+) -> Result<Json<WorkspaceInvitationResponse>, ApiError> {
+    if body.email.trim().is_empty() {
+        return Err(ApiError::bad_request("invalid_email"));
+    }
+    let role_kind = parse_role_kind(body.role_kind.as_str())?;
+    let system_role = parse_system_role(body.system_role.as_deref())?;
+    match role_kind {
+        domain::workspaces::roles::WorkspaceRoleKind::System => {
+            if system_role.is_none() || body.custom_role_id.is_some() {
+                return Err(ApiError::bad_request("invalid_role"));
+            }
+        }
+        domain::workspaces::roles::WorkspaceRoleKind::Custom => {
+            if system_role.is_some() || body.custom_role_id.is_none() {
+                return Err(ApiError::bad_request("invalid_role"));
+            }
+        }
+    }
+    require_permission(&ctx, id, auth.user_id, PERM_MEMBER_INVITE).await?;
+    let record = ctx
+        .workspace_service()
+        .create_invitation(
+            id,
+            auth.user_id,
+            &body.email,
+            role_kind,
+            system_role,
+            body.custom_role_id,
+            body.expires_at,
+        )
+        .await
+        .map_err(map_service_error)?;
+    Ok(Json(invitation_response_from(record)))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/workspaces/{id}/invitations/{invitation_id}",
+    tag = "Workspaces",
+    params(
+        ("id" = Uuid, Path, description = "Workspace ID"),
+        ("invitation_id" = Uuid, Path, description = "Invitation ID"),
+    ),
+    responses((status = 200, body = WorkspaceInvitationResponse))
+)]
+pub async fn revoke_invitation(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path((workspace_id, invitation_id)): Path<(Uuid, Uuid)>,
+) -> Result<Json<WorkspaceInvitationResponse>, ApiError> {
+    require_permission(&ctx, workspace_id, auth.user_id, PERM_MEMBER_INVITE).await?;
+    let record = ctx
+        .workspace_service()
+        .revoke_invitation(workspace_id, invitation_id)
+        .await
+        .map_err(map_service_error)?;
+    Ok(Json(invitation_response_from(record)))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/workspace-invitations/{token}/accept",
+    tag = "Workspaces",
+    params(("token" = String, Path, description = "Invitation token")),
+    responses((status = 204))
+)]
+pub async fn accept_invitation(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(token): Path<String>,
+) -> Result<StatusCode, ApiError> {
+    let user = ctx
+        .account_service()
+        .get_me(auth.user_id)
+        .await
+        .map_err(|err| match err {
+            ServiceError::Unauthorized | ServiceError::TokenExpired => {
+                ApiError::unauthorized("unauthorized")
+            }
+            ServiceError::Forbidden => ApiError::forbidden("forbidden"),
+            ServiceError::NotFound => ApiError::unauthorized("unauthorized"),
+            ServiceError::BadRequest(code) => ApiError::bad_request(code).with_message(code),
+            ServiceError::Conflict => ApiError::conflict("conflict"),
+            ServiceError::Unexpected(_) => {
+                ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error")
+            }
+        })?
+        .ok_or(ApiError::unauthorized("unauthorized"))?;
+
+    ctx.workspace_service()
+        .accept_invitation(&token, auth.user_id, &user.email)
+        .await
+        .map_err(map_service_error)?;
+
+    Ok(StatusCode::NO_CONTENT)
+}
diff --git a/api/crates/presentation/src/http/workspaces/members.rs b/api/crates/presentation/src/http/workspaces/members.rs
new file mode 100644
index 00000000..02b799c2
--- /dev/null
+++ b/api/crates/presentation/src/http/workspaces/members.rs
@@ -0,0 +1,121 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::StatusCode,
+};
+use uuid::Uuid;
+
+use crate::context::WorkspacesContext;
+use crate::http::error::ApiError;
+use crate::http::extractors::AuthedUser;
+use domain::access::permissions::{PERM_MEMBER_REMOVE, PERM_MEMBER_UPDATE_ROLE, PERM_MEMBER_VIEW};
+
+use super::types::{
+    UpdateMemberRoleRequest, WorkspaceMemberResponse, map_service_error, member_response_from,
+    parse_role_kind, parse_system_role, require_permission,
+};
+
+#[utoipa::path(
+    get,
+    path = "/api/workspaces/{id}/members",
+    tag = "Workspaces",
+    params(("id" = Uuid, Path, description = "Workspace ID")),
+    responses((status = 200, body = [WorkspaceMemberResponse]))
+)]
+pub async fn list_members(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(id): Path<Uuid>,
+) -> Result<Json<Vec<WorkspaceMemberResponse>>, ApiError> {
+    require_permission(&ctx, id, auth.user_id, PERM_MEMBER_VIEW).await?;
+    let members = ctx
+        .workspace_service()
+        .list_members(id)
+        .await
+        .map_err(map_service_error)?
+        .into_iter()
+        .map(member_response_from)
+        .collect();
+    Ok(Json(members))
+}
+
+#[utoipa::path(
+    patch,
+    path = "/api/workspaces/{id}/members/{user_id}",
+    tag = "Workspaces",
+    params(
+        ("id" = Uuid, Path, description = "Workspace ID"),
+        ("user_id" = Uuid, Path, description = "Target user ID"),
+    ),
+    request_body = UpdateMemberRoleRequest,
+    responses((status = 200, body = WorkspaceMemberResponse))
+)]
+pub async fn update_member_role(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path((workspace_id, member_id)): Path<(Uuid, Uuid)>,
+    Json(body): Json<UpdateMemberRoleRequest>,
+) -> Result<Json<WorkspaceMemberResponse>, ApiError> {
+    let role_kind = parse_role_kind(body.role_kind.as_str())?;
+    let system_role = parse_system_role(body.system_role.as_deref())?;
+    match role_kind {
+        domain::workspaces::roles::WorkspaceRoleKind::System => {
+            if system_role.is_none() || body.custom_role_id.is_some() {
+                return Err(ApiError::bad_request("invalid_role"));
+            }
+        }
+        domain::workspaces::roles::WorkspaceRoleKind::Custom => {
+            if system_role.is_some() || body.custom_role_id.is_none() {
+                return Err(ApiError::bad_request("invalid_role"));
+            }
+        }
+    }
+
+    require_permission(&ctx, workspace_id, auth.user_id, PERM_MEMBER_UPDATE_ROLE).await?;
+
+    ctx.workspace_service()
+        .update_member_role(
+            workspace_id,
+            member_id,
+            auth.user_id,
+            role_kind,
+            system_role,
+            body.custom_role_id,
+        )
+        .await
+        .map_err(map_service_error)?;
+
+    let updated = ctx
+        .workspace_service()
+        .list_members(workspace_id)
+        .await
+        .map_err(map_service_error)?
+        .into_iter()
+        .find(|m| m.user_id == member_id)
+        .ok_or(ApiError::not_found("member_not_found"))?;
+
+    Ok(Json(member_response_from(updated)))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/workspaces/{id}/members/{user_id}",
+    tag = "Workspaces",
+    params(
+        ("id" = Uuid, Path, description = "Workspace ID"),
+        ("user_id" = Uuid, Path, description = "Target user ID"),
+    ),
+    responses((status = 204))
+)]
+pub async fn remove_member(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path((workspace_id, member_id)): Path<(Uuid, Uuid)>,
+) -> Result<StatusCode, ApiError> {
+    require_permission(&ctx, workspace_id, auth.user_id, PERM_MEMBER_REMOVE).await?;
+    ctx.workspace_service()
+        .remove_member(workspace_id, member_id, Some(auth.user_id))
+        .await
+        .map_err(map_service_error)?;
+    Ok(StatusCode::NO_CONTENT)
+}
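Both `create_invitation` and `update_member_role` enforce the same exclusivity rule between role kinds. Distilled over plain types — the `"system"`/`"custom"` kind strings are assumptions, since `parse_role_kind` is not part of this diff:

```rust
// System roles carry a system_role name and no custom id; custom roles the
// inverse. Anything else is invalid_role in the handlers above.
fn role_fields_valid(kind: &str, system_role: Option<&str>, custom_role_id: Option<u32>) -> bool {
    match kind {
        "system" => system_role.is_some() && custom_role_id.is_none(),
        "custom" => system_role.is_none() && custom_role_id.is_some(),
        _ => false,
    }
}

fn main() {
    assert!(role_fields_valid("system", Some("member"), None));
    assert!(!role_fields_valid("system", Some("member"), Some(7)));
    assert!(role_fields_valid("custom", None, Some(7)));
    assert!(!role_fields_valid("custom", None, None));
}
```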
diff --git a/api/crates/presentation/src/http/workspaces/mod.rs b/api/crates/presentation/src/http/workspaces/mod.rs
new file mode 100644
index 00000000..994209fe
--- /dev/null
+++ b/api/crates/presentation/src/http/workspaces/mod.rs
@@ -0,0 +1,71 @@
+mod invitations;
+mod members;
+mod permissions;
+mod roles;
+pub mod scope;
+pub mod types;
+mod workspace;
+
+use axum::Router;
+use axum::routing::{delete, get, patch, post};
+
+use crate::context::AppContext;
+
+pub use invitations::{accept_invitation, create_invitation, list_invitations, revoke_invitation};
+pub use members::{list_members, remove_member, update_member_role};
+pub use permissions::get_workspace_permissions;
+pub use roles::{create_role, delete_role, list_roles, update_role};
+pub use types::*;
+pub use workspace::{
+    create_workspace, delete_workspace, download_workspace_archive, get_workspace_detail,
+    leave_workspace, list_workspaces, switch_workspace, update_workspace,
+};
+
+pub mod openapi {
+    pub use super::invitations::*;
+    pub use super::members::*;
+    pub use super::permissions::*;
+    pub use super::roles::*;
+    pub use super::workspace::*;
+}
+
+pub fn routes(ctx: AppContext) -> Router {
+    Router::new()
+        .route("/workspaces", get(list_workspaces).post(create_workspace))
+        .route(
+            "/workspaces/:id",
+            get(get_workspace_detail)
+                .put(update_workspace)
+                .delete(delete_workspace),
+        )
+        .route("/workspaces/:id/leave", post(leave_workspace))
+        .route("/workspaces/:id/switch", post(switch_workspace))
+        .route("/workspaces/:id/members", get(list_members))
+        .route(
+            "/workspaces/:id/members/:user_id",
+            patch(update_member_role).delete(remove_member),
+        )
+        .route(
+            "/workspaces/:id/permissions",
+            get(get_workspace_permissions),
+        )
+        .route("/workspaces/:id/roles", get(list_roles).post(create_role))
+        .route(
+            "/workspaces/:id/roles/:role_id",
+            patch(update_role).delete(delete_role),
+        )
+        .route(
+            "/workspaces/:id/invitations",
+            get(list_invitations).post(create_invitation),
+        )
+        .route(
+            "/workspaces/:id/invitations/:invitation_id",
+            delete(revoke_invitation),
+        )
+        .route("/workspaces/:id/download", get(download_workspace_archive))
+        .route(
+            "/workspace-invitations/:token/accept",
+            post(accept_invitation),
+        )
+        .with_state(ctx)
+}
diff --git a/api/crates/presentation/src/http/workspaces/permissions.rs b/api/crates/presentation/src/http/workspaces/permissions.rs
new file mode 100644
index 00000000..f06801f3
--- /dev/null
+++ b/api/crates/presentation/src/http/workspaces/permissions.rs
@@ -0,0 +1,34 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+};
+use uuid::Uuid;
+
+use crate::context::WorkspacesContext;
+use crate::http::extractors::AuthedUser;
+
+use super::types::{WorkspacePermissionsResponse, map_service_error};
+
+#[utoipa::path(
+    get,
+    path = "/api/workspaces/{id}/permissions",
+    tag = "Workspaces",
+    params(("id" = Uuid, Path, description = "Workspace ID")),
+    responses((status = 200, body = WorkspacePermissionsResponse))
+)]
+pub async fn get_workspace_permissions(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(id): Path<Uuid>,
+) -> Result<Json<WorkspacePermissionsResponse>, crate::http::error::ApiError> {
+    let set = ctx
+        .workspace_service()
+        .resolve_permission_set(id, auth.user_id)
+        .await
+        .map_err(map_service_error)?
+        .ok_or(crate::http::error::ApiError::forbidden("forbidden"))?;
+    Ok(Json(WorkspacePermissionsResponse {
+        workspace_id: id,
+        permissions: set.to_vec(),
+    }))
+}
diff --git a/api/crates/presentation/src/http/workspaces/roles.rs b/api/crates/presentation/src/http/workspaces/roles.rs
new file mode 100644
index 00000000..c876d2a6
--- /dev/null
+++ b/api/crates/presentation/src/http/workspaces/roles.rs
@@ -0,0 +1,163 @@
+use axum::{
+    Json,
+    extract::{Path, State},
+    http::StatusCode,
+};
+use uuid::Uuid;
+
+use crate::context::WorkspacesContext;
+use crate::http::extractors::AuthedUser;
+use domain::access::permissions::{PERM_MEMBER_INVITE, PERM_MEMBER_UPDATE_ROLE, PERM_MEMBER_VIEW};
+
+use super::types::{
+    CreateWorkspaceRoleRequest, UpdateWorkspaceRoleRequest, WorkspaceRoleResponse,
+    map_service_error, normalize_overrides, parse_base_role, parse_optional_base_role,
+    require_any_permission, require_permission, role_response_from, validate_base_role,
+};
+
+#[utoipa::path(
+    get,
+    path = "/api/workspaces/{id}/roles",
+    tag = "Workspaces",
+    params(("id" = Uuid, Path, description = "Workspace ID")),
+    responses((status = 200, body = [WorkspaceRoleResponse]))
+)]
+pub async fn list_roles(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(id): Path<Uuid>,
+) -> Result<Json<Vec<WorkspaceRoleResponse>>, crate::http::error::ApiError> {
+    require_any_permission(
+        &ctx,
+        id,
+        auth.user_id,
+        &[
+            PERM_MEMBER_VIEW,
+            PERM_MEMBER_UPDATE_ROLE,
+            PERM_MEMBER_INVITE,
+        ],
+    )
+    .await?;
+    let roles = ctx
+        .workspace_service()
+        .list_roles(id)
+        .await
+        .map_err(map_service_error)?
+        .into_iter()
+        .map(role_response_from)
+        .collect();
+    Ok(Json(roles))
+}
+
+#[utoipa::path(
+    post,
+    path = "/api/workspaces/{id}/roles",
+    tag = "Workspaces",
+    params(("id" = Uuid, Path, description = "Workspace ID")),
+    request_body = CreateWorkspaceRoleRequest,
+    responses((status = 200, body = WorkspaceRoleResponse))
+)]
+pub async fn create_role(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path(id): Path<Uuid>,
+    Json(body): Json<CreateWorkspaceRoleRequest>,
+) -> Result<Json<WorkspaceRoleResponse>, crate::http::error::ApiError> {
+    if body.name.trim().is_empty() || !validate_base_role(body.base_role.as_str()) {
+        return Err(crate::http::error::ApiError::bad_request("invalid_role"));
+    }
+    let base_role = parse_base_role(body.base_role.as_str())?;
+    let overrides = normalize_overrides(body.overrides)?;
+    require_permission(&ctx, id, auth.user_id, PERM_MEMBER_UPDATE_ROLE).await?;
+    let record = ctx
+        .workspace_service()
+        .create_role(
+            id,
+            auth.user_id,
+            body.name.trim(),
+            base_role,
+            body.description.as_deref(),
+            body.priority.unwrap_or(0),
+            &overrides,
+        )
+        .await
+        .map_err(map_service_error)?;
+    Ok(Json(role_response_from(record)))
+}
+
+#[utoipa::path(
+    patch,
+    path = "/api/workspaces/{id}/roles/{role_id}",
+    tag = "Workspaces",
+    params(
+        ("id" = Uuid, Path, description = "Workspace ID"),
+        ("role_id" = Uuid, Path, description = "Role ID"),
+    ),
+    request_body = UpdateWorkspaceRoleRequest,
+    responses((status = 200, body = WorkspaceRoleResponse))
+)]
+pub async fn update_role(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path((workspace_id, role_id)): Path<(Uuid, Uuid)>,
+    Json(body): Json<UpdateWorkspaceRoleRequest>,
+) -> Result<Json<WorkspaceRoleResponse>, crate::http::error::ApiError> {
+    if body
+        .base_role
+        .as_deref()
+        .is_some_and(|base| !validate_base_role(base))
+    {
+        return Err(crate::http::error::ApiError::bad_request(
+            "invalid_base_role",
+        ));
+    }
+    let base_role = parse_optional_base_role(body.base_role.as_deref())?;
+    let overrides_vec = normalize_overrides(body.overrides.clone())?;
+    let overrides_opt = if body.overrides.is_some() {
+        Some(overrides_vec.as_slice())
+    } else {
+        None
+    };
+    require_permission(&ctx, workspace_id, auth.user_id, PERM_MEMBER_UPDATE_ROLE).await?;
+    let mut record = ctx
+        .workspace_service()
+        .update_role(
+            workspace_id,
+            auth.user_id,
+            role_id,
+            body.name.as_deref(),
+            base_role,
+            body.description.as_deref(),
+            body.priority,
+            overrides_opt,
+        )
+        .await
+        .map_err(map_service_error)?;
+    if body.overrides.is_some() {
+        record.overrides = overrides_vec;
+    }
+    Ok(Json(role_response_from(record)))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/api/workspaces/{id}/roles/{role_id}",
+    tag = "Workspaces",
+    params(
+        ("id" = Uuid, Path, description = "Workspace ID"),
+        ("role_id" = Uuid, Path, description = "Role ID"),
+    ),
+    responses((status = 204))
+)]
+pub async fn delete_role(
+    State(ctx): State<WorkspacesContext>,
+    auth: AuthedUser,
+    Path((workspace_id, role_id)): Path<(Uuid, Uuid)>,
+) -> Result<StatusCode, crate::http::error::ApiError> {
+    require_permission(&ctx, workspace_id, auth.user_id, PERM_MEMBER_UPDATE_ROLE).await?;
+    ctx.workspace_service()
+        .delete_role(workspace_id, role_id)
+        .await
+        .map_err(map_service_error)?;
+    Ok(StatusCode::NO_CONTENT)
+}
diff --git a/api/src/presentation/http/workspace_scope.rs b/api/crates/presentation/src/http/workspaces/scope.rs
similarity index 51%
rename from api/src/presentation/http/workspace_scope.rs
rename to api/crates/presentation/src/http/workspaces/scope.rs
index 39f71472..55bee769 100644
--- a/api/src/presentation/http/workspace_scope.rs
+++ b/api/crates/presentation/src/http/workspaces/scope.rs
@@ -1,24 +1,27 @@
-use axum::http::{HeaderMap, StatusCode};
+use axum::http::HeaderMap;
 use uuid::Uuid;
 
-use crate::domain::workspaces::permissions::PermissionSet;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::workspaces::map_service_error;
+use crate::context::{HasAuthServices, HasWorkspaceService};
+use crate::http::error::ApiError;
+use crate::http::workspaces::map_service_error;
+use domain::access::permissions::PermissionSet;
 
 const WORKSPACE_HEADER: &str = "X-Workspace-ID";
 
 pub async fn resolve_active_workspace_id(
-    ctx: &AppContext,
+    ctx: &(impl HasAuthServices + HasWorkspaceService),
     headers: &HeaderMap,
     bearer_token: Option<&str>,
     user_id: Uuid,
-) -> Result<Uuid, StatusCode> {
+) -> Result<Uuid, ApiError> {
     let override_id = headers
         .get(WORKSPACE_HEADER)
        .and_then(|v| v.to_str().ok())
        .map(|raw| raw.trim())
        .filter(|value| !value.is_empty())
-        .map(|value| Uuid::parse_str(value).map_err(|_| StatusCode::BAD_REQUEST))
+        .map(|value| {
+            Uuid::parse_str(value).map_err(|_| ApiError::bad_request("invalid_workspace_id"))
+        })
         .transpose()?;
 
     let workspaces = ctx
@@ -28,7 +31,7 @@ pub async fn resolve_active_workspace_id(
         .map_err(map_service_error)?;
 
     if workspaces.is_empty() {
-        return Err(StatusCode::FORBIDDEN);
+        return Err(ApiError::forbidden("forbidden"));
     }
 
     if let Some(id) = override_id {
@@ -36,20 +39,24 @@ pub async fn resolve_active_workspace_id(
         if found {
             return Ok(id);
         }
-        return Err(StatusCode::FORBIDDEN);
+        return Err(ApiError::forbidden("forbidden"));
     }
 
     if let Some(token) = bearer_token {
-        if let Some(token_ws_id) = ctx.auth_service().workspace_from_token_claim(token) {
-            if workspaces.iter().any(|ws| ws.id == token_ws_id) {
-                return Ok(token_ws_id);
-            }
-        } else if let Ok(Some(token_ws_id)) =
-            ctx.auth_service().workspace_from_token_async(token).await
+        let token_ws_id =
+            if let Some(token_ws_id) = ctx.auth_service().workspace_from_token_claim(token) {
+                Some(token_ws_id)
+            } else {
+                ctx.auth_service()
+                    .workspace_from_token_async(token)
+                    .await
+                    .ok()
+                    .flatten()
+            };
+        if let Some(token_ws_id) = token_ws_id
+            && workspaces.iter().any(|ws| ws.id == token_ws_id)
         {
-            if workspaces.iter().any(|ws| ws.id == token_ws_id) {
-                return Ok(token_ws_id);
-            }
+            return Ok(token_ws_id);
         }
     }
 
@@ -61,27 +68,27 @@ pub async fn resolve_active_workspace_id(
 }
 
 pub async fn ensure_workspace_permission(
-    ctx: &AppContext,
+    ctx: &(impl HasAuthServices + HasWorkspaceService),
     workspace_id: Uuid,
     user_id: Uuid,
     permission: &str,
-) -> Result<(), StatusCode> {
+) -> Result<(), ApiError> {
     let set = resolve_workspace_permissions(ctx, workspace_id, user_id).await?;
     if set.allows(permission) {
         Ok(())
     } else {
-        Err(StatusCode::FORBIDDEN)
+        Err(ApiError::forbidden("forbidden"))
     }
 }
 
 pub async fn resolve_workspace_permissions(
-    ctx: &AppContext,
+    ctx: &(impl HasAuthServices + HasWorkspaceService),
     workspace_id: Uuid,
     user_id: Uuid,
-) -> Result<PermissionSet, StatusCode> {
+) -> Result<PermissionSet, ApiError> {
     ctx.workspace_service()
         .resolve_permission_set(workspace_id, user_id)
         .await
         .map_err(map_service_error)?
- .ok_or(StatusCode::FORBIDDEN) + .ok_or(ApiError::forbidden("forbidden")) }
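Reviewer note: the hunks above give resolve_active_workspace_id a fixed precedence: an explicit X-Workspace-ID header (denied outright when it names a workspace the caller is not a member of), then the workspace claim carried by the bearer token, then a fallback in the elided tail of the function, presumably the caller's default membership. A standalone sketch of that ordering under those assumptions; Membership and the final fallback rule are illustrative:

// Illustrative only: header override wins but must match a membership; a stale
// token claim falls through instead of failing; the fallback is assumed to be
// the default membership.
struct Membership { id: u32, is_default: bool }

fn resolve(header: Option<u32>, token_claim: Option<u32>, memberships: &[Membership]) -> Option<u32> {
    if let Some(id) = header {
        // An explicit override either matches a membership or the request is denied.
        return memberships.iter().any(|m| m.id == id).then_some(id);
    }
    if let Some(id) = token_claim {
        if memberships.iter().any(|m| m.id == id) {
            return Some(id);
        }
    }
    memberships.iter().find(|m| m.is_default).map(|m| m.id)
}

fn main() {
    let ms = [Membership { id: 1, is_default: true }, Membership { id: 2, is_default: false }];
    assert_eq!(resolve(Some(2), Some(1), &ms), Some(2)); // header wins
    assert_eq!(resolve(None, Some(2), &ms), Some(2));    // then the token claim
    assert_eq!(resolve(None, None, &ms), Some(1));       // then the default membership
    assert_eq!(resolve(Some(9), None, &ms), None);       // unknown override: denied
}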
diff --git a/api/crates/presentation/src/http/workspaces/types.rs b/api/crates/presentation/src/http/workspaces/types.rs new file mode 100644 index 00000000..12d4f6d0 --- /dev/null +++ b/api/crates/presentation/src/http/workspaces/types.rs @@ -0,0 +1,312 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; +use uuid::Uuid; + +use crate::context::HasWorkspaceService; +use crate::http::documents::DownloadFormat; +use application::core::services::errors::ServiceError; +use application::workspaces::ports::workspace_repository::{ + WorkspaceInvitationRecord, WorkspaceListItem, WorkspaceMemberDetail, WorkspaceRoleRecord, +}; +use domain::access::permissions::PermissionOverride; +use domain::workspaces::roles::{WorkspaceBaseRole, WorkspaceRoleKind, WorkspaceSystemRole}; + +#[derive(Debug, Serialize, ToSchema)] +pub struct WorkspaceResponse { + pub id: Uuid, + pub name: String, + pub slug: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub icon: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option<String>, + pub is_personal: bool, + pub role_kind: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub system_role: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub custom_role_id: Option<Uuid>, + pub is_default: bool, +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct CreateWorkspaceRequest { + pub name: String, + pub icon: Option<String>, + pub description: Option<String>, +} + +#[derive(Debug, Serialize, ToSchema)] +pub struct WorkspaceMemberResponse { + pub workspace_id: Uuid, + pub user_id: Uuid, + pub email: String, + pub name: String, + pub role_kind: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub system_role: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub custom_role_id: Option<Uuid>, + pub is_default: bool, +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct UpdateMemberRoleRequest { + pub role_kind: String, + pub system_role: Option<String>, + pub custom_role_id: Option<Uuid>, +} + +#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)] +pub struct PermissionOverridePayload { + pub permission: String, + pub allowed: bool, +} + +#[derive(Debug, Deserialize, ToSchema, Default)] +pub struct DownloadWorkspaceQuery { + #[serde(default)] + pub format: DownloadFormat, +} + +#[derive(Debug, Serialize, ToSchema)] +pub struct WorkspaceRoleResponse { + pub id: Uuid, + pub workspace_id: Uuid, + pub name: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option<String>, + pub base_role: String, + pub priority: i32, + pub overrides: Vec<PermissionOverridePayload>, +} + +#[derive(Debug, Serialize, ToSchema)] +pub struct WorkspaceInvitationResponse { + pub id: Uuid, + pub workspace_id: Uuid, + pub email: String, + pub role_kind: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub system_role: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub custom_role_id: Option<Uuid>, + pub invited_by: Uuid, + pub token: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub expires_at: Option<DateTime<Utc>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub accepted_by: Option<Uuid>, + #[serde(skip_serializing_if = "Option::is_none")] + pub accepted_at: Option<DateTime<Utc>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub revoked_at: Option<DateTime<Utc>>, + pub created_at: DateTime<Utc>, +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct CreateWorkspaceRoleRequest { + pub name: String, + pub base_role: String, + pub description: Option<String>, + pub priority: Option<i32>, + pub overrides: Option<Vec<PermissionOverridePayload>>, +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct UpdateWorkspaceRoleRequest { + pub name: Option<String>, + pub base_role: Option<String>, + pub description: Option<String>, + pub priority: Option<i32>, + pub overrides: Option<Vec<PermissionOverridePayload>>, +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct UpdateWorkspaceRequest { + pub name: Option<String>, + pub icon: Option<String>, + pub description: Option<String>, +} + +#[derive(Debug, Serialize, ToSchema)] +pub struct SwitchWorkspaceResponse { + pub access_token: String, +} + +#[derive(Debug, Serialize, ToSchema)] +pub struct WorkspacePermissionsResponse { + pub workspace_id: Uuid, + pub permissions: Vec<String>, +} + +#[derive(Debug, Deserialize, ToSchema)] +pub struct CreateWorkspaceInvitationRequest { + pub email: String, + pub role_kind: String, + pub system_role: Option<String>, + pub custom_role_id: Option<Uuid>, + pub expires_at: Option<DateTime<Utc>>, +} + +pub fn to_response(row: WorkspaceListItem) -> WorkspaceResponse { + WorkspaceResponse { + id: row.id, + name: row.name, + slug: row.slug, + icon: row.icon, + description: row.description, + is_personal: row.is_personal, + role_kind: row.role_kind.as_str().to_string(), + system_role: row.system_role.map(|role| role.as_str().to_string()), + custom_role_id: row.custom_role_id, + is_default: row.is_default, + } +} + +pub fn map_service_error(err: ServiceError) -> crate::http::error::ApiError { + crate::http::error::map_service_error(err, "workspace_service_error") +} + +pub fn member_response_from(detail: WorkspaceMemberDetail) -> WorkspaceMemberResponse { + WorkspaceMemberResponse { + workspace_id: detail.workspace_id, + user_id: detail.user_id, + email: detail.user_email, + name: detail.user_name, + role_kind: detail.role_kind.as_str().to_string(), + system_role: detail.system_role.map(|role| role.as_str().to_string()), + custom_role_id: detail.custom_role_id, + is_default: detail.is_default, + } +} + +pub fn role_response_from(record: WorkspaceRoleRecord) -> WorkspaceRoleResponse { + WorkspaceRoleResponse { + id: record.id, + workspace_id: record.workspace_id, + name: record.name, + description: record.description, + base_role: record.base_role.as_str().to_string(), + priority: record.priority, + overrides: record + .overrides + .into_iter() + .map(|item| PermissionOverridePayload { + permission: item.permission, + allowed: item.allowed, + }) + .collect(), + } +} + +pub fn invitation_response_from(record: WorkspaceInvitationRecord) -> WorkspaceInvitationResponse { + WorkspaceInvitationResponse { + id: record.id, + workspace_id: record.workspace_id, + email: record.email, + role_kind: record.role_kind.as_str().to_string(), + system_role: record.system_role.map(|role| role.as_str().to_string()), + custom_role_id: record.custom_role_id, + invited_by: record.invited_by, + token: record.token, + expires_at: record.expires_at, + accepted_by: record.accepted_by, + accepted_at: record.accepted_at, + revoked_at: record.revoked_at, + created_at: record.created_at, + } +} + +pub async fn require_permission( + ctx: &impl HasWorkspaceService, + workspace_id: Uuid, + user_id: Uuid, + permission: &str, +) -> Result<(), crate::http::error::ApiError> { + let set = ctx + .workspace_service() + .resolve_permission_set(workspace_id, user_id) + .await + .map_err(map_service_error)?
+ .ok_or(crate::http::error::ApiError::forbidden("forbidden"))?; + if set.allows(permission) { + Ok(()) + } else { + Err(crate::http::error::ApiError::forbidden("forbidden")) + } +} + +pub async fn require_any_permission( + ctx: &impl HasWorkspaceService, + workspace_id: Uuid, + user_id: Uuid, + permissions: &[&str], +) -> Result<(), crate::http::error::ApiError> { + if permissions.is_empty() { + return Err(crate::http::error::ApiError::forbidden("forbidden")); + } + let set = ctx + .workspace_service() + .resolve_permission_set(workspace_id, user_id) + .await + .map_err(map_service_error)? + .ok_or(crate::http::error::ApiError::forbidden("forbidden"))?; + if permissions.iter().any(|perm| set.allows(perm)) { + Ok(()) + } else { + Err(crate::http::error::ApiError::forbidden("forbidden")) + } +} + +pub fn validate_base_role(role: &str) -> bool { + WorkspaceBaseRole::parse(role).is_some() +} + +pub fn parse_role_kind(role_kind: &str) -> Result<WorkspaceRoleKind, crate::http::error::ApiError> { + WorkspaceRoleKind::parse(role_kind).ok_or(crate::http::error::ApiError::bad_request( + "invalid_role_kind", + )) +} + +pub fn parse_system_role( + role: Option<&str>, +) -> Result<Option<WorkspaceSystemRole>, crate::http::error::ApiError> { + role.map(|value| { + WorkspaceSystemRole::parse(value).ok_or(crate::http::error::ApiError::bad_request( + "invalid_system_role", + )) + }) + .transpose() +} + +pub fn parse_base_role(role: &str) -> Result<WorkspaceBaseRole, crate::http::error::ApiError> { + WorkspaceBaseRole::parse(role).ok_or(crate::http::error::ApiError::bad_request( + "invalid_base_role", + )) +} + +pub fn parse_optional_base_role( + role: Option<&str>, +) -> Result<Option<WorkspaceBaseRole>, crate::http::error::ApiError> { + role.map(parse_base_role).transpose() +} + +pub fn normalize_overrides( + overrides: Option<Vec<PermissionOverridePayload>>, +) -> Result<Vec<PermissionOverride>, crate::http::error::ApiError> { + let mut out = Vec::new(); + if let Some(items) = overrides { + for item in items { + let perm = item.permission.trim(); + if perm.is_empty() { + return Err(crate::http::error::ApiError::bad_request( + "invalid_permission_override", + )); + } + out.push(PermissionOverride::new(perm.to_string(), item.allowed)); + } + } + Ok(out) +}
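Reviewer note: the DTOs in types.rs lean on serde's skip_serializing_if so optional fields are omitted from the JSON body entirely instead of being serialized as null. A tiny self-contained illustration; Demo is an invented type, and serde/serde_json are already workspace dependencies:

// Illustrative only: shows the skip_serializing_if behavior used above.
use serde::Serialize;

#[derive(Serialize)]
struct Demo {
    id: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    description: Option<String>,
}

fn main() {
    let json = serde_json::to_string(&Demo { id: 1, description: None }).unwrap();
    assert_eq!(json, r#"{"id":1}"#); // the field is omitted, not emitted as null
    let json = serde_json::to_string(&Demo { id: 1, description: Some("x".into()) }).unwrap();
    assert_eq!(json, r#"{"id":1,"description":"x"}"#);
}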
diff --git a/api/crates/presentation/src/http/workspaces/workspace.rs b/api/crates/presentation/src/http/workspaces/workspace.rs new file mode 100644 index 00000000..218ceb6a --- /dev/null +++ b/api/crates/presentation/src/http/workspaces/workspace.rs @@ -0,0 +1,363 @@ +use axum::{ + Json, + extract::{Path, Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::{IntoResponse, Response}, +}; +use uuid::Uuid; + +use crate::context::WorkspacesContext; +#[allow(unused_imports)] +use crate::http::documents::DocumentDownloadBinary; +use crate::http::error::ApiError; +use crate::http::extractors::AuthedUser; +use crate::http::identity::auth::{ + self, apply_session_cookies, extract_client_ip, extract_refresh_token, extract_user_agent, +}; +use application::core::services::access; +use application::core::services::errors::ServiceError; +use application::identity::services::auth::user_sessions::SessionMetadata; +use application::workspaces::ports::workspace_repository::WorkspaceListItem; +use domain::access::permissions::{PERM_DOC_VIEW, PERM_WORKSPACE_DELETE, PERM_WORKSPACE_UPDATE}; +use domain::workspaces::roles::{WorkspaceRoleKind, WorkspaceSystemRole}; + +use super::types::{ + CreateWorkspaceRequest, DownloadWorkspaceQuery, SwitchWorkspaceResponse, + UpdateWorkspaceRequest, WorkspaceResponse, map_service_error, require_permission, to_response, +}; + +#[utoipa::path(get, path = "/api/workspaces", tag = "Workspaces", responses((status = 200, body = [WorkspaceResponse])))] +pub async fn list_workspaces( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, +) -> Result<Json<Vec<WorkspaceResponse>>, ApiError> { + let items = ctx + .workspace_service() + .list_for_user(auth.user_id) + .await + .map_err(map_service_error)? + .into_iter() + .map(to_response) + .collect(); + Ok(Json(items)) +} + +#[utoipa::path(post, path = "/api/workspaces", tag = "Workspaces", request_body = CreateWorkspaceRequest, responses((status = 200, body = WorkspaceResponse)))] +pub async fn create_workspace( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, + Json(payload): Json<CreateWorkspaceRequest>, +) -> Result<Json<WorkspaceResponse>, ApiError> { + if payload.name.trim().is_empty() { + return Err(ApiError::bad_request("invalid_workspace_name")); + } + let workspace = ctx + .workspace_service() + .create_workspace( + auth.user_id, + payload.name.trim(), + payload.icon.as_deref(), + payload.description.as_deref(), + ) + .await + .map_err(map_service_error)?; + let memberships = ctx + .workspace_service() + .list_for_user(auth.user_id) + .await + .map_err(map_service_error)?; + let created = memberships + .into_iter() + .find(|item| item.id == workspace.id) + .unwrap_or(WorkspaceListItem { + id: workspace.id, + name: workspace.name, + slug: workspace.slug, + icon: workspace.icon, + description: workspace.description, + is_personal: workspace.is_personal, + role_kind: WorkspaceRoleKind::System, + system_role: Some(WorkspaceSystemRole::Owner), + custom_role_id: None, + is_default: false, + }); + Ok(Json(to_response(created))) +} + +#[utoipa::path( + get, + path = "/api/workspaces/{id}", + tag = "Workspaces", + params(("id" = Uuid, Path, description = "Workspace ID")), + responses((status = 200, body = WorkspaceResponse)) +)] +pub async fn get_workspace_detail( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, + Path(id): Path<Uuid>, +) -> Result<Json<WorkspaceResponse>, ApiError> { + let workspaces = ctx + .workspace_service() + .list_for_user(auth.user_id) + .await + .map_err(map_service_error)?; + let workspace = workspaces + .into_iter() + .find(|ws| ws.id == id) + .ok_or(ApiError::not_found("workspace_not_found"))?; + Ok(Json(to_response(workspace))) +} + +#[utoipa::path( + put, + path = "/api/workspaces/{id}", + tag = "Workspaces", + params(("id" = Uuid, Path, description = "Workspace ID")), + request_body = UpdateWorkspaceRequest, + responses((status = 200, body = WorkspaceResponse)) +)] +pub async fn update_workspace( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, + Path(id): Path<Uuid>, + Json(payload): Json<UpdateWorkspaceRequest>, +) -> Result<Json<WorkspaceResponse>, ApiError> { + if payload + .name + .as_deref() + .is_some_and(|name| name.trim().is_empty()) + { + return Err(ApiError::bad_request("invalid_workspace_name")); + } + require_permission(&ctx, id, auth.user_id, PERM_WORKSPACE_UPDATE).await?; + let normalized_name = payload + .name + .as_ref() + .map(|value| value.trim()) + .filter(|value| !value.is_empty()) + .map(|value| value.to_string()); + let normalized_icon = payload + .icon + .as_ref() + .map(|value| value.trim()) + .map(|value| value.to_string()); + let normalized_description = payload + .description + .as_ref() + .map(|value| value.trim()) + .map(|value| value.to_string()); + let updated = ctx + .workspace_service() + .update_workspace( + id, + normalized_name.as_deref(), + normalized_icon.as_deref(), + normalized_description.as_deref(), + ) + .await + .map_err(map_service_error)?
+ .ok_or(ApiError::not_found("workspace_not_found"))?; + + let memberships = ctx + .workspace_service() + .list_for_user(auth.user_id) + .await + .map_err(map_service_error)?; + let mut membership = memberships + .into_iter() + .find(|ws| ws.id == id) + .ok_or(ApiError::forbidden("forbidden"))?; + membership.name = updated.name; + membership.icon = updated.icon; + membership.description = updated.description; + membership.slug = updated.slug; + Ok(Json(to_response(membership))) +} + +#[utoipa::path( + delete, + path = "/api/workspaces/{id}", + tag = "Workspaces", + params(("id" = Uuid, Path, description = "Workspace ID")), + responses((status = 204)) +)] +pub async fn delete_workspace( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, + Path(id): Path<Uuid>, +) -> Result<StatusCode, ApiError> { + require_permission(&ctx, id, auth.user_id, PERM_WORKSPACE_DELETE).await?; + let workspace = ctx + .workspace_service() + .get_workspace(id) + .await + .map_err(map_service_error)? + .ok_or(ApiError::not_found("workspace_not_found"))?; + if workspace.is_personal { + return Err(ApiError::bad_request("cannot_delete_personal_workspace")); + } + let members = ctx + .workspace_service() + .list_members(id) + .await + .map_err(map_service_error)?; + if members.iter().any(|member| member.is_default) { + return Err(ApiError::conflict("workspace_has_default_member")); + } + ctx.workspace_service() + .delete_workspace(id) + .await + .map_err(map_service_error)?; + Ok(StatusCode::NO_CONTENT) +} + +#[utoipa::path( + post, + path = "/api/workspaces/{id}/leave", + tag = "Workspaces", + params(("id" = Uuid, Path, description = "Workspace ID")), + responses((status = 204)) +)] +pub async fn leave_workspace( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, + Path(workspace_id): Path<Uuid>, +) -> Result<StatusCode, ApiError> { + ctx.workspace_service() + .leave_workspace(workspace_id, auth.user_id) + .await + .map_err(map_service_error)?; + Ok(StatusCode::NO_CONTENT) +} + +#[utoipa::path( + post, + path = "/api/workspaces/{id}/switch", + tag = "Workspaces", + params(("id" = Uuid, Path, description = "Workspace ID")), + responses((status = 200, body = SwitchWorkspaceResponse)) +)] +pub async fn switch_workspace( + State(ctx): State<WorkspacesContext>, + auth: AuthedUser, + headers: HeaderMap, + Path(id): Path<Uuid>, +) -> Result<(HeaderMap, Json<SwitchWorkspaceResponse>), ApiError> { + ctx.workspace_service() + .set_default_workspace(auth.user_id, id) + .await + .map_err(map_service_error)?; + let client_ip = extract_client_ip(&headers); + let user_agent = extract_user_agent(&headers); + let session_service = ctx.session_service(); + let mut issued = None; + if let Some(refresh_token) = extract_refresh_token(&headers) { + match session_service + .refresh_session( + &refresh_token, + Some(id), + SessionMetadata { + user_agent, + ip_address: client_ip.as_deref(), + }, + ) + .await + { + Ok(bundle) => issued = Some(bundle), + Err(ServiceError::Unauthorized | ServiceError::TokenExpired) => { + issued = None; + } + Err(err) => return Err(auth::map_auth_error(err)), + } + } + let issued = match issued { + Some(bundle) => bundle, + None => session_service + .issue_new_session( + auth.user_id, + id, + false, + SessionMetadata { + user_agent, + ip_address: client_ip.as_deref(), + }, + ) + .await + .map_err(auth::map_auth_error)?, + }; + let mut response_headers = HeaderMap::new(); + apply_session_cookies( + ctx.auth_service().as_ref(), + ctx.cfg.session_cookie_secure, + &mut response_headers, + &issued, + ); + Ok(( + response_headers, + Json(SwitchWorkspaceResponse { + access_token: issued.access.token, + }), + )) +} +
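Reviewer note: switch_workspace first tries to rotate the caller's existing refresh token onto the new workspace, mints a brand-new session only when that refresh is rejected as Unauthorized or TokenExpired, and propagates every other error. The control flow reduced to a sketch; SvcError and Session stand in for the crate's ServiceError and issued-session bundle:

// Illustrative only: refresh() fakes a stale refresh token.
#[derive(Debug, PartialEq)]
enum SvcError { Unauthorized, TokenExpired, Other }

#[derive(Debug, PartialEq)]
struct Session(&'static str);

fn refresh(token: Option<&str>) -> Result<Session, SvcError> {
    match token {
        Some(_) => Err(SvcError::TokenExpired), // pretend the cookie is stale
        None => Err(SvcError::Unauthorized),
    }
}

fn switch(refresh_token: Option<&str>) -> Result<Session, SvcError> {
    match refresh(refresh_token) {
        Ok(s) => Ok(s), // rotated onto the new workspace
        Err(SvcError::Unauthorized | SvcError::TokenExpired) => Ok(Session("reissued")),
        Err(e) => Err(e), // anything else is a real failure and propagates
    }
}

fn main() {
    assert_eq!(switch(Some("stale-cookie")), Ok(Session("reissued")));
    assert_eq!(switch(None), Ok(Session("reissued")));
}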
path = "/api/workspaces/{id}/download", + tag = "Workspaces", + params( + ("id" = Uuid, Path, description = "Workspace ID"), + ("format" = Option, Query, description = "Download format (archive only)") + ), + responses( + (status = 200, description = "Workspace download", body = DocumentDownloadBinary, content_type = "application/octet-stream"), + (status = 401, description = "Unauthorized"), + (status = 404, description = "Workspace not found") + ) +)] +pub async fn download_workspace_archive( + State(ctx): State, + auth: AuthedUser, + Path(id): Path, + Query(params): Query, +) -> Result { + require_permission(&ctx, id, auth.user_id, PERM_DOC_VIEW).await?; + + let workspace = ctx + .workspace_service() + .get_workspace(id) + .await + .map_err(map_service_error)? + .ok_or(ApiError::not_found("workspace_not_found"))?; + + let actor = access::Actor::User(auth.user_id); + let download = ctx + .document_service() + .download_workspace_root(&actor, id, &workspace.name, params.format.into()) + .await + .map_err(|err| match err { + ServiceError::Unauthorized | ServiceError::TokenExpired | ServiceError::Forbidden => { + ApiError::forbidden("forbidden") + } + ServiceError::Conflict | ServiceError::NotFound => ApiError::not_found("not_found"), + ServiceError::BadRequest(code) => ApiError::bad_request(code).with_message(code), + ServiceError::Unexpected(inner) => { + tracing::error!(error = ?inner, workspace_id = %id, "workspace_download_failed"); + ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "internal_error") + } + })?; + + let mut headers = HeaderMap::new(); + let content_type = HeaderValue::from_str(&download.content_type) + .map_err(|_| ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "invalid_header"))?; + headers.insert(axum::http::header::CONTENT_TYPE, content_type); + headers.insert( + axum::http::header::HeaderName::from_static("x-content-type-options"), + HeaderValue::from_static("nosniff"), + ); + let disposition = format!("attachment; filename=\"{}\"", download.filename); + let content_disposition = HeaderValue::from_str(&disposition) + .map_err(|_| ApiError::new(StatusCode::INTERNAL_SERVER_ERROR, "invalid_header"))?; + headers.insert(axum::http::header::CONTENT_DISPOSITION, content_disposition); + + Ok((headers, download.bytes).into_response()) +} diff --git a/api/src/presentation/mod.rs b/api/crates/presentation/src/lib.rs similarity index 55% rename from api/src/presentation/mod.rs rename to api/crates/presentation/src/lib.rs index a6b6c029..dafefc12 100644 --- a/api/src/presentation/mod.rs +++ b/api/crates/presentation/src/lib.rs @@ -1,3 +1,5 @@ pub mod context; pub mod http; +pub mod openapi; +pub mod security; pub mod ws; diff --git a/api/crates/presentation/src/openapi.rs b/api/crates/presentation/src/openapi.rs new file mode 100644 index 00000000..a1cb4c80 --- /dev/null +++ b/api/crates/presentation/src/openapi.rs @@ -0,0 +1,258 @@ +use utoipa::OpenApi; + +use crate::http::core::{health, markdown, storage_ingest}; +use crate::http::documents::files; +use crate::http::documents::{publishing as public, sharing as shares, tagging as tags}; +use crate::http::identity::{api_tokens, auth, shortcuts}; +use crate::http::{documents, git, plugins, workspaces}; +use crate::ws; + +#[derive(OpenApi)] +#[openapi( + paths( + auth::openapi::register, + auth::openapi::login, + auth::openapi::oauth_state, + auth::openapi::oauth_login, + auth::openapi::list_oauth_providers, + auth::openapi::refresh_session, + auth::openapi::logout, + auth::openapi::me, + auth::openapi::list_sessions, + 
auth::openapi::revoke_session, + api_tokens::openapi::list_api_tokens, + api_tokens::openapi::create_api_token, + api_tokens::openapi::revoke_api_token, + shortcuts::openapi::get_user_shortcuts, + shortcuts::openapi::update_user_shortcuts, + auth::openapi::delete_account, + ws::documents::yjs::openapi::axum_ws_entry, + tags::openapi::list_tags, + documents::openapi::list_documents, + documents::openapi::create_document, + documents::openapi::get_document, + documents::openapi::update_document, + documents::openapi::duplicate_document, + documents::openapi::delete_document, + documents::openapi::get_document_content, + documents::openapi::update_document_content, + documents::openapi::patch_document_content, + documents::openapi::archive_document, + documents::openapi::unarchive_document, + documents::openapi::download_document, + documents::openapi::list_document_snapshots, + documents::openapi::get_document_snapshot_diff, + documents::openapi::restore_document_snapshot, + documents::openapi::download_document_snapshot, + documents::openapi::search_documents, + documents::openapi::get_backlinks, + documents::openapi::get_outgoing_links, + files::openapi::upload_file, + files::openapi::get_file, + files::openapi::get_file_by_name, + shares::openapi::create_share, + shares::openapi::delete_share, + shares::openapi::list_document_shares, + shares::openapi::validate_share_token, + shares::openapi::browse_share, + shares::openapi::list_active_shares, + shares::openapi::create_share_mount, + shares::openapi::list_share_mounts, + shares::openapi::delete_share_mount, + shares::openapi::list_applicable_shares, + shares::openapi::materialize_folder_share, + public::openapi::publish_document, + public::openapi::unpublish_document, + public::openapi::get_publish_status, + public::openapi::list_workspace_public_documents, + public::openapi::get_public_by_workspace_and_id, + public::openapi::get_public_content_by_workspace_and_id, + git::openapi::get_config, + git::openapi::create_or_update_config, + git::openapi::delete_config, + git::openapi::get_status, + git::openapi::get_changes, + git::openapi::get_history, + git::openapi::get_working_diff, + git::openapi::get_commit_diff, + git::openapi::sync_now, + git::openapi::import_repository, + git::openapi::pull_repository, + git::openapi::start_pull_session, + git::openapi::get_pull_session, + git::openapi::resolve_pull_session, + git::openapi::finalize_pull_session, + git::openapi::init_repository, + git::openapi::deinit_repository, + git::openapi::ignore_document, + git::openapi::ignore_folder, + git::openapi::get_gitignore_patterns, + git::openapi::add_gitignore_patterns, + git::openapi::check_path_ignored, + storage_ingest::openapi::enqueue_ingest_events, + markdown::openapi::render_markdown, + markdown::openapi::render_markdown_many, + plugins::openapi::get_manifest, + plugins::openapi::exec_action, + plugins::openapi::list_records, + plugins::openapi::create_record, + plugins::openapi::update_record, + plugins::openapi::delete_record, + plugins::openapi::get_kv_value, + plugins::openapi::put_kv_value, + plugins::openapi::install_from_url, + plugins::openapi::uninstall, + plugins::openapi::sse_updates, + plugins::openapi::get_plugin_asset, + workspaces::openapi::list_workspaces, + workspaces::openapi::create_workspace, + workspaces::openapi::get_workspace_detail, + workspaces::openapi::update_workspace, + workspaces::openapi::delete_workspace, + workspaces::openapi::leave_workspace, + workspaces::openapi::switch_workspace, + 
workspaces::openapi::list_members, + workspaces::openapi::update_member_role, + workspaces::openapi::remove_member, + workspaces::openapi::get_workspace_permissions, + workspaces::openapi::list_roles, + workspaces::openapi::create_role, + workspaces::openapi::update_role, + workspaces::openapi::delete_role, + workspaces::openapi::list_invitations, + workspaces::openapi::create_invitation, + workspaces::openapi::revoke_invitation, + workspaces::openapi::accept_invitation, + workspaces::openapi::download_workspace_archive, + health::openapi::health, + ), + components(schemas( + auth::RegisterRequest, + auth::LoginRequest, + auth::LoginResponse, + auth::OAuthLoginRequest, + auth::OAuthStateResponse, + auth::AuthProvidersResponse, + auth::AuthProviderInfoResponse, + auth::UserResponse, + auth::WorkspaceMembershipResponse, + auth::SessionResponse, + auth::RefreshResponse, + api_tokens::ApiTokenItem, + api_tokens::ApiTokenCreateRequest, + api_tokens::ApiTokenCreateResponse, + shortcuts::UserShortcutResponse, + shortcuts::UpdateUserShortcutRequest, + tags::TagItem, + documents::Document, + documents::DocumentListResponse, + documents::CreateDocumentRequest, + documents::UpdateDocumentRequest, + documents::DuplicateDocumentRequest, + documents::UpdateDocumentContentRequest, + documents::DocumentPatchOperationRequest, + documents::PatchDocumentContentRequest, + documents::SearchResult, + documents::BacklinkInfo, + documents::BacklinksResponse, + documents::OutgoingLink, + documents::OutgoingLinksResponse, + documents::DocumentDownloadBinary, + documents::DocumentArchiveBinary, + documents::DownloadFormat, + documents::DownloadDocumentQuery, + documents::SnapshotSummary, + documents::SnapshotListResponse, + documents::SnapshotDiffKind, + documents::SnapshotDiffSideResponse, + documents::SnapshotDiffResponse, + documents::SnapshotDiffBaseParam, + documents::SnapshotRestoreResponse, + files::UploadFileResponse, + files::UploadFileMultipart, + shares::CreateShareRequest, + shares::CreateShareResponse, + shares::CreateShareMountRequest, + shares::ShareItem, + shares::ShareDocumentResponse, + shares::ShareBrowseTreeItem, + shares::ShareBrowseResponse, + shares::ApplicableShareItem, + shares::ActiveShareItem, + shares::ShareMountItem, + shares::MaterializeResponse, + public::PublishResponse, + public::PublicDocumentSummary, + git::GitConfigResponse, + git::GitRemoteCheckResponse, + git::CreateGitConfigRequest, + git::UpdateGitConfigRequest, + git::GitStatus, + git::GitSyncRequest, + git::GitSyncResponse, + git::GitPullRequest, + git::GitPullResponse, + git::GitImportResponse, + git::GitPullSessionResponse, + git::GitPullResolution, + git::GitPullConflictItem, + git::GitChangeItem, + git::GitChangesResponse, + git::GitCommitItem, + git::GitHistoryResponse, + contracts::core::dtos::TextDiffLineType, + contracts::core::dtos::TextDiffLine, + contracts::core::dtos::TextDiffResult, + git::AddPatternsRequest, + git::CheckIgnoredRequest, + markdown::RenderOptionsPayload, + markdown::PlaceholderItemPayload, + markdown::RenderResponseBody, + markdown::RenderRequest, + markdown::RenderManyRequest, + markdown::RenderManyResponse, + plugins::ManifestItem, + plugins::RecordsResponse, + plugins::CreateRecordBody, + plugins::UpdateRecordBody, + plugins::KvValueResponse, + plugins::KvValueBody, + plugins::ExecBody, + plugins::ExecResultResponse, + plugins::InstallFromUrlBody, + plugins::InstallResponse, + plugins::UninstallBody, + workspaces::WorkspaceResponse, + workspaces::CreateWorkspaceRequest, + 
workspaces::WorkspaceMemberResponse, + workspaces::UpdateMemberRoleRequest, + workspaces::UpdateWorkspaceRequest, + workspaces::WorkspaceRoleResponse, + workspaces::PermissionOverridePayload, + workspaces::CreateWorkspaceRoleRequest, + workspaces::UpdateWorkspaceRoleRequest, + workspaces::SwitchWorkspaceResponse, + workspaces::WorkspacePermissionsResponse, + workspaces::WorkspaceInvitationResponse, + workspaces::CreateWorkspaceInvitationRequest, + workspaces::DownloadWorkspaceQuery, + storage_ingest::IngestBatchRequest, + storage_ingest::IngestEventRequest, + storage_ingest::IngestKindParam, + health::HealthResp, + )), + tags( + (name = "Auth", description = "Authentication"), + (name = "Documents", description = "Documents management"), + (name = "Files", description = "File management"), + (name = "Sharing", description = "Document sharing"), + (name = "Public Documents", description = "Public pages"), + (name = "Realtime", description = "Yjs WebSocket endpoint (/yjs/:id)"), + (name = "Git", description = "Git integration"), + (name = "Markdown", description = "Markdown rendering"), + (name = "Plugins", description = "Plugins management & data APIs"), + (name = "Storage", description = "Storage ingest APIs"), + (name = "Health", description = "System health checks") + ) +)] +pub struct ApiDoc; diff --git a/api/crates/presentation/src/security/mod.rs b/api/crates/presentation/src/security/mod.rs new file mode 100644 index 00000000..ba0c4a94 --- /dev/null +++ b/api/crates/presentation/src/security/mod.rs @@ -0,0 +1,2 @@ +pub mod request_status; +pub mod token; diff --git a/api/crates/presentation/src/security/request_status.rs b/api/crates/presentation/src/security/request_status.rs new file mode 100644 index 00000000..974acc6a --- /dev/null +++ b/api/crates/presentation/src/security/request_status.rs @@ -0,0 +1,28 @@ +use std::cell::Cell; + +use axum::http::{Request, StatusCode, header}; +use axum::{body::Body, middleware::Next, response::Response}; + +tokio::task_local! 
{ + static TOKEN_EXPIRED_FLAG: Cell<bool>; +} + +pub fn mark_token_expired() { + let _ = TOKEN_EXPIRED_FLAG.try_with(|flag| flag.set(true)); +} + +pub async fn middleware(req: Request<Body>, next: Next) -> Response { + TOKEN_EXPIRED_FLAG + .scope(Cell::new(false), async move { + let mut response = next.run(req).await; + let expired = TOKEN_EXPIRED_FLAG.with(|flag| flag.get()); + if expired && response.status() == StatusCode::UNAUTHORIZED { + response.headers_mut().insert( + header::WWW_AUTHENTICATE, + header::HeaderValue::from_static("Bearer error=\"token_expired\""), + ); + } + response + }) + .await +}
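Reviewer note: request_status uses a tokio task-local Cell<bool> so code far below the middleware (token validation in security::token) can flag an expired token without threading a parameter through every call, and try_with makes mark_token_expired a silent no-op outside a request scope. A runnable miniature of the same mechanism; FLAG and mark are illustrative names:

// Illustrative only: same task_local/scope/try_with shape as request_status.
use std::cell::Cell;

tokio::task_local! {
    static FLAG: Cell<bool>;
}

fn mark() {
    let _ = FLAG.try_with(|f| f.set(true)); // silently ignored outside a scope
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    mark(); // no scope yet: ignored, does not panic
    let seen = FLAG
        .scope(Cell::new(false), async {
            mark(); // deep in the "request", sets the task-local flag
            FLAG.with(|f| f.get())
        })
        .await;
    assert!(seen);
}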
diff --git a/api/crates/presentation/src/security/token.rs b/api/crates/presentation/src/security/token.rs new file mode 100644 index 00000000..6e7f4884 --- /dev/null +++ b/api/crates/presentation/src/security/token.rs @@ -0,0 +1,160 @@ +use application::core::services::{access, errors::ServiceError}; +use axum::extract::FromRequestParts; +use axum::http::HeaderMap; +use axum::http::request::Parts; +use tracing::error; +use uuid::Uuid; + +use crate::context::HasAuthServices; +use crate::http::error::ApiError; +use crate::security::request_status; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ActorResolveError { + TokenExpired, + Unauthorized, +} + +#[derive(Debug, Clone)] +pub struct Bearer(pub String); + +#[derive(Debug, Clone)] +pub struct AccessTokenOverride(pub String); + +fn get_cookie(cookie_header: &str, name: &str) -> Option<String> { + for part in cookie_header.split(';') { + let kv = part.trim(); + if let Some((k, v)) = kv.split_once('=') + && k.trim() == name + { + return Some(v.trim().to_string()); + } + } + None +} + +fn extract_bearer_token(headers: &HeaderMap) -> Option<String> { + // Prefer the session cookie if present to avoid accidentally overriding it + // with other Bearer values (e.g. share tokens) that might be sent by the + // client. + if let Some(cookie) = headers + .get(axum::http::header::COOKIE) + .and_then(|v| v.to_str().ok()) + && let Some(token) = get_cookie(cookie, "access_token") + && !token.trim().is_empty() + { + return Some(token); + } + + if let Some(auth) = headers + .get(axum::http::header::AUTHORIZATION) + .and_then(|v| v.to_str().ok()) + && let Some(t) = auth.strip_prefix("Bearer ") + && let trimmed = t.trim() + && !trimmed.is_empty() + { + return Some(trimmed.to_string()); + } + None +} + +pub fn bearer_from_headers(headers: &HeaderMap) -> Option<Bearer> { + extract_bearer_token(headers).map(Bearer) +} + +#[axum::async_trait] +impl<S> FromRequestParts<S> for Bearer +where + S: Send + Sync, +{ + type Rejection = ApiError; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> { + if let Some(token) = parts.extensions.get::<AccessTokenOverride>() { + return Ok(Bearer(token.0.clone())); + } + extract_bearer_token(&parts.headers) + .map(Bearer) + .ok_or(ApiError::unauthorized("unauthorized")) + } +} + +pub fn map_actor_error(err: ActorResolveError) -> ApiError { + match err { + ActorResolveError::TokenExpired => ApiError::unauthorized("token_expired"), + ActorResolveError::Unauthorized => ApiError::unauthorized("unauthorized"), + } +} + +pub async fn resolve_actor_from_token_str( + ctx: &impl HasAuthServices, + token: &str, +) -> Result<access::Actor, ActorResolveError> { + let trimmed = token.trim(); + if trimmed.is_empty() { + return Err(ActorResolveError::Unauthorized); + } + + let service = ctx.auth_service(); + match service.subject_from_token(trimmed).await { + Ok(Some(sub)) => { + if let Ok(uid) = Uuid::parse_str(&sub) { + if let Some(session_id) = service.session_id_from_token_claim(trimmed) + && let Err(err) = ctx + .session_service() + .ensure_session_active(session_id) + .await + { + if err.is_internal() { + error!(error = ?err, "session_validation_failed"); + } + return Err(ActorResolveError::Unauthorized); + } + Ok(access::Actor::User(uid)) + } else { + Ok(access::Actor::Public) + } + } + Ok(None) => Ok(access::Actor::ShareToken(trimmed.to_string())), + Err(ServiceError::TokenExpired) => { + request_status::mark_token_expired(); + Err(ActorResolveError::TokenExpired) + } + Err(err) => { + if err.is_internal() { + error!(error = ?err, "token_validation_failed"); + } + Err(ActorResolveError::Unauthorized) + } + } +} + +pub async fn resolve_actor_from_parts( + ctx: &impl HasAuthServices, + bearer: Option<Bearer>, + share_token: Option<&str>, +) -> Result<Option<access::Actor>, ActorResolveError> { + if let Some(token) = share_token + && let Ok(actor) = resolve_actor_from_token_str(ctx, token).await + { + return Ok(Some(actor)); + } + + if let Some(b) = bearer + && let Ok(actor) = resolve_actor_from_token_str(ctx, &b.0).await + { + return Ok(Some(actor)); + } + + Ok(None) +} + +pub async fn require_user_id( + ctx: &impl HasAuthServices, + bearer: Bearer, +) -> Result<Uuid, ActorResolveError> { + match resolve_actor_from_token_str(ctx, &bearer.0).await? { + access::Actor::User(user_id) => Ok(user_id), + _ => Err(ActorResolveError::Unauthorized), + } +}
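Reviewer note: extract_bearer_token deliberately prefers the access_token cookie over the Authorization header, per the inline comment, so a share token sent as Bearer cannot shadow an authenticated session. The same precedence as a freestanding function over plain strings; no axum types, names illustrative:

// Illustrative only: cookie wins over Authorization, empty values are skipped.
fn get_cookie(cookie_header: &str, name: &str) -> Option<String> {
    cookie_header.split(';').find_map(|part| {
        let (k, v) = part.trim().split_once('=')?;
        (k.trim() == name).then(|| v.trim().to_string())
    })
}

fn bearer(cookie: Option<&str>, authorization: Option<&str>) -> Option<String> {
    if let Some(tok) = cookie.and_then(|c| get_cookie(c, "access_token")) {
        if !tok.is_empty() {
            return Some(tok); // session cookie wins
        }
    }
    authorization
        .and_then(|a| a.strip_prefix("Bearer "))
        .map(str::trim)
        .filter(|t| !t.is_empty())
        .map(str::to_string)
}

fn main() {
    let c = Some("theme=dark; access_token=abc");
    assert_eq!(bearer(c, Some("Bearer share-xyz")).as_deref(), Some("abc"));
    assert_eq!(bearer(None, Some("Bearer share-xyz")).as_deref(), Some("share-xyz"));
}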
diff --git a/api/crates/presentation/src/ws/documents/mod.rs b/api/crates/presentation/src/ws/documents/mod.rs new file mode 100644 index 00000000..218a5599 --- /dev/null +++ b/api/crates/presentation/src/ws/documents/mod.rs @@ -0,0 +1 @@ +pub mod yjs; diff --git a/api/src/presentation/ws/ws.rs b/api/crates/presentation/src/ws/documents/yjs.rs similarity index 76% rename from api/src/presentation/ws/ws.rs rename to api/crates/presentation/src/ws/documents/yjs.rs index 5893e2e6..a4913468 100644 --- a/api/src/presentation/ws/ws.rs +++ b/api/crates/presentation/src/ws/documents/yjs.rs @@ -1,10 +1,12 @@ use std::pin::Pin; -use std::sync::Arc; -use crate::application::access::Capability; -use crate::application::ports::realtime_port::RealtimeError; -use crate::presentation::context::{AppContext, DynRealtimeSink, DynRealtimeStream}; -use crate::presentation::http::auth::{self, AccessTokenOverride}; +use crate::context::{DynRealtimeSink, DynRealtimeStream, WsContext}; +use crate::security::request_status; +use crate::security::token::{ + AccessTokenOverride, ActorResolveError, bearer_from_headers, resolve_actor_from_token_str, +}; +use application::core::services::access::Capability; +use application::documents::ports::realtime::realtime_port::RealtimeError; use axum::extract::ws::{Message as AxumMessage, WebSocket, WebSocketUpgrade}; use axum::extract::{Extension, Path, Query, State}; use axum::http::HeaderMap; @@ -12,7 +14,6 @@ use axum::http::StatusCode; use axum::response::IntoResponse; use futures_util::{Sink, Stream, StreamExt}; use serde::Deserialize; -use tokio::sync::Mutex; use uuid::Uuid; #[derive(Debug, Deserialize, Clone)] @@ -21,8 +22,6 @@ pub struct AuthQuery { pub access_token: Option<String>, } -// Uses AppContext as router state - #[utoipa::path( get, path = "/api/yjs/{id}", @@ -42,53 +41,43 @@ pub async fn axum_ws_entry( ws: WebSocketUpgrade, Query(query): Query<AuthQuery>, headers: HeaderMap, - State(state): State<AppContext>, + State(state): State<WsContext>, override_token: Option<Extension<AccessTokenOverride>>, ) -> Result<impl IntoResponse, StatusCode> { let token = override_token.map(|Extension(t)| t.0).or_else(|| { query .token .or(query.access_token) - .or_else(|| { - headers - .get(axum::http::header::AUTHORIZATION) - .and_then(|h| h.to_str().ok().map(|s| s.to_owned())) - .and_then(|s| s.strip_prefix("Bearer ").map(|s| s.to_string())) - }) - .or_else(|| { - // Fallback to cookie `access_token` - headers - .get(axum::http::header::COOKIE) - .and_then(|h| h.to_str().ok()) - .and_then(|cookie_hdr| { - for part in cookie_hdr.split(';') { - let kv = part.trim(); - if let Some((k, v)) = kv.split_once('=') { - if k.trim() == "access_token" { - return Some(v.trim().to_string()); - } - } - } - None - }) - }) + .or_else(|| bearer_from_headers(&headers).map(|b| b.0)) }); // Try to parse document ID let doc_uuid = Uuid::parse_str(&doc_id).map_err(|_| StatusCode::UNAUTHORIZED)?; // Resolve actor capability - let actor = if let Some(token_str) = token.as_deref() { - auth::resolve_actor_from_token_str(&state, token_str).await - } else { - None - } - .ok_or(StatusCode::UNAUTHORIZED)?; + let actor = match token.as_deref() { + Some(token_str) => match resolve_actor_from_token_str(&state, token_str).await { + Ok(actor) => actor, + Err(ActorResolveError::TokenExpired) => { + request_status::mark_token_expired(); + return Err(StatusCode::UNAUTHORIZED); + } + Err(ActorResolveError::Unauthorized) => return Err(StatusCode::UNAUTHORIZED), + }, + None => return Err(StatusCode::UNAUTHORIZED), + }; let cap = state .authorization() .resolve_document(&actor, doc_uuid) - .await; + .await + .map_err(|err| { + if err.is_internal() { + StatusCode::INTERNAL_SERVER_ERROR + } else { + StatusCode::UNAUTHORIZED + } + })?; if cap == Capability::None { return Err(StatusCode::UNAUTHORIZED); } @@ -98,6 +87,10 @@ pub async fn axum_ws_entry( Ok(ws.on_upgrade(move |socket| peer_axum(doc_id, socket, ctx, can_edit))) } +pub mod openapi { + pub use super::*; +} +
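Reviewer note: the WS entry point reduces authorization to a capability gate before upgrading the socket: Capability::None is rejected with 401, and what the peer task receives is a single can_edit flag. Its exact derivation is elided from this hunk, so the ordering assumed below (write implies edit) is an assumption, not the crate's definition:

// Illustrative only: a reduced model of the capability gate; the real enum
// lives in application::core::services::access.
#[derive(PartialEq, PartialOrd, Clone, Copy)]
enum Capability { None, Read, Write }

fn gate(cap: Capability) -> Result<bool, &'static str> {
    if cap == Capability::None {
        return Err("unauthorized"); // mirrors the StatusCode::UNAUTHORIZED path
    }
    Ok(cap >= Capability::Write) // the can_edit flag handed to the peer task
}

fn main() {
    assert_eq!(gate(Capability::Read), Ok(false));
    assert_eq!(gate(Capability::Write), Ok(true));
    assert!(gate(Capability::None).is_err());
}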
// WebSocket <-> Vec<u8> sink adapter struct WsBinarySink { inner: futures_util::stream::SplitSink<WebSocket, AxumMessage>, } @@ -180,13 +173,12 @@ impl Stream for WsBinaryStream { } // WS peer using Axum WebSocket -async fn peer_axum(doc_id: String, ws: WebSocket, ctx: AppContext, can_edit: bool) { +async fn peer_axum(doc_id: String, ws: WebSocket, ctx: WsContext, can_edit: bool) { tracing::debug!(%doc_id, "WS peer:upgrade"); let (sink_raw, stream_raw) = ws.split(); let sink_box: Pin<Box<WsBinarySink>> = Box::pin(WsBinarySink { inner: sink_raw }); - let sink_dyn: DynRealtimeSink = Arc::new(Mutex::new( - sink_box as Pin<Box<dyn Sink<Vec<u8>, Error = RealtimeError> + Send + Sync>>, - )); + let sink_dyn: DynRealtimeSink = + sink_box as Pin<Box<dyn Sink<Vec<u8>, Error = RealtimeError> + Send + Sync>>; let stream_box: Pin<Box<WsBinaryStream>> = Box::pin(WsBinaryStream { inner: stream_raw }); let stream_dyn: DynRealtimeStream = stream_box as Pin<Box<dyn Stream<Item = Result<Vec<u8>, RealtimeError>> + Send + Sync>>; diff --git a/api/crates/presentation/src/ws/mod.rs b/api/crates/presentation/src/ws/mod.rs new file mode 100644 index 00000000..487a38d5 --- /dev/null +++ b/api/crates/presentation/src/ws/mod.rs @@ -0,0 +1 @@ +pub mod documents; diff --git a/api/docker/entrypoint.sh b/api/entrypoint.sh similarity index 100% rename from api/docker/entrypoint.sh rename to api/entrypoint.sh diff --git a/api/openapi.json b/api/openapi.json new file mode 100644 index 00000000..a296188a --- /dev/null +++ b/api/openapi.json @@ -0,0 +1 @@ +{"openapi":"3.0.3","info":{"title":"presentation","description":"","license":{"name":""},"version":"0.1.0"},"paths":{"/api/auth/login":{"post":{"tags":["Auth"],"operationId":"login","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/LoginRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/LoginResponse"}}}}},"security":[{}]}},"/api/auth/logout":{"post":{"tags":["Auth"],"operationId":"logout","responses":{"204":{"description":""}}}},"/api/auth/me":{"get":{"tags":["Auth"],"operationId":"me","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserResponse"}}}}}},"delete":{"tags":["Auth"],"operationId":"delete_account","responses":{"204":{"description":""}}}},"/api/auth/oauth/{provider}":{"post":{"tags":["Auth"],"operationId":"oauth_login","parameters":[{"name":"provider","in":"path","description":"OAuth provider identifier (e.g., google)","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/OAuthLoginRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/LoginResponse"}}}}},"security":[{}]}},"/api/auth/oauth/{provider}/state":{"post":{"tags":["Auth"],"operationId":"oauth_state","parameters":[{"name":"provider","in":"path","description":"OAuth provider 
identifier","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/OAuthStateResponse"}}}}},"security":[{}]}},"/api/auth/providers":{"get":{"tags":["Auth"],"operationId":"list_oauth_providers","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/AuthProvidersResponse"}}}}},"security":[{}]}},"/api/auth/refresh":{"post":{"tags":["Auth"],"operationId":"refresh_session","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RefreshResponse"}}}}}}},"/api/auth/register":{"post":{"tags":["Auth"],"operationId":"register","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RegisterRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserResponse"}}}}},"security":[{}]}},"/api/auth/sessions":{"get":{"tags":["Auth"],"operationId":"list_sessions","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/SessionResponse"}}}}}}}},"/api/auth/sessions/{id}":{"delete":{"tags":["Auth"],"operationId":"revoke_session","parameters":[{"name":"id","in":"path","description":"Session ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/documents":{"get":{"tags":["Documents"],"operationId":"list_documents","parameters":[{"name":"query","in":"query","description":"Search query","required":false,"schema":{"type":"string","nullable":true}},{"name":"tag","in":"query","description":"Filter by tag","required":false,"schema":{"type":"string","nullable":true}},{"name":"state","in":"query","description":"Filter by document state (active|archived|all)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DocumentListResponse"}}}}}},"post":{"tags":["Documents"],"operationId":"create_document","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateDocumentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/search":{"get":{"tags":["Documents"],"operationId":"search_documents","parameters":[{"name":"q","in":"query","description":"Query","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/SearchResult"}}}}}}}},"/api/documents/{id}":{"get":{"tags":["Documents"],"operationId":"get_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}},"delete":{"tags":["Documents"],"operationId":"delete_document","parameters":[{"name":"id","in":"path","description":"Document 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Documents"],"operationId":"update_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateDocumentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/{id}/archive":{"post":{"tags":["Documents"],"operationId":"archive_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}},"404":{"description":"Document not found"},"409":{"description":"Document already archived"}}}},"/api/documents/{id}/backlinks":{"get":{"tags":["Documents"],"operationId":"getBacklinks","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/BacklinksResponse"}}}}}}},"/api/documents/{id}/content":{"get":{"tags":["Documents"],"operationId":"get_document_content","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":""}}},"put":{"tags":["Documents"],"operationId":"update_document_content","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateDocumentContentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}},"patch":{"tags":["Documents"],"operationId":"patch_document_content","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PatchDocumentContentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/{id}/download":{"get":{"tags":["Documents"],"operationId":"download_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}},{"name":"format","in":"query","description":"Download format (see schema for supported values)","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/DownloadFormat"}],"nullable":true}}],"responses":{"200":{"description":"Document download","content":{"application/octet-stream":{"schema":{"$ref":"#/components/schemas/DocumentDownloadBinary"}}}},"401":{"description":"Unauthorized"},"404":{"description":"Document not 
found"}}}},"/api/documents/{id}/duplicate":{"post":{"tags":["Documents"],"operationId":"duplicate_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/DuplicateDocumentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/{id}/links":{"get":{"tags":["Documents"],"operationId":"getOutgoingLinks","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/OutgoingLinksResponse"}}}}}}},"/api/documents/{id}/snapshots":{"get":{"tags":["Documents"],"operationId":"list_document_snapshots","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}},{"name":"limit","in":"query","description":"Maximum number of snapshots to return","required":false,"schema":{"type":"integer","format":"int64","nullable":true}},{"name":"offset","in":"query","description":"Offset for pagination","required":false,"schema":{"type":"integer","format":"int64","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SnapshotListResponse"}}}}}}},"/api/documents/{id}/snapshots/{snapshot_id}/diff":{"get":{"tags":["Documents"],"operationId":"get_document_snapshot_diff","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"snapshot_id","in":"path","description":"Snapshot ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}},{"name":"compare","in":"query","description":"Snapshot ID to compare against (defaults to current document state)","required":false,"schema":{"type":"string","format":"uuid","nullable":true}},{"name":"base","in":"query","description":"Base comparison to use when compare is not provided (auto|current|previous)","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/SnapshotDiffBaseParam"}],"nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SnapshotDiffResponse"}}}}}}},"/api/documents/{id}/snapshots/{snapshot_id}/download":{"get":{"tags":["Documents"],"operationId":"download_document_snapshot","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"snapshot_id","in":"path","description":"Snapshot ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"Snapshot archive","content":{"application/zip":{"schema":{"$ref":"#/components/schemas/DocumentArchiveBinary"}}}},"401":{"description":"Unauthorized"},"404":{"description":"Snapshot not 
found"}}}},"/api/documents/{id}/snapshots/{snapshot_id}/restore":{"post":{"tags":["Documents"],"operationId":"restore_document_snapshot","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"snapshot_id","in":"path","description":"Snapshot ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SnapshotRestoreResponse"}}}}}}},"/api/documents/{id}/unarchive":{"post":{"tags":["Documents"],"operationId":"unarchive_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}},"404":{"description":"Document not found"},"409":{"description":"Document is not archived"}}}},"/api/files":{"post":{"tags":["Files"],"operationId":"upload_file","requestBody":{"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/UploadFileMultipart"}}},"required":true},"responses":{"201":{"description":"File uploaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UploadFileResponse"}}}}}}},"/api/files/documents/{filename}":{"get":{"tags":["Files"],"operationId":"get_file_by_name","parameters":[{"name":"filename","in":"path","description":"File name","required":true,"schema":{"type":"string"}},{"name":"document_id","in":"query","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"OK","content":{"application/octet-stream":{"schema":{"type":"string","format":"binary"}}}}}}},"/api/files/{id}":{"get":{"tags":["Files"],"operationId":"get_file","parameters":[{"name":"id","in":"path","description":"File 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"OK","content":{"application/octet-stream":{"schema":{"type":"string","format":"binary"}}}}}}},"/api/git/changes":{"get":{"tags":["Git"],"operationId":"get_changes","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitChangesResponse"}}}}}}},"/api/git/config":{"get":{"tags":["Git"],"operationId":"get_config","responses":{"200":{"description":"","content":{"application/json":{"schema":{"allOf":[{"$ref":"#/components/schemas/GitConfigResponse"}],"nullable":true}}}}}},"post":{"tags":["Git"],"operationId":"create_or_update_config","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateGitConfigRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitConfigResponse"}}}}}},"delete":{"tags":["Git"],"operationId":"delete_config","responses":{"204":{"description":"Deleted"}}}},"/api/git/deinit":{"post":{"tags":["Git"],"operationId":"deinit_repository","responses":{"200":{"description":"OK"}}}},"/api/git/diff/commits/{from}/{to}":{"get":{"tags":["Git"],"operationId":"get_commit_diff","parameters":[{"name":"from","in":"path","description":"From","required":true,"schema":{"type":"string"}},{"name":"to","in":"path","description":"To","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/TextDiffResult"}}}}}}}},"/api/git/diff/working":{"get":{"tags":["Git"],"operationId":"get_working_diff","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/TextDiffResult"}}}}}}}},"/api/git/gitignore/check":{"post":{"tags":["Git"],"operationId":"check_path_ignored","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CheckIgnoredRequest"}}},"required":true},"responses":{"200":{"description":"OK"}}}},"/api/git/gitignore/patterns":{"get":{"tags":["Git"],"operationId":"get_gitignore_patterns","responses":{"200":{"description":"OK"}}},"post":{"tags":["Git"],"operationId":"add_gitignore_patterns","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/AddPatternsRequest"}}},"required":true},"responses":{"200":{"description":"OK"}}}},"/api/git/history":{"get":{"tags":["Git"],"operationId":"get_history","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitHistoryResponse"}}}}}}},"/api/git/ignore/doc/{id}":{"post":{"tags":["Git"],"operationId":"ignore_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"OK"}}}},"/api/git/ignore/folder/{id}":{"post":{"tags":["Git"],"operationId":"ignore_folder","parameters":[{"name":"id","in":"path","description":"Folder 
ID","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"OK"}}}},"/api/git/import":{"post":{"tags":["Git"],"operationId":"import_repository","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateGitConfigRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitImportResponse"}}}}}}},"/api/git/init":{"post":{"tags":["Git"],"operationId":"init_repository","responses":{"200":{"description":"OK"}}}},"/api/git/pull":{"post":{"tags":["Git"],"operationId":"pull_repository","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}},"409":{"description":"Conflicts detected","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}}}}},"/api/git/pull/session/{id}":{"get":{"tags":["Git"],"operationId":"get_pull_session","parameters":[{"name":"id","in":"path","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}}}}},"/api/git/pull/session/{id}/finalize":{"post":{"tags":["Git"],"operationId":"finalize_pull_session","parameters":[{"name":"id","in":"path","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}},"400":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}},"409":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}}}}},"/api/git/pull/session/{id}/resolve":{"post":{"tags":["Git"],"operationId":"resolve_pull_session","parameters":[{"name":"id","in":"path","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"400":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"409":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}}}}},"/api/git/pull/start":{"post":{"tags":["Git"],"operationId":"start_pull_session","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"400":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"409":{"description":"Conflicts 
detected","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}}}}},"/api/git/status":{"get":{"tags":["Git"],"operationId":"get_status","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitStatus"}}}}}}},"/api/git/sync":{"post":{"tags":["Git"],"operationId":"sync_now","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitSyncRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitSyncResponse"}}}},"409":{"description":"Conflicts during rebase/pull"}}}},"/api/health":{"get":{"tags":["Health"],"operationId":"health","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HealthResp"}}}}}}},"/api/markdown/render":{"post":{"tags":["Markdown"],"operationId":"render_markdown","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderResponseBody"}}}}}}},"/api/markdown/render-many":{"post":{"tags":["Markdown"],"operationId":"render_markdown_many","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderManyRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderManyResponse"}}}}}}},"/api/me/api-tokens":{"get":{"tags":["Auth"],"operationId":"list_api_tokens","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ApiTokenItem"}}}}}}},"post":{"tags":["Auth"],"operationId":"create_api_token","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ApiTokenCreateRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ApiTokenCreateResponse"}}}}}}},"/api/me/api-tokens/{id}":{"delete":{"tags":["Auth"],"operationId":"revoke_api_token","parameters":[{"name":"id","in":"path","description":"Token ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/me/plugins/install-from-url":{"post":{"tags":["Plugins"],"operationId":"pluginsInstallFromUrl","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/InstallFromUrlBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/InstallResponse"}}}}}}},"/api/me/plugins/manifest":{"get":{"tags":["Plugins"],"operationId":"pluginsGetManifest","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ManifestItem"}}}}}}}},"/api/me/plugins/uninstall":{"post":{"tags":["Plugins"],"operationId":"pluginsUninstall","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UninstallBody"}}},"required":true},"responses":{"204":{"description":""}}}},"/api/me/plugins/updates":{"get":{"tags":["Plugins"],"operationId":"sse_updates","responses":{"200":{"description":"Plugin event 
stream"}}}},"/api/me/shortcuts":{"get":{"tags":["Auth"],"operationId":"get_user_shortcuts","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserShortcutResponse"}}}}}},"put":{"tags":["Auth"],"operationId":"update_user_shortcuts","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateUserShortcutRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserShortcutResponse"}}}}}}},"/api/plugin-assets":{"get":{"tags":["Plugins"],"operationId":"pluginsGetAsset","parameters":[{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"Plugin asset"}}}},"/api/plugins/{plugin}/docs/{doc_id}/kv/{key}":{"get":{"tags":["Plugins"],"operationId":"pluginsGetKv","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"key","in":"path","description":"Key","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/KvValueResponse"}}}}}},"put":{"tags":["Plugins"],"operationId":"pluginsPutKv","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"key","in":"path","description":"Key","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/KvValueBody"}}},"required":true},"responses":{"204":{"description":""}}}},"/api/plugins/{plugin}/docs/{doc_id}/records/{kind}":{"get":{"tags":["Plugins"],"operationId":"list_records","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"kind","in":"path","description":"Record kind","required":true,"schema":{"type":"string"}},{"name":"limit","in":"query","description":"Limit","required":false,"schema":{"type":"integer","format":"int64","nullable":true}},{"name":"offset","in":"query","description":"Offset","required":false,"schema":{"type":"integer","format":"int64","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RecordsResponse"}}}}}},"post":{"tags":["Plugins"],"operationId":"pluginsCreateRecord","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"kind","in":"path","description":"Record kind","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateRecordBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{}}}}}}},"/api/plugins/{plugin}/exec/{action}":{"post":{"tags":["Plugins"],"operationId":"pluginsExecAction","parameters":[{"name":"plugin","in":"path","description":"Plugin 
ID","required":true,"schema":{"type":"string"}},{"name":"action","in":"path","description":"Action","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExecBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExecResultResponse"}}}}}}},"/api/plugins/{plugin}/records/{id}":{"delete":{"tags":["Plugins"],"operationId":"pluginsDeleteRecord","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Record ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Plugins"],"operationId":"pluginsUpdateRecord","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Record ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateRecordBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{}}}}}}},"/api/public/documents/{id}":{"get":{"tags":["Public Documents"],"operationId":"get_publish_status","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Published status","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PublishResponse"}}}}}},"post":{"tags":["Public Documents"],"operationId":"publish_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Published","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PublishResponse"}}}}}},"delete":{"tags":["Public Documents"],"operationId":"unpublish_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":"Unpublished"}}}},"/api/public/workspaces/{slug}":{"get":{"tags":["Public Documents"],"operationId":"list_workspace_public_documents","parameters":[{"name":"slug","in":"path","description":"Workspace slug","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Public documents for workspace","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/PublicDocumentSummary"}}}}}}}},"/api/public/workspaces/{slug}/{id}":{"get":{"tags":["Public Documents"],"operationId":"get_public_by_workspace_and_id","parameters":[{"name":"slug","in":"path","description":"Workspace slug","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Document metadata","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/public/workspaces/{slug}/{id}/content":{"get":{"tags":["Public Documents"],"operationId":"get_public_content_by_workspace_and_id","parameters":[{"name":"slug","in":"path","description":"Workspace slug","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Document 
content"}}}},"/api/shares":{"post":{"tags":["Sharing"],"operationId":"create_share","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateShareRequest"}}},"required":true},"responses":{"200":{"description":"Share link created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateShareResponse"}}}}}}},"/api/shares/active":{"get":{"tags":["Sharing"],"operationId":"list_active_shares","responses":{"200":{"description":"Active shares","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ActiveShareItem"}}}}}}}},"/api/shares/applicable":{"get":{"tags":["Sharing"],"operationId":"list_applicable_shares","parameters":[{"name":"doc_id","in":"query","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Shares that include the document","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ApplicableShareItem"}}}}}}}},"/api/shares/browse":{"get":{"tags":["Sharing"],"operationId":"browse_share","parameters":[{"name":"token","in":"query","description":"Share token","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Share tree","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ShareBrowseResponse"}}}}}}},"/api/shares/documents/{id}":{"get":{"tags":["Sharing"],"operationId":"list_document_shares","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ShareItem"}}}}}}}},"/api/shares/folders/{token}/materialize":{"post":{"tags":["Sharing"],"operationId":"materialize_folder_share","parameters":[{"name":"token","in":"path","description":"Folder share token","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Created doc shares","content":{"application/json":{"schema":{"$ref":"#/components/schemas/MaterializeResponse"}}}}}}},"/api/shares/mounts":{"get":{"tags":["Sharing"],"operationId":"list_share_mounts","responses":{"200":{"description":"Share mounts","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ShareMountItem"}}}}}}},"post":{"tags":["Sharing"],"operationId":"create_share_mount","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateShareMountRequest"}}},"required":true},"responses":{"200":{"description":"Saved share mount","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ShareMountItem"}}}}}}},"/api/shares/mounts/{id}":{"delete":{"tags":["Sharing"],"operationId":"delete_share_mount","parameters":[{"name":"id","in":"path","description":"Share mount ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":"Share mount removed"}}}},"/api/shares/validate":{"get":{"tags":["Sharing"],"operationId":"validate_share_token","parameters":[{"name":"token","in":"query","description":"Share token","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Document info","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ShareDocumentResponse"}}}}}}},"/api/shares/{token}":{"delete":{"tags":["Sharing"],"operationId":"delete_share","parameters":[{"name":"token","in":"path","description":"Share 
token","required":true,"schema":{"type":"string"}}],"responses":{"204":{"description":"Share link deleted"}}}},"/api/storage/ingest":{"post":{"tags":["Storage"],"operationId":"enqueue_ingest_events","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/IngestBatchRequest"}}},"required":true},"responses":{"202":{"description":"Events enqueued"},"400":{"description":"Invalid request"}}}},"/api/tags":{"get":{"tags":["Tags"],"operationId":"list_tags","parameters":[{"name":"q","in":"query","description":"Filter contains","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/TagItem"}}}}}}}},"/api/workspace-invitations/{token}/accept":{"post":{"tags":["Workspaces"],"operationId":"accept_invitation","parameters":[{"name":"token","in":"path","description":"Invitation token","required":true,"schema":{"type":"string"}}],"responses":{"204":{"description":""}}}},"/api/workspaces":{"get":{"tags":["Workspaces"],"operationId":"list_workspaces","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}}},"post":{"tags":["Workspaces"],"operationId":"create_workspace","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateWorkspaceRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}}},"/api/workspaces/{id}":{"get":{"tags":["Workspaces"],"operationId":"get_workspace_detail","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}},"put":{"tags":["Workspaces"],"operationId":"update_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateWorkspaceRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}},"delete":{"tags":["Workspaces"],"operationId":"delete_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/workspaces/{id}/download":{"get":{"tags":["Workspaces"],"operationId":"download_workspace_archive","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"format","in":"query","description":"Download format (archive only)","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/DownloadFormat"}],"nullable":true}}],"responses":{"200":{"description":"Workspace download","content":{"application/octet-stream":{"schema":{"$ref":"#/components/schemas/DocumentDownloadBinary"}}}},"401":{"description":"Unauthorized"},"404":{"description":"Workspace not found"}}}},"/api/workspaces/{id}/invitations":{"get":{"tags":["Workspaces"],"operationId":"list_invitations","parameters":[{"name":"id","in":"path","description":"Workspace 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceInvitationResponse"}}}}}}},"post":{"tags":["Workspaces"],"operationId":"create_invitation","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateWorkspaceInvitationRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceInvitationResponse"}}}}}}},"/api/workspaces/{id}/invitations/{invitation_id}":{"delete":{"tags":["Workspaces"],"operationId":"revoke_invitation","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"invitation_id","in":"path","description":"Invitation ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceInvitationResponse"}}}}}}},"/api/workspaces/{id}/leave":{"post":{"tags":["Workspaces"],"operationId":"leave_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/workspaces/{id}/members":{"get":{"tags":["Workspaces"],"operationId":"list_members","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceMemberResponse"}}}}}}}},"/api/workspaces/{id}/members/{user_id}":{"delete":{"tags":["Workspaces"],"operationId":"remove_member","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"user_id","in":"path","description":"Target user ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Workspaces"],"operationId":"update_member_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"user_id","in":"path","description":"Target user ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateMemberRoleRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceMemberResponse"}}}}}}},"/api/workspaces/{id}/permissions":{"get":{"tags":["Workspaces"],"operationId":"get_workspace_permissions","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspacePermissionsResponse"}}}}}}},"/api/workspaces/{id}/roles":{"get":{"tags":["Workspaces"],"operationId":"list_roles","parameters":[{"name":"id","in":"path","description":"Workspace 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceRoleResponse"}}}}}}},"post":{"tags":["Workspaces"],"operationId":"create_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateWorkspaceRoleRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceRoleResponse"}}}}}}},"/api/workspaces/{id}/roles/{role_id}":{"delete":{"tags":["Workspaces"],"operationId":"delete_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"role_id","in":"path","description":"Role ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Workspaces"],"operationId":"update_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"role_id","in":"path","description":"Role ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateWorkspaceRoleRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceRoleResponse"}}}}}}},"/api/workspaces/{id}/switch":{"post":{"tags":["Workspaces"],"operationId":"switch_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SwitchWorkspaceResponse"}}}}}}},"/api/yjs/{id}":{"get":{"tags":["Realtime"],"operationId":"axum_ws_entry","parameters":[{"name":"id","in":"path","description":"Document ID (UUID)","required":true,"schema":{"type":"string"}},{"name":"token","in":"query","description":"JWT or share token","required":false,"schema":{"type":"string","nullable":true}},{"name":"Authorization","in":"header","description":"Bearer token (JWT or share token)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"101":{"description":"Switching Protocols (WebSocket upgrade)"},"401":{"description":"Unauthorized"}}}}},"components":{"schemas":{"ActiveShareItem":{"type":"object","required":["id","token","permission","created_at","document_id","document_title","document_type","url"],"properties":{"created_at":{"type":"string","format":"date-time"},"document_id":{"type":"string","format":"uuid"},"document_title":{"type":"string"},"document_type":{"type":"string"},"expires_at":{"type":"string","format":"date-time","nullable":true},"id":{"type":"string","format":"uuid"},"parent_share_id":{"type":"string","format":"uuid","nullable":true},"permission":{"type":"string"},"token":{"type":"string"},"url":{"type":"string"}}},"AddPatternsRequest":{"type":"object","required":["patterns"],"properties":{"patterns":{"type":"array","items":{"type":"string"}}}},"ApiTokenCreateRequest":{"type":"object","properties":{"name":{"type":"string","example":"Deploy 
token","nullable":true}}},"ApiTokenCreateResponse":{"type":"object","required":["id","name","created_at","token"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"name":{"type":"string"},"token":{"type":"string"}}},"ApiTokenItem":{"type":"object","required":["id","name","created_at"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"last_used_at":{"type":"string","format":"date-time","nullable":true},"name":{"type":"string"},"revoked_at":{"type":"string","format":"date-time","nullable":true}}},"ApplicableShareItem":{"type":"object","required":["token","permission","scope","excluded"],"properties":{"excluded":{"type":"boolean"},"permission":{"type":"string"},"scope":{"type":"string"},"token":{"type":"string"}}},"AuthProviderInfoResponse":{"type":"object","required":["id","requires_state","client_ids"],"properties":{"authorization_url":{"type":"string","nullable":true},"client_ids":{"type":"array","items":{"type":"string"}},"id":{"type":"string"},"name":{"type":"string","nullable":true},"redirect_uri":{"type":"string","nullable":true},"requires_state":{"type":"boolean"},"scopes":{"type":"array","items":{"type":"string"}}}},"AuthProvidersResponse":{"type":"object","required":["providers"],"properties":{"providers":{"type":"array","items":{"$ref":"#/components/schemas/AuthProviderInfoResponse"}}}},"BacklinkInfo":{"type":"object","required":["document_id","title","document_type","link_type","link_count"],"properties":{"document_id":{"type":"string"},"document_type":{"type":"string"},"file_path":{"type":"string","nullable":true},"link_count":{"type":"integer","format":"int64"},"link_text":{"type":"string","nullable":true},"link_type":{"type":"string"},"title":{"type":"string"}}},"BacklinksResponse":{"type":"object","required":["backlinks","total_count"],"properties":{"backlinks":{"type":"array","items":{"$ref":"#/components/schemas/BacklinkInfo"}},"total_count":{"type":"integer","minimum":0}}},"CheckIgnoredRequest":{"type":"object","required":["path"],"properties":{"path":{"type":"string"}}},"CreateDocumentRequest":{"type":"object","properties":{"parent_id":{"type":"string","format":"uuid","nullable":true},"title":{"type":"string","nullable":true},"type":{"type":"string","nullable":true}}},"CreateGitConfigRequest":{"type":"object","required":["repository_url","auth_type","auth_data"],"properties":{"auth_data":{},"auth_type":{"type":"string"},"auto_sync":{"type":"boolean","nullable":true},"branch_name":{"type":"string","nullable":true},"repository_url":{"type":"string"}}},"CreateRecordBody":{"type":"object","required":["data"],"properties":{"data":{}}},"CreateShareMountRequest":{"type":"object","required":["token"],"properties":{"parent_folder_id":{"type":"string","format":"uuid","nullable":true},"token":{"type":"string"}}},"CreateShareRequest":{"type":"object","required":["document_id"],"properties":{"document_id":{"type":"string","format":"uuid"},"expires_at":{"type":"string","format":"date-time","nullable":true},"permission":{"type":"string","nullable":true}}},"CreateShareResponse":{"type":"object","required":["token","url"],"properties":{"token":{"type":"string"},"url":{"type":"string"}}},"CreateWorkspaceInvitationRequest":{"type":"object","required":["email","role_kind"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"email":{"type":"string"},"expires_at":{"type":"string","format":"date-time","nullable":true},"role_kind":{"type":"string"},"system_r
ole":{"type":"string","nullable":true}}},"CreateWorkspaceRequest":{"type":"object","required":["name"],"properties":{"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"name":{"type":"string"}}},"CreateWorkspaceRoleRequest":{"type":"object","required":["name","base_role"],"properties":{"base_role":{"type":"string"},"description":{"type":"string","nullable":true},"name":{"type":"string"},"overrides":{"type":"array","items":{"$ref":"#/components/schemas/PermissionOverridePayload"},"nullable":true},"priority":{"type":"integer","format":"int32","nullable":true}}},"Document":{"type":"object","required":["id","owner_id","workspace_id","title","type","created_at","updated_at","slug","desired_path"],"properties":{"archived_at":{"type":"string","format":"date-time","nullable":true},"archived_by":{"type":"string","format":"uuid","nullable":true},"archived_parent_id":{"type":"string","format":"uuid","nullable":true},"created_at":{"type":"string","format":"date-time"},"created_by":{"type":"string","format":"uuid","nullable":true},"created_by_plugin":{"type":"string","nullable":true},"desired_path":{"type":"string"},"id":{"type":"string","format":"uuid"},"owner_id":{"type":"string","format":"uuid","description":"Legacy alias for `workspace_id` kept for backward compatibility with older clients."},"parent_id":{"type":"string","format":"uuid","nullable":true},"path":{"type":"string","nullable":true},"slug":{"type":"string"},"title":{"type":"string"},"type":{"type":"string"},"updated_at":{"type":"string","format":"date-time"},"workspace_id":{"type":"string","format":"uuid"}}},"DocumentArchiveBinary":{"type":"string","format":"binary"},"DocumentDownloadBinary":{"type":"string","format":"binary"},"DocumentListResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/Document"}}}},"DocumentPatchOperationRequest":{"oneOf":[{"type":"object","required":["offset","text","op"],"properties":{"offset":{"type":"integer","minimum":0},"op":{"type":"string","enum":["insert"]},"text":{"type":"string"}}},{"type":"object","required":["offset","length","op"],"properties":{"length":{"type":"integer","minimum":0},"offset":{"type":"integer","minimum":0},"op":{"type":"string","enum":["delete"]}}},{"type":"object","required":["offset","length","text","op"],"properties":{"length":{"type":"integer","minimum":0},"offset":{"type":"integer","minimum":0},"op":{"type":"string","enum":["replace"]},"text":{"type":"string"}}}],"discriminator":{"propertyName":"op"}},"DownloadDocumentQuery":{"type":"object","properties":{"format":{"$ref":"#/components/schemas/DownloadFormat"},"token":{"type":"string","nullable":true}}},"DownloadFormat":{"type":"string","enum":["archive","markdown","html","html5","pdf","docx","latex","beamer","context","man","mediawiki","dokuwiki","textile","org","texinfo","opml","docbook","opendocument","odt","rtf","epub","epub3","fb2","asciidoc","icml","slidy","slideous","dzslides","revealjs","s5","json","plain","commonmark","commonmark_x","markdown_strict","markdown_phpextra","markdown_github","rst","native","haddock"]},"DownloadWorkspaceQuery":{"type":"object","properties":{"format":{"$ref":"#/components/schemas/DownloadFormat"}}},"DuplicateDocumentRequest":{"type":"object","properties":{"parent_id":{"type":"string","nullable":true},"title":{"type":"string","nullable":true}}},"ExecBody":{"type":"object","properties":{"payload":{"nullable":true}}},"ExecResultResponse":{"type":"object","required":["ok","effects"],"proper
ties":{"data":{"nullable":true},"effects":{"type":"array","items":{}},"error":{"nullable":true},"ok":{"type":"boolean"}}},"GitChangeItem":{"type":"object","required":["path","status"],"properties":{"path":{"type":"string"},"status":{"type":"string"}}},"GitChangesResponse":{"type":"object","required":["files"],"properties":{"files":{"type":"array","items":{"$ref":"#/components/schemas/GitChangeItem"}}}},"GitCommitItem":{"type":"object","required":["hash","message","author_name","author_email","time"],"properties":{"author_email":{"type":"string"},"author_name":{"type":"string"},"hash":{"type":"string"},"message":{"type":"string"},"time":{"type":"string","format":"date-time"}}},"GitConfigResponse":{"type":"object","required":["id","repository_url","branch_name","auth_type","auto_sync","created_at","updated_at"],"properties":{"auth_type":{"type":"string"},"auto_sync":{"type":"boolean"},"branch_name":{"type":"string"},"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"remote_check":{"allOf":[{"$ref":"#/components/schemas/GitRemoteCheckResponse"}],"nullable":true},"repository_url":{"type":"string"},"updated_at":{"type":"string","format":"date-time"}}},"GitHistoryResponse":{"type":"object","required":["commits"],"properties":{"commits":{"type":"array","items":{"$ref":"#/components/schemas/GitCommitItem"}}}},"GitImportResponse":{"type":"object","required":["success","message","files_changed","docs_created","attachments_created"],"properties":{"attachments_created":{"type":"integer","format":"int32"},"commit_hash":{"type":"string","nullable":true},"docs_created":{"type":"integer","format":"int32"},"files_changed":{"type":"integer","format":"int32"},"message":{"type":"string"},"success":{"type":"boolean"}}},"GitPullConflictItem":{"type":"object","required":["path","is_binary"],"properties":{"base":{"type":"string","nullable":true},"document_id":{"type":"string","format":"uuid","nullable":true},"is_binary":{"type":"boolean"},"ours":{"type":"string","nullable":true},"path":{"type":"string"},"theirs":{"type":"string","nullable":true}}},"GitPullRequest":{"type":"object","properties":{"resolutions":{"type":"array","items":{"$ref":"#/components/schemas/GitPullResolution"},"nullable":true}}},"GitPullResolution":{"type":"object","required":["path","choice"],"properties":{"choice":{"type":"string"},"content":{"type":"string","nullable":true},"path":{"type":"string"}}},"GitPullResponse":{"type":"object","required":["success","message","files_changed"],"properties":{"commit_hash":{"type":"string","nullable":true},"conflicts":{"type":"array","items":{"$ref":"#/components/schemas/GitPullConflictItem"},"nullable":true},"files_changed":{"type":"integer","format":"int32"},"git_status":{"allOf":[{"$ref":"#/components/schemas/GitStatus"}],"nullable":true},"message":{"type":"string"},"success":{"type":"boolean"}}},"GitPullSessionResponse":{"type":"object","required":["session_id","status","conflicts","resolutions"],"properties":{"conflicts":{"type":"array","items":{"$ref":"#/components/schemas/GitPullConflictItem"}},"message":{"type":"string","nullable":true},"resolutions":{"type":"array","items":{"$ref":"#/components/schemas/GitPullResolution"}},"session_id":{"type":"string","format":"uuid"},"status":{"type":"string"}}},"GitRemoteCheckResponse":{"type":"object","required":["ok","message"],"properties":{"message":{"type":"string"},"ok":{"type":"boolean"},"reason":{"type":"string","nullable":true}}},"GitStatus":{"type":"object","required":["repository_initialized","has_remote","
uncommitted_changes","untracked_files","sync_enabled"],"properties":{"current_branch":{"type":"string","nullable":true},"has_remote":{"type":"boolean"},"last_sync":{"type":"string","format":"date-time","nullable":true},"last_sync_commit_hash":{"type":"string","nullable":true},"last_sync_message":{"type":"string","nullable":true},"last_sync_status":{"type":"string","nullable":true},"repository_initialized":{"type":"boolean"},"sync_enabled":{"type":"boolean"},"uncommitted_changes":{"type":"integer","format":"int32","minimum":0},"untracked_files":{"type":"integer","format":"int32","minimum":0}}},"GitSyncRequest":{"type":"object","properties":{"force":{"type":"boolean","nullable":true},"full_scan":{"type":"boolean","nullable":true},"message":{"type":"string","nullable":true},"skip_push":{"type":"boolean","nullable":true}}},"GitSyncResponse":{"type":"object","required":["success","message","files_changed"],"properties":{"commit_hash":{"type":"string","nullable":true},"files_changed":{"type":"integer","format":"int32","minimum":0},"message":{"type":"string"},"success":{"type":"boolean"}}},"HealthResp":{"type":"object","required":["status"],"properties":{"status":{"type":"string"}}},"IngestBatchRequest":{"type":"object","required":["events"],"properties":{"events":{"type":"array","items":{"$ref":"#/components/schemas/IngestEventRequest"}}}},"IngestEventRequest":{"type":"object","required":["repo_path","kind"],"properties":{"backend":{"type":"string","nullable":true},"content_hash":{"type":"string","nullable":true},"kind":{"$ref":"#/components/schemas/IngestKindParam"},"payload":{"nullable":true},"repo_path":{"type":"string"}}},"IngestKindParam":{"type":"string","enum":["upsert","delete"]},"InstallFromUrlBody":{"type":"object","required":["url"],"properties":{"token":{"type":"string","nullable":true},"url":{"type":"string"}}},"InstallResponse":{"type":"object","required":["id","version"],"properties":{"id":{"type":"string"},"version":{"type":"string"}}},"KvValueBody":{"type":"object","required":["value"],"properties":{"value":{}}},"KvValueResponse":{"type":"object","required":["value"],"properties":{"value":{}}},"LoginRequest":{"type":"object","required":["email","password"],"properties":{"email":{"type":"string"},"password":{"type":"string"},"remember_me":{"type":"boolean"}}},"LoginResponse":{"type":"object","required":["access_token","user"],"properties":{"access_token":{"type":"string"},"user":{"$ref":"#/components/schemas/UserResponse"}}},"ManifestItem":{"type":"object","required":["id","version","scope","mounts","frontend","permissions","config","ui"],"properties":{"author":{"type":"string","nullable":true},"config":{},"frontend":{},"id":{"type":"string"},"mounts":{"type":"array","items":{"type":"string"}},"name":{"type":"string","nullable":true},"permissions":{"type":"array","items":{"type":"string"}},"repository":{"type":"string","nullable":true},"scope":{"type":"string"},"ui":{},"version":{"type":"string"}}},"MaterializeResponse":{"type":"object","required":["created"],"properties":{"created":{"type":"integer","format":"int64"}}},"OAuthLoginRequest":{"type":"object","properties":{"code":{"type":"string","nullable":true},"credential":{"type":"string","nullable":true},"redirect_uri":{"type":"string","nullable":true},"remember_me":{"type":"boolean"},"state":{"type":"string","nullable":true}}},"OAuthStateResponse":{"type":"object","required":["state"],"properties":{"state":{"type":"string"}}},"OutgoingLink":{"type":"object","required":["document_id","title","document_type","link_type"],"propert
ies":{"document_id":{"type":"string"},"document_type":{"type":"string"},"file_path":{"type":"string","nullable":true},"link_text":{"type":"string","nullable":true},"link_type":{"type":"string"},"position_end":{"type":"integer","format":"int32","nullable":true},"position_start":{"type":"integer","format":"int32","nullable":true},"title":{"type":"string"}}},"OutgoingLinksResponse":{"type":"object","required":["links","total_count"],"properties":{"links":{"type":"array","items":{"$ref":"#/components/schemas/OutgoingLink"}},"total_count":{"type":"integer","minimum":0}}},"PatchDocumentContentRequest":{"type":"object","required":["operations"],"properties":{"operations":{"type":"array","items":{"$ref":"#/components/schemas/DocumentPatchOperationRequest"}}}},"PermissionOverridePayload":{"type":"object","required":["permission","allowed"],"properties":{"allowed":{"type":"boolean"},"permission":{"type":"string"}}},"PlaceholderItemPayload":{"type":"object","required":["kind","id","code"],"properties":{"code":{"type":"string"},"id":{"type":"string"},"kind":{"type":"string"}}},"PublicDocumentSummary":{"type":"object","required":["id","title","updated_at","published_at"],"properties":{"id":{"type":"string","format":"uuid"},"published_at":{"type":"string","format":"date-time"},"title":{"type":"string"},"updated_at":{"type":"string","format":"date-time"}}},"PublishResponse":{"type":"object","required":["slug","public_url"],"properties":{"public_url":{"type":"string"},"slug":{"type":"string"}}},"RecordsResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{}}}},"RefreshResponse":{"type":"object","required":["access_token"],"properties":{"access_token":{"type":"string"}}},"RegisterRequest":{"type":"object","required":["email","name","password"],"properties":{"email":{"type":"string"},"name":{"type":"string"},"password":{"type":"string"}}},"RenderManyRequest":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/RenderRequest"}}}},"RenderManyResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/RenderResponseBody"}}}},"RenderOptionsPayload":{"type":"object","properties":{"absolute_attachments":{"type":"boolean","default":null,"nullable":true},"base_origin":{"type":"string","default":null,"nullable":true},"doc_id":{"type":"string","format":"uuid","default":null,"nullable":true},"features":{"type":"array","items":{"type":"string"},"default":null,"nullable":true},"flavor":{"type":"string","default":null,"nullable":true},"hardbreaks":{"type":"boolean","default":null,"nullable":true},"sanitize":{"type":"boolean","default":null,"nullable":true},"theme":{"type":"string","default":null,"nullable":true},"token":{"type":"string","default":null,"nullable":true}}},"RenderRequest":{"type":"object","required":["text"],"properties":{"options":{"$ref":"#/components/schemas/RenderOptionsPayload"},"text":{"type":"string"}}},"RenderResponseBody":{"type":"object","required":["html","hash"],"properties":{"hash":{"type":"string"},"html":{"type":"string"},"placeholders":{"type":"array","items":{"$ref":"#/components/schemas/PlaceholderItemPayload"}}}},"SearchResult":{"type":"object","required":["id","title","document_type","updated_at"],"properties":{"document_type":{"type":"string"},"id":{"type":"string","format":"uuid"},"path":{"type":"string","nullable":true},"title":{"type":"string"},"updated_at":{"type":"string","format":"date-time"}}},"SessionResponse":{"type
":"object","required":["id","workspace_id","remember_me","created_at","last_seen_at","expires_at","current"],"properties":{"created_at":{"type":"string","format":"date-time"},"current":{"type":"boolean"},"expires_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"ip_address":{"type":"string","nullable":true},"last_seen_at":{"type":"string","format":"date-time"},"remember_me":{"type":"boolean"},"user_agent":{"type":"string","nullable":true},"workspace_id":{"type":"string","format":"uuid"}}},"ShareBrowseResponse":{"type":"object","required":["tree"],"properties":{"tree":{"type":"array","items":{"$ref":"#/components/schemas/ShareBrowseTreeItem"}}}},"ShareBrowseTreeItem":{"type":"object","required":["id","title","type","created_at","updated_at"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"parent_id":{"type":"string","format":"uuid","nullable":true},"title":{"type":"string"},"type":{"type":"string","example":"document"},"updated_at":{"type":"string","format":"date-time"}}},"ShareDocumentResponse":{"type":"object","required":["id","title","permission"],"properties":{"content":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"permission":{"type":"string"},"title":{"type":"string"}}},"ShareItem":{"type":"object","required":["id","token","permission","url","scope"],"properties":{"expires_at":{"type":"string","format":"date-time","nullable":true},"id":{"type":"string","format":"uuid"},"parent_share_id":{"type":"string","format":"uuid","nullable":true},"permission":{"type":"string"},"scope":{"type":"string"},"token":{"type":"string"},"url":{"type":"string"}}},"ShareMountItem":{"type":"object","required":["id","token","target_document_id","target_document_type","target_title","permission","created_at"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"parent_folder_id":{"type":"string","format":"uuid","nullable":true},"permission":{"type":"string"},"target_document_id":{"type":"string","format":"uuid"},"target_document_type":{"type":"string"},"target_title":{"type":"string"},"token":{"type":"string"}}},"SnapshotDiffBaseParam":{"type":"string","enum":["auto","current","previous"]},"SnapshotDiffKind":{"type":"string","enum":["current","snapshot"]},"SnapshotDiffResponse":{"type":"object","required":["base","target","diff"],"properties":{"base":{"$ref":"#/components/schemas/SnapshotDiffSideResponse"},"diff":{"$ref":"#/components/schemas/TextDiffResult"},"target":{"$ref":"#/components/schemas/SnapshotDiffSideResponse"}}},"SnapshotDiffSideResponse":{"type":"object","required":["kind","markdown"],"properties":{"kind":{"$ref":"#/components/schemas/SnapshotDiffKind"},"markdown":{"type":"string"},"snapshot":{"allOf":[{"$ref":"#/components/schemas/SnapshotSummary"}],"nullable":true}}},"SnapshotListResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/SnapshotSummary"}}}},"SnapshotRestoreResponse":{"type":"object","required":["snapshot"],"properties":{"snapshot":{"$ref":"#/components/schemas/SnapshotSummary"}}},"SnapshotSummary":{"type":"object","required":["id","document_id","label","kind","created_at","byte_size","content_hash"],"properties":{"byte_size":{"type":"integer","format":"int64"},"content_hash":{"type":"string"},"created_at":{"type":"string","format":"date-time"},"created_by":{"type":"string","format":"uuid","nullable":true},"document_id":{"type":"string","format
":"uuid"},"id":{"type":"string","format":"uuid"},"kind":{"type":"string"},"label":{"type":"string"},"notes":{"type":"string","nullable":true}}},"SwitchWorkspaceResponse":{"type":"object","required":["access_token"],"properties":{"access_token":{"type":"string"}}},"TagItem":{"type":"object","required":["name","count"],"properties":{"count":{"type":"integer","format":"int64"},"name":{"type":"string"}}},"TextDiffLine":{"type":"object","required":["line_type","content"],"properties":{"content":{"type":"string"},"line_type":{"$ref":"#/components/schemas/TextDiffLineType"},"new_line_number":{"type":"integer","format":"int32","nullable":true,"minimum":0},"old_line_number":{"type":"integer","format":"int32","nullable":true,"minimum":0}}},"TextDiffLineType":{"type":"string","enum":["added","deleted","context"]},"TextDiffResult":{"type":"object","required":["file_path","diff_lines"],"properties":{"diff_lines":{"type":"array","items":{"$ref":"#/components/schemas/TextDiffLine"}},"file_path":{"type":"string"},"new_content":{"type":"string","nullable":true},"old_content":{"type":"string","nullable":true}}},"UninstallBody":{"type":"object","required":["id"],"properties":{"id":{"type":"string"}}},"UpdateDocumentContentRequest":{"type":"object","required":["content"],"properties":{"content":{"type":"string"}}},"UpdateDocumentRequest":{"type":"object","properties":{"parent_id":{"type":"string","nullable":true},"title":{"type":"string","nullable":true}}},"UpdateGitConfigRequest":{"type":"object","properties":{"auth_data":{"nullable":true},"auth_type":{"type":"string","nullable":true},"auto_sync":{"type":"boolean","nullable":true},"branch_name":{"type":"string","nullable":true},"repository_url":{"type":"string","nullable":true}}},"UpdateMemberRoleRequest":{"type":"object","required":["role_kind"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true}}},"UpdateRecordBody":{"type":"object","required":["patch"],"properties":{"patch":{}}},"UpdateUserShortcutRequest":{"type":"object","properties":{"bindings":{"type":"object"},"leader_key":{"type":"string","example":"","nullable":true}}},"UpdateWorkspaceRequest":{"type":"object","properties":{"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"name":{"type":"string","nullable":true}}},"UpdateWorkspaceRoleRequest":{"type":"object","properties":{"base_role":{"type":"string","nullable":true},"description":{"type":"string","nullable":true},"name":{"type":"string","nullable":true},"overrides":{"type":"array","items":{"$ref":"#/components/schemas/PermissionOverridePayload"},"nullable":true},"priority":{"type":"integer","format":"int32","nullable":true}}},"UploadFileMultipart":{"type":"object","required":["file","document_id"],"properties":{"document_id":{"type":"string","format":"uuid"},"file":{"type":"string","format":"binary"}}},"UploadFileResponse":{"type":"object","required":["id","url","filename","size"],"properties":{"content_type":{"type":"string","nullable":true},"filename":{"type":"string"},"id":{"type":"string","format":"uuid"},"size":{"type":"integer","format":"int64"},"url":{"type":"string"}}},"UserResponse":{"type":"object","required":["id","email","name","workspaces"],"properties":{"active_workspace":{"allOf":[{"$ref":"#/components/schemas/WorkspaceMembershipResponse"}],"nullable":true},"active_workspace_id":{"type":"string","format":"uuid","nullable":true},"active_workspace_permissions":{"type":"array","items":{"type
":"string"}},"email":{"type":"string"},"id":{"type":"string","format":"uuid"},"name":{"type":"string"},"workspaces":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceMembershipResponse"}}}},"UserShortcutResponse":{"type":"object","required":["bindings"],"properties":{"bindings":{"type":"object"},"leader_key":{"type":"string","example":"","nullable":true},"updated_at":{"type":"string","format":"date-time","nullable":true}}},"WorkspaceInvitationResponse":{"type":"object","required":["id","workspace_id","email","role_kind","invited_by","token","created_at"],"properties":{"accepted_at":{"type":"string","format":"date-time","nullable":true},"accepted_by":{"type":"string","format":"uuid","nullable":true},"created_at":{"type":"string","format":"date-time"},"custom_role_id":{"type":"string","format":"uuid","nullable":true},"email":{"type":"string"},"expires_at":{"type":"string","format":"date-time","nullable":true},"id":{"type":"string","format":"uuid"},"invited_by":{"type":"string","format":"uuid"},"revoked_at":{"type":"string","format":"date-time","nullable":true},"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true},"token":{"type":"string"},"workspace_id":{"type":"string","format":"uuid"}}},"WorkspaceMemberResponse":{"type":"object","required":["workspace_id","user_id","email","name","role_kind","is_default"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"email":{"type":"string"},"is_default":{"type":"boolean"},"name":{"type":"string"},"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true},"user_id":{"type":"string","format":"uuid"},"workspace_id":{"type":"string","format":"uuid"}}},"WorkspaceMembershipResponse":{"type":"object","required":["id","name","slug","is_personal","role_kind","is_default"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"is_default":{"type":"boolean"},"is_personal":{"type":"boolean"},"name":{"type":"string"},"role_kind":{"type":"string"},"slug":{"type":"string"},"system_role":{"type":"string","nullable":true}}},"WorkspacePermissionsResponse":{"type":"object","required":["workspace_id","permissions"],"properties":{"permissions":{"type":"array","items":{"type":"string"}},"workspace_id":{"type":"string","format":"uuid"}}},"WorkspaceResponse":{"type":"object","required":["id","name","slug","is_personal","role_kind","is_default"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"is_default":{"type":"boolean"},"is_personal":{"type":"boolean"},"name":{"type":"string"},"role_kind":{"type":"string"},"slug":{"type":"string"},"system_role":{"type":"string","nullable":true}}},"WorkspaceRoleResponse":{"type":"object","required":["id","workspace_id","name","base_role","priority","overrides"],"properties":{"base_role":{"type":"string"},"description":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"name":{"type":"string"},"overrides":{"type":"array","items":{"$ref":"#/components/schemas/PermissionOverridePayload"}},"priority":{"type":"integer","format":"int32"},"workspace_id":{"type":"string","format":"uuid"}}}}},"tags":[{"name":"Auth","description":"Authentication"},{"name":"Documents","description":"Documents 
management"},{"name":"Files","description":"File management"},{"name":"Sharing","description":"Document sharing"},{"name":"Public Documents","description":"Public pages"},{"name":"Realtime","description":"Yjs WebSocket endpoint (/yjs/:id)"},{"name":"Git","description":"Git integration"},{"name":"Markdown","description":"Markdown rendering"},{"name":"Plugins","description":"Plugins management & data APIs"},{"name":"Storage","description":"Storage ingest APIs"},{"name":"Health","description":"System health checks"}]} diff --git a/api/openapi/openapi.json b/api/openapi/openapi.json deleted file mode 100644 index a4ce10d3..00000000 --- a/api/openapi/openapi.json +++ /dev/null @@ -1 +0,0 @@ -{"openapi":"3.0.3","info":{"title":"api","description":"","license":{"name":""},"version":"0.1.0"},"paths":{"/api/auth/login":{"post":{"tags":["Auth"],"operationId":"login","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/LoginRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/LoginResponse"}}}}},"security":[{}]}},"/api/auth/logout":{"post":{"tags":["Auth"],"operationId":"logout","responses":{"204":{"description":""}}}},"/api/auth/me":{"get":{"tags":["Auth"],"operationId":"me","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserResponse"}}}}}},"delete":{"tags":["Auth"],"operationId":"delete_account","responses":{"204":{"description":""}}}},"/api/auth/oauth/{provider}":{"post":{"tags":["Auth"],"operationId":"oauth_login","parameters":[{"name":"provider","in":"path","description":"OAuth provider identifier (e.g., google)","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/OAuthLoginRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/LoginResponse"}}}}},"security":[{}]}},"/api/auth/oauth/{provider}/state":{"post":{"tags":["Auth"],"operationId":"oauth_state","parameters":[{"name":"provider","in":"path","description":"OAuth provider identifier","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/OAuthStateResponse"}}}}},"security":[{}]}},"/api/auth/providers":{"get":{"tags":["Auth"],"operationId":"list_oauth_providers","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/AuthProvidersResponse"}}}}},"security":[{}]}},"/api/auth/refresh":{"post":{"tags":["Auth"],"operationId":"refresh_session","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RefreshResponse"}}}}}}},"/api/auth/register":{"post":{"tags":["Auth"],"operationId":"register","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RegisterRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserResponse"}}}}},"security":[{}]}},"/api/auth/sessions":{"get":{"tags":["Auth"],"operationId":"list_sessions","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/SessionResponse"}}}}}}}},"/api/auth/sessions/{id}":{"delete":{"tags":["Auth"],"operationId":"revoke_session","parameters":[{"name":"id","in":"path","description":"Session 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/documents":{"get":{"tags":["Documents"],"operationId":"list_documents","parameters":[{"name":"query","in":"query","description":"Search query","required":false,"schema":{"type":"string","nullable":true}},{"name":"tag","in":"query","description":"Filter by tag","required":false,"schema":{"type":"string","nullable":true}},{"name":"state","in":"query","description":"Filter by document state (active|archived|all)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DocumentListResponse"}}}}}},"post":{"tags":["Documents"],"operationId":"create_document","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateDocumentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/search":{"get":{"tags":["Documents"],"operationId":"search_documents","parameters":[{"name":"q","in":"query","description":"Query","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/SearchResult"}}}}}}}},"/api/documents/{id}":{"get":{"tags":["Documents"],"operationId":"get_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}},"delete":{"tags":["Documents"],"operationId":"delete_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Documents"],"operationId":"update_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateDocumentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/{id}/archive":{"post":{"tags":["Documents"],"operationId":"archive_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}},"404":{"description":"Document not found"},"409":{"description":"Document already archived"}}}},"/api/documents/{id}/backlinks":{"get":{"tags":["Documents"],"operationId":"getBacklinks","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/BacklinksResponse"}}}}}}},"/api/documents/{id}/content":{"get":{"tags":["Documents"],"operationId":"get_document_content","parameters":[{"name":"id","in":"path","description":"Document 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":""}}},"put":{"tags":["Documents"],"operationId":"update_document_content","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateDocumentContentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}},"patch":{"tags":["Documents"],"operationId":"patch_document_content","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/PatchDocumentContentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/{id}/download":{"get":{"tags":["Documents"],"operationId":"download_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}},{"name":"format","in":"query","description":"Download format (see schema for supported values)","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/DownloadFormat"}],"nullable":true}}],"responses":{"200":{"description":"Document download","content":{"application/octet-stream":{"schema":{"$ref":"#/components/schemas/DocumentDownloadBinary"}}}},"401":{"description":"Unauthorized"},"404":{"description":"Document not found"}}}},"/api/documents/{id}/duplicate":{"post":{"tags":["Documents"],"operationId":"duplicate_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/DuplicateDocumentRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/documents/{id}/links":{"get":{"tags":["Documents"],"operationId":"getOutgoingLinks","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/OutgoingLinksResponse"}}}}}}},"/api/documents/{id}/snapshots":{"get":{"tags":["Documents"],"operationId":"list_document_snapshots","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}},{"name":"limit","in":"query","description":"Maximum number of snapshots to return","required":false,"schema":{"type":"integer","format":"int64","nullable":true}},{"name":"offset","in":"query","description":"Offset for 
pagination","required":false,"schema":{"type":"integer","format":"int64","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SnapshotListResponse"}}}}}}},"/api/documents/{id}/snapshots/{snapshot_id}/diff":{"get":{"tags":["Documents"],"operationId":"get_document_snapshot_diff","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"snapshot_id","in":"path","description":"Snapshot ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}},{"name":"compare","in":"query","description":"Snapshot ID to compare against (defaults to current document state)","required":false,"schema":{"type":"string","format":"uuid","nullable":true}},{"name":"base","in":"query","description":"Base comparison to use when compare is not provided (auto|current|previous)","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/SnapshotDiffBaseParam"}],"nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SnapshotDiffResponse"}}}}}}},"/api/documents/{id}/snapshots/{snapshot_id}/download":{"get":{"tags":["Documents"],"operationId":"download_document_snapshot","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"snapshot_id","in":"path","description":"Snapshot ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"Snapshot archive","content":{"application/zip":{"schema":{"$ref":"#/components/schemas/DocumentArchiveBinary"}}}},"401":{"description":"Unauthorized"},"404":{"description":"Snapshot not found"}}}},"/api/documents/{id}/snapshots/{snapshot_id}/restore":{"post":{"tags":["Documents"],"operationId":"restore_document_snapshot","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"snapshot_id","in":"path","description":"Snapshot ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"token","in":"query","description":"Share token (optional)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SnapshotRestoreResponse"}}}}}}},"/api/documents/{id}/unarchive":{"post":{"tags":["Documents"],"operationId":"unarchive_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}},"404":{"description":"Document not found"},"409":{"description":"Document is not archived"}}}},"/api/files":{"post":{"tags":["Files"],"summary":"POST /api/files (multipart/form-data)","description":"Fields:\n- file: binary file (required)\n- document_id: uuid (required by current schema)","operationId":"upload_file","requestBody":{"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/UploadFileMultipart"}}},"required":true},"responses":{"201":{"description":"File 
uploaded","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UploadFileResponse"}}}}}}},"/api/files/documents/{filename}":{"get":{"tags":["Files"],"summary":"GET /api/files/documents/{filename}?document_id=uuid -> bytes","operationId":"get_file_by_name","parameters":[{"name":"filename","in":"path","description":"File name","required":true,"schema":{"type":"string"}},{"name":"document_id","in":"query","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"OK","content":{"application/octet-stream":{"schema":{"type":"string","format":"binary"}}}}}}},"/api/files/{id}":{"get":{"tags":["Files"],"summary":"GET /api/files/{id} -> bytes (fallback; primary is /uploads/{filename})","operationId":"get_file","parameters":[{"name":"id","in":"path","description":"File ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"OK","content":{"application/octet-stream":{"schema":{"type":"string","format":"binary"}}}}}}},"/api/git/changes":{"get":{"tags":["Git"],"operationId":"get_changes","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitChangesResponse"}}}}}}},"/api/git/config":{"get":{"tags":["Git"],"operationId":"get_config","responses":{"200":{"description":"","content":{"application/json":{"schema":{"allOf":[{"$ref":"#/components/schemas/GitConfigResponse"}],"nullable":true}}}}}},"post":{"tags":["Git"],"operationId":"create_or_update_config","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateGitConfigRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitConfigResponse"}}}}}},"delete":{"tags":["Git"],"operationId":"delete_config","responses":{"204":{"description":"Deleted"}}}},"/api/git/deinit":{"post":{"tags":["Git"],"operationId":"deinit_repository","responses":{"200":{"description":"OK"}}}},"/api/git/diff/commits/{from}/{to}":{"get":{"tags":["Git"],"operationId":"get_commit_diff","parameters":[{"name":"from","in":"path","description":"From","required":true,"schema":{"type":"string"}},{"name":"to","in":"path","description":"To","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/TextDiffResult"}}}}}}}},"/api/git/diff/working":{"get":{"tags":["Git"],"operationId":"get_working_diff","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/TextDiffResult"}}}}}}}},"/api/git/gitignore/check":{"post":{"tags":["Git"],"operationId":"check_path_ignored","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CheckIgnoredRequest"}}},"required":true},"responses":{"200":{"description":"OK"}}}},"/api/git/gitignore/patterns":{"get":{"tags":["Git"],"operationId":"get_gitignore_patterns","responses":{"200":{"description":"OK"}}},"post":{"tags":["Git"],"operationId":"add_gitignore_patterns","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/AddPatternsRequest"}}},"required":true},"responses":{"200":{"description":"OK"}}}},"/api/git/history":{"get":{"tags":["Git"],"operationId":"get_history","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitHistoryResponse"}}}}}}},"/api/git/ignore/doc/{id}":{"pos
t":{"tags":["Git"],"operationId":"ignore_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"OK"}}}},"/api/git/ignore/folder/{id}":{"post":{"tags":["Git"],"operationId":"ignore_folder","parameters":[{"name":"id","in":"path","description":"Folder ID","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"OK"}}}},"/api/git/import":{"post":{"tags":["Git"],"operationId":"import_repository","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateGitConfigRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitImportResponse"}}}}}}},"/api/git/init":{"post":{"tags":["Git"],"operationId":"init_repository","responses":{"200":{"description":"OK"}}}},"/api/git/pull":{"post":{"tags":["Git"],"operationId":"pull_repository","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}},"409":{"description":"Conflicts detected","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}}}}},"/api/git/pull/session/{id}":{"get":{"tags":["Git"],"operationId":"get_pull_session","parameters":[{"name":"id","in":"path","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}}}}},"/api/git/pull/session/{id}/finalize":{"post":{"tags":["Git"],"operationId":"finalize_pull_session","parameters":[{"name":"id","in":"path","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}},"400":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}},"409":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullResponse"}}}}}}},"/api/git/pull/session/{id}/resolve":{"post":{"tags":["Git"],"operationId":"resolve_pull_session","parameters":[{"name":"id","in":"path","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"400":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"409":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}}}}},"/api/git/pull/start":{"post":{"tags":["Git"],"operationId":"start_pull_session","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"400":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}},"409":{"description":"Conflicts 
detected","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitPullSessionResponse"}}}}}}},"/api/git/status":{"get":{"tags":["Git"],"operationId":"get_status","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitStatus"}}}}}}},"/api/git/sync":{"post":{"tags":["Git"],"operationId":"sync_now","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitSyncRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/GitSyncResponse"}}}},"409":{"description":"Conflicts during rebase/pull"}}}},"/api/health":{"get":{"tags":["Health"],"operationId":"health","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HealthResp"}}}}}}},"/api/markdown/render":{"post":{"tags":["Markdown"],"operationId":"render_markdown","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderResponseBody"}}}}}}},"/api/markdown/render-many":{"post":{"tags":["Markdown"],"operationId":"render_markdown_many","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderManyRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RenderManyResponse"}}}}}}},"/api/me/api-tokens":{"get":{"tags":["Auth"],"operationId":"list_api_tokens","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ApiTokenItem"}}}}}}},"post":{"tags":["Auth"],"operationId":"create_api_token","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ApiTokenCreateRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ApiTokenCreateResponse"}}}}}}},"/api/me/api-tokens/{id}":{"delete":{"tags":["Auth"],"operationId":"revoke_api_token","parameters":[{"name":"id","in":"path","description":"Token ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/me/plugins/install-from-url":{"post":{"tags":["Plugins"],"operationId":"pluginsInstallFromUrl","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/InstallFromUrlBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/InstallResponse"}}}}}}},"/api/me/plugins/manifest":{"get":{"tags":["Plugins"],"operationId":"pluginsGetManifest","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ManifestItem"}}}}}}}},"/api/me/plugins/uninstall":{"post":{"tags":["Plugins"],"operationId":"pluginsUninstall","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UninstallBody"}}},"required":true},"responses":{"204":{"description":""}}}},"/api/me/plugins/updates":{"get":{"tags":["Plugins"],"operationId":"sse_updates","responses":{"200":{"description":"Plugin event 
stream"}}}},"/api/me/shortcuts":{"get":{"tags":["Auth"],"operationId":"get_user_shortcuts","responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserShortcutResponse"}}}}}},"put":{"tags":["Auth"],"operationId":"update_user_shortcuts","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateUserShortcutRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/UserShortcutResponse"}}}}}}},"/api/plugins/{plugin}/docs/{doc_id}/kv/{key}":{"get":{"tags":["Plugins"],"operationId":"pluginsGetKv","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"key","in":"path","description":"Key","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/KvValueResponse"}}}}}},"put":{"tags":["Plugins"],"operationId":"pluginsPutKv","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"key","in":"path","description":"Key","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/KvValueBody"}}},"required":true},"responses":{"204":{"description":""}}}},"/api/plugins/{plugin}/docs/{doc_id}/records/{kind}":{"get":{"tags":["Plugins"],"operationId":"list_records","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"kind","in":"path","description":"Record kind","required":true,"schema":{"type":"string"}},{"name":"limit","in":"query","description":"Limit","required":false,"schema":{"type":"integer","format":"int64","nullable":true}},{"name":"offset","in":"query","description":"Offset","required":false,"schema":{"type":"integer","format":"int64","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/RecordsResponse"}}}}}},"post":{"tags":["Plugins"],"operationId":"pluginsCreateRecord","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"doc_id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"kind","in":"path","description":"Record kind","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateRecordBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{}}}}}}},"/api/plugins/{plugin}/exec/{action}":{"post":{"tags":["Plugins"],"operationId":"pluginsExecAction","parameters":[{"name":"plugin","in":"path","description":"Plugin 
ID","required":true,"schema":{"type":"string"}},{"name":"action","in":"path","description":"Action","required":true,"schema":{"type":"string"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExecBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ExecResultResponse"}}}}}}},"/api/plugins/{plugin}/records/{id}":{"delete":{"tags":["Plugins"],"operationId":"pluginsDeleteRecord","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Record ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Plugins"],"operationId":"pluginsUpdateRecord","parameters":[{"name":"plugin","in":"path","description":"Plugin ID","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Record ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateRecordBody"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{}}}}}}},"/api/public/documents/{id}":{"get":{"tags":["Public Documents"],"operationId":"get_publish_status","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Published status","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PublishResponse"}}}}}},"post":{"tags":["Public Documents"],"operationId":"publish_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Published","content":{"application/json":{"schema":{"$ref":"#/components/schemas/PublishResponse"}}}}}},"delete":{"tags":["Public Documents"],"operationId":"unpublish_document","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":"Unpublished"}}}},"/api/public/workspaces/{slug}":{"get":{"tags":["Public Documents"],"operationId":"list_workspace_public_documents","parameters":[{"name":"slug","in":"path","description":"Workspace slug","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Public documents for workspace","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/PublicDocumentSummary"}}}}}}}},"/api/public/workspaces/{slug}/{id}":{"get":{"tags":["Public Documents"],"operationId":"get_public_by_workspace_and_id","parameters":[{"name":"slug","in":"path","description":"Workspace slug","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Document metadata","content":{"application/json":{"schema":{"$ref":"#/components/schemas/Document"}}}}}}},"/api/public/workspaces/{slug}/{id}/content":{"get":{"tags":["Public Documents"],"operationId":"get_public_content_by_workspace_and_id","parameters":[{"name":"slug","in":"path","description":"Workspace slug","required":true,"schema":{"type":"string"}},{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Document 
content"}}}},"/api/shares":{"post":{"tags":["Sharing"],"operationId":"create_share","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateShareRequest"}}},"required":true},"responses":{"200":{"description":"Share link created","content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateShareResponse"}}}}}}},"/api/shares/active":{"get":{"tags":["Sharing"],"operationId":"list_active_shares","responses":{"200":{"description":"Active shares","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ActiveShareItem"}}}}}}}},"/api/shares/applicable":{"get":{"tags":["Sharing"],"operationId":"list_applicable_shares","parameters":[{"name":"doc_id","in":"query","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"Shares that include the document","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ApplicableShareItem"}}}}}}}},"/api/shares/browse":{"get":{"tags":["Sharing"],"operationId":"browse_share","parameters":[{"name":"token","in":"query","description":"Share token","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Share tree","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ShareBrowseResponse"}}}}}}},"/api/shares/documents/{id}":{"get":{"tags":["Sharing"],"operationId":"list_document_shares","parameters":[{"name":"id","in":"path","description":"Document ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ShareItem"}}}}}}}},"/api/shares/folders/{token}/materialize":{"post":{"tags":["Sharing"],"operationId":"materialize_folder_share","parameters":[{"name":"token","in":"path","description":"Folder share token","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Created doc shares","content":{"application/json":{"schema":{"$ref":"#/components/schemas/MaterializeResponse"}}}}}}},"/api/shares/mounts":{"get":{"tags":["Sharing"],"operationId":"list_share_mounts","responses":{"200":{"description":"Share mounts","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/ShareMountItem"}}}}}}},"post":{"tags":["Sharing"],"operationId":"create_share_mount","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateShareMountRequest"}}},"required":true},"responses":{"200":{"description":"Saved share mount","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ShareMountItem"}}}}}}},"/api/shares/mounts/{id}":{"delete":{"tags":["Sharing"],"operationId":"delete_share_mount","parameters":[{"name":"id","in":"path","description":"Share mount ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":"Share mount removed"}}}},"/api/shares/validate":{"get":{"tags":["Sharing"],"operationId":"validate_share_token","parameters":[{"name":"token","in":"query","description":"Share token","required":true,"schema":{"type":"string"}}],"responses":{"200":{"description":"Document info","content":{"application/json":{"schema":{"$ref":"#/components/schemas/ShareDocumentResponse"}}}}}}},"/api/shares/{token}":{"delete":{"tags":["Sharing"],"operationId":"delete_share","parameters":[{"name":"token","in":"path","description":"Share 
token","required":true,"schema":{"type":"string"}}],"responses":{"204":{"description":"Share link deleted"}}}},"/api/tags":{"get":{"tags":["Tags"],"operationId":"list_tags","parameters":[{"name":"q","in":"query","description":"Filter contains","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/TagItem"}}}}}}}},"/api/workspace-invitations/{token}/accept":{"post":{"tags":["Workspaces"],"operationId":"accept_invitation","parameters":[{"name":"token","in":"path","description":"Invitation token","required":true,"schema":{"type":"string"}}],"responses":{"204":{"description":""}}}},"/api/workspaces":{"get":{"tags":["Workspaces"],"operationId":"list_workspaces","responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}}},"post":{"tags":["Workspaces"],"operationId":"create_workspace","requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateWorkspaceRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}}},"/api/workspaces/{id}":{"get":{"tags":["Workspaces"],"operationId":"get_workspace_detail","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}},"put":{"tags":["Workspaces"],"operationId":"update_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateWorkspaceRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceResponse"}}}}}},"delete":{"tags":["Workspaces"],"operationId":"delete_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}}},"/api/workspaces/{id}/download":{"get":{"tags":["Workspaces"],"operationId":"download_workspace_archive","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"format","in":"query","description":"Download format (archive only)","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/DownloadFormat"}],"nullable":true}}],"responses":{"200":{"description":"Workspace download","content":{"application/octet-stream":{"schema":{"$ref":"#/components/schemas/DocumentDownloadBinary"}}}},"401":{"description":"Unauthorized"},"404":{"description":"Workspace not found"}}}},"/api/workspaces/{id}/invitations":{"get":{"tags":["Workspaces"],"operationId":"list_invitations","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceInvitationResponse"}}}}}}},"post":{"tags":["Workspaces"],"operationId":"create_invitation","parameters":[{"name":"id","in":"path","description":"Workspace 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateWorkspaceInvitationRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceInvitationResponse"}}}}}}},"/api/workspaces/{id}/invitations/{invitation_id}":{"delete":{"tags":["Workspaces"],"operationId":"revoke_invitation","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"invitation_id","in":"path","description":"Invitation ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceInvitationResponse"}}}}}}},"/api/workspaces/{id}/members":{"get":{"tags":["Workspaces"],"operationId":"list_members","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceMemberResponse"}}}}}}}},"/api/workspaces/{id}/members/{user_id}":{"delete":{"tags":["Workspaces"],"operationId":"remove_member","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"user_id","in":"path","description":"Target user ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Workspaces"],"operationId":"update_member_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"user_id","in":"path","description":"Target user ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateMemberRoleRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceMemberResponse"}}}}}}},"/api/workspaces/{id}/permissions":{"get":{"tags":["Workspaces"],"operationId":"get_workspace_permissions","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspacePermissionsResponse"}}}}}}},"/api/workspaces/{id}/roles":{"get":{"tags":["Workspaces"],"operationId":"list_roles","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceRoleResponse"}}}}}}},"post":{"tags":["Workspaces"],"operationId":"create_role","parameters":[{"name":"id","in":"path","description":"Workspace 
ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/CreateWorkspaceRoleRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceRoleResponse"}}}}}}},"/api/workspaces/{id}/roles/{role_id}":{"delete":{"tags":["Workspaces"],"operationId":"delete_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"role_id","in":"path","description":"Role ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"204":{"description":""}}},"patch":{"tags":["Workspaces"],"operationId":"update_role","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}},{"name":"role_id","in":"path","description":"Role ID","required":true,"schema":{"type":"string","format":"uuid"}}],"requestBody":{"content":{"application/json":{"schema":{"$ref":"#/components/schemas/UpdateWorkspaceRoleRequest"}}},"required":true},"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/WorkspaceRoleResponse"}}}}}}},"/api/workspaces/{id}/switch":{"post":{"tags":["Workspaces"],"operationId":"switch_workspace","parameters":[{"name":"id","in":"path","description":"Workspace ID","required":true,"schema":{"type":"string","format":"uuid"}}],"responses":{"200":{"description":"","content":{"application/json":{"schema":{"$ref":"#/components/schemas/SwitchWorkspaceResponse"}}}}}}},"/api/yjs/{id}":{"get":{"tags":["Realtime"],"operationId":"axum_ws_entry","parameters":[{"name":"id","in":"path","description":"Document ID (UUID)","required":true,"schema":{"type":"string"}},{"name":"token","in":"query","description":"JWT or share token","required":false,"schema":{"type":"string","nullable":true}},{"name":"Authorization","in":"header","description":"Bearer token (JWT or share token)","required":false,"schema":{"type":"string","nullable":true}}],"responses":{"101":{"description":"Switching Protocols (WebSocket upgrade)"},"401":{"description":"Unauthorized"}}}}},"components":{"schemas":{"ActiveShareItem":{"type":"object","required":["id","token","permission","created_at","document_id","document_title","document_type","url"],"properties":{"created_at":{"type":"string","format":"date-time"},"document_id":{"type":"string","format":"uuid"},"document_title":{"type":"string"},"document_type":{"type":"string","description":"'document' or 'folder'"},"expires_at":{"type":"string","format":"date-time","nullable":true},"id":{"type":"string","format":"uuid"},"parent_share_id":{"type":"string","format":"uuid","nullable":true},"permission":{"type":"string"},"token":{"type":"string"},"url":{"type":"string"}}},"AddPatternsRequest":{"type":"object","required":["patterns"],"properties":{"patterns":{"type":"array","items":{"type":"string"}}}},"ApiTokenCreateRequest":{"type":"object","properties":{"name":{"type":"string","example":"Deploy 
token","nullable":true}}},"ApiTokenCreateResponse":{"type":"object","required":["id","name","created_at","token"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"name":{"type":"string"},"token":{"type":"string"}}},"ApiTokenItem":{"type":"object","required":["id","name","created_at"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"last_used_at":{"type":"string","format":"date-time","nullable":true},"name":{"type":"string"},"revoked_at":{"type":"string","format":"date-time","nullable":true}}},"ApplicableShareItem":{"type":"object","required":["token","permission","scope","excluded"],"properties":{"excluded":{"type":"boolean"},"permission":{"type":"string"},"scope":{"type":"string","description":"'document' or 'folder'"},"token":{"type":"string"}}},"AuthProviderInfoResponse":{"type":"object","required":["id","requires_state","client_ids"],"properties":{"authorization_url":{"type":"string","nullable":true},"client_ids":{"type":"array","items":{"type":"string"}},"id":{"type":"string"},"name":{"type":"string","nullable":true},"redirect_uri":{"type":"string","nullable":true},"requires_state":{"type":"boolean"},"scopes":{"type":"array","items":{"type":"string"}}}},"AuthProvidersResponse":{"type":"object","required":["providers"],"properties":{"providers":{"type":"array","items":{"$ref":"#/components/schemas/AuthProviderInfoResponse"}}}},"BacklinkInfo":{"type":"object","required":["document_id","title","document_type","link_type","link_count"],"properties":{"document_id":{"type":"string"},"document_type":{"type":"string"},"file_path":{"type":"string","nullable":true},"link_count":{"type":"integer","format":"int64"},"link_text":{"type":"string","nullable":true},"link_type":{"type":"string"},"title":{"type":"string"}}},"BacklinksResponse":{"type":"object","required":["backlinks","total_count"],"properties":{"backlinks":{"type":"array","items":{"$ref":"#/components/schemas/BacklinkInfo"}},"total_count":{"type":"integer","minimum":0}}},"CheckIgnoredRequest":{"type":"object","required":["path"],"properties":{"path":{"type":"string"}}},"CreateDocumentRequest":{"type":"object","properties":{"parent_id":{"type":"string","format":"uuid","nullable":true},"title":{"type":"string","nullable":true},"type":{"type":"string","nullable":true}}},"CreateGitConfigRequest":{"type":"object","required":["repository_url","auth_type","auth_data"],"properties":{"auth_data":{},"auth_type":{"type":"string"},"auto_sync":{"type":"boolean","nullable":true},"branch_name":{"type":"string","nullable":true},"repository_url":{"type":"string"}}},"CreateRecordBody":{"type":"object","required":["data"],"properties":{"data":{}}},"CreateShareMountRequest":{"type":"object","required":["token"],"properties":{"parent_folder_id":{"type":"string","format":"uuid","nullable":true},"token":{"type":"string"}}},"CreateShareRequest":{"type":"object","required":["document_id"],"properties":{"document_id":{"type":"string","format":"uuid"},"expires_at":{"type":"string","format":"date-time","nullable":true},"permission":{"type":"string","nullable":true}}},"CreateShareResponse":{"type":"object","required":["token","url"],"properties":{"token":{"type":"string"},"url":{"type":"string"}}},"CreateWorkspaceInvitationRequest":{"type":"object","required":["email","role_kind"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"email":{"type":"string"},"expires_at":{"type":"string","format":"date-time","nullable":true},
"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true}}},"CreateWorkspaceRequest":{"type":"object","required":["name"],"properties":{"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"name":{"type":"string"}}},"CreateWorkspaceRoleRequest":{"type":"object","required":["name","base_role"],"properties":{"base_role":{"type":"string"},"description":{"type":"string","nullable":true},"name":{"type":"string"},"overrides":{"type":"array","items":{"$ref":"#/components/schemas/PermissionOverridePayload"},"nullable":true},"priority":{"type":"integer","format":"int32","nullable":true}}},"Document":{"type":"object","required":["id","owner_id","workspace_id","title","type","created_at","updated_at","slug","desired_path"],"properties":{"archived_at":{"type":"string","format":"date-time","nullable":true},"archived_by":{"type":"string","format":"uuid","nullable":true},"archived_parent_id":{"type":"string","format":"uuid","nullable":true},"created_at":{"type":"string","format":"date-time"},"created_by":{"type":"string","format":"uuid","nullable":true},"created_by_plugin":{"type":"string","nullable":true},"desired_path":{"type":"string"},"id":{"type":"string","format":"uuid"},"owner_id":{"type":"string","format":"uuid"},"parent_id":{"type":"string","format":"uuid","nullable":true},"path":{"type":"string","nullable":true},"slug":{"type":"string"},"title":{"type":"string"},"type":{"type":"string"},"updated_at":{"type":"string","format":"date-time"},"workspace_id":{"type":"string","format":"uuid"}}},"DocumentArchiveBinary":{"type":"string","format":"binary"},"DocumentDownloadBinary":{"type":"string","format":"binary"},"DocumentListResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/Document"}}}},"DocumentPatchOperationRequest":{"oneOf":[{"type":"object","required":["offset","text","op"],"properties":{"offset":{"type":"integer","minimum":0},"op":{"type":"string","enum":["insert"]},"text":{"type":"string"}}},{"type":"object","required":["offset","length","op"],"properties":{"length":{"type":"integer","minimum":0},"offset":{"type":"integer","minimum":0},"op":{"type":"string","enum":["delete"]}}},{"type":"object","required":["offset","length","text","op"],"properties":{"length":{"type":"integer","minimum":0},"offset":{"type":"integer","minimum":0},"op":{"type":"string","enum":["replace"]},"text":{"type":"string"}}}],"discriminator":{"propertyName":"op"}},"DownloadDocumentQuery":{"type":"object","properties":{"format":{"$ref":"#/components/schemas/DownloadFormat"},"token":{"type":"string","nullable":true}}},"DownloadFormat":{"type":"string","enum":["archive","markdown","html","html5","pdf","docx","latex","beamer","context","man","mediawiki","dokuwiki","textile","org","texinfo","opml","docbook","opendocument","odt","rtf","epub","epub3","fb2","asciidoc","icml","slidy","slideous","dzslides","revealjs","s5","json","plain","commonmark","commonmark_x","markdown_strict","markdown_phpextra","markdown_github","rst","native","haddock"]},"DownloadWorkspaceQuery":{"type":"object","properties":{"format":{"$ref":"#/components/schemas/DownloadFormat"}}},"DuplicateDocumentRequest":{"type":"object","properties":{"parent_id":{"type":"string","nullable":true},"title":{"type":"string","nullable":true}}},"ExecBody":{"type":"object","properties":{"payload":{"nullable":true}}},"ExecResultResponse":{"type":"object","required":["ok","effects"],"properties":{"data":{"nullable":true},"effects":{"type":"array","it
ems":{}},"error":{"nullable":true},"ok":{"type":"boolean"}}},"GitChangeItem":{"type":"object","required":["path","status"],"properties":{"path":{"type":"string"},"status":{"type":"string"}}},"GitChangesResponse":{"type":"object","required":["files"],"properties":{"files":{"type":"array","items":{"$ref":"#/components/schemas/GitChangeItem"}}}},"GitCommitItem":{"type":"object","required":["hash","message","author_name","author_email","time"],"properties":{"author_email":{"type":"string"},"author_name":{"type":"string"},"hash":{"type":"string"},"message":{"type":"string"},"time":{"type":"string","format":"date-time"}}},"GitConfigResponse":{"type":"object","required":["id","repository_url","branch_name","auth_type","auto_sync","created_at","updated_at"],"properties":{"auth_type":{"type":"string"},"auto_sync":{"type":"boolean"},"branch_name":{"type":"string"},"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"remote_check":{"allOf":[{"$ref":"#/components/schemas/GitRemoteCheckResponse"}],"nullable":true},"repository_url":{"type":"string"},"updated_at":{"type":"string","format":"date-time"}}},"GitHistoryResponse":{"type":"object","required":["commits"],"properties":{"commits":{"type":"array","items":{"$ref":"#/components/schemas/GitCommitItem"}}}},"GitImportResponse":{"type":"object","required":["success","message","files_changed","docs_created","attachments_created"],"properties":{"attachments_created":{"type":"integer","format":"int32"},"commit_hash":{"type":"string","nullable":true},"docs_created":{"type":"integer","format":"int32"},"files_changed":{"type":"integer","format":"int32"},"message":{"type":"string"},"success":{"type":"boolean"}}},"GitPullConflictItem":{"type":"object","required":["path","is_binary"],"properties":{"base":{"type":"string","nullable":true},"document_id":{"type":"string","format":"uuid","nullable":true},"is_binary":{"type":"boolean"},"ours":{"type":"string","nullable":true},"path":{"type":"string"},"theirs":{"type":"string","nullable":true}}},"GitPullRequest":{"type":"object","properties":{"resolutions":{"type":"array","items":{"$ref":"#/components/schemas/GitPullResolution"},"nullable":true}}},"GitPullResolution":{"type":"object","required":["path","choice"],"properties":{"choice":{"type":"string"},"content":{"type":"string","nullable":true},"path":{"type":"string"}}},"GitPullResponse":{"type":"object","required":["success","message","files_changed"],"properties":{"commit_hash":{"type":"string","nullable":true},"conflicts":{"type":"array","items":{"$ref":"#/components/schemas/GitPullConflictItem"},"nullable":true},"files_changed":{"type":"integer","format":"int32"},"git_status":{"allOf":[{"$ref":"#/components/schemas/GitStatus"}],"nullable":true},"message":{"type":"string"},"success":{"type":"boolean"}}},"GitPullSessionResponse":{"type":"object","required":["session_id","status","conflicts","resolutions"],"properties":{"conflicts":{"type":"array","items":{"$ref":"#/components/schemas/GitPullConflictItem"}},"message":{"type":"string","nullable":true},"resolutions":{"type":"array","items":{"$ref":"#/components/schemas/GitPullResolution"}},"session_id":{"type":"string","format":"uuid"},"status":{"type":"string"}}},"GitRemoteCheckResponse":{"type":"object","required":["ok","message"],"properties":{"message":{"type":"string"},"ok":{"type":"boolean"},"reason":{"type":"string","nullable":true}}},"GitStatus":{"type":"object","required":["repository_initialized","has_remote","uncommitted_changes","untracked_files","sync_enabled"],"prope
rties":{"current_branch":{"type":"string","nullable":true},"has_remote":{"type":"boolean"},"last_sync":{"type":"string","format":"date-time","nullable":true},"last_sync_commit_hash":{"type":"string","nullable":true},"last_sync_message":{"type":"string","nullable":true},"last_sync_status":{"type":"string","nullable":true},"repository_initialized":{"type":"boolean"},"sync_enabled":{"type":"boolean"},"uncommitted_changes":{"type":"integer","format":"int32","minimum":0},"untracked_files":{"type":"integer","format":"int32","minimum":0}}},"GitSyncRequest":{"type":"object","properties":{"force":{"type":"boolean","nullable":true},"full_scan":{"type":"boolean","nullable":true},"message":{"type":"string","nullable":true},"skip_push":{"type":"boolean","nullable":true}}},"GitSyncResponse":{"type":"object","required":["success","message","files_changed"],"properties":{"commit_hash":{"type":"string","nullable":true},"files_changed":{"type":"integer","format":"int32","minimum":0},"message":{"type":"string"},"success":{"type":"boolean"}}},"HealthResp":{"type":"object","required":["status"],"properties":{"status":{"type":"string"}}},"InstallFromUrlBody":{"type":"object","required":["url"],"properties":{"token":{"type":"string","nullable":true},"url":{"type":"string"}}},"InstallResponse":{"type":"object","required":["id","version"],"properties":{"id":{"type":"string"},"version":{"type":"string"}}},"KvValueBody":{"type":"object","required":["value"],"properties":{"value":{}}},"KvValueResponse":{"type":"object","required":["value"],"properties":{"value":{}}},"LoginRequest":{"type":"object","required":["email","password"],"properties":{"email":{"type":"string"},"password":{"type":"string"},"remember_me":{"type":"boolean"}}},"LoginResponse":{"type":"object","required":["access_token","user"],"properties":{"access_token":{"type":"string"},"user":{"$ref":"#/components/schemas/UserResponse"}}},"ManifestItem":{"type":"object","required":["id","version","scope","mounts","frontend","permissions","config","ui"],"properties":{"author":{"type":"string","nullable":true},"config":{},"frontend":{},"id":{"type":"string"},"mounts":{"type":"array","items":{"type":"string"}},"name":{"type":"string","nullable":true},"permissions":{"type":"array","items":{"type":"string"}},"repository":{"type":"string","nullable":true},"scope":{"type":"string"},"ui":{},"version":{"type":"string"}}},"MaterializeResponse":{"type":"object","required":["created"],"properties":{"created":{"type":"integer","format":"int64"}}},"OAuthLoginRequest":{"type":"object","properties":{"code":{"type":"string","nullable":true},"credential":{"type":"string","nullable":true},"redirect_uri":{"type":"string","nullable":true},"remember_me":{"type":"boolean"},"state":{"type":"string","nullable":true}}},"OAuthStateResponse":{"type":"object","required":["state"],"properties":{"state":{"type":"string"}}},"OutgoingLink":{"type":"object","required":["document_id","title","document_type","link_type"],"properties":{"document_id":{"type":"string"},"document_type":{"type":"string"},"file_path":{"type":"string","nullable":true},"link_text":{"type":"string","nullable":true},"link_type":{"type":"string"},"position_end":{"type":"integer","format":"int32","nullable":true},"position_start":{"type":"integer","format":"int32","nullable":true},"title":{"type":"string"}}},"OutgoingLinksResponse":{"type":"object","required":["links","total_count"],"properties":{"links":{"type":"array","items":{"$ref":"#/components/schemas/OutgoingLink"}},"total_count":{"type":"integer","minimum":0}}},"Pat
chDocumentContentRequest":{"type":"object","required":["operations"],"properties":{"operations":{"type":"array","items":{"$ref":"#/components/schemas/DocumentPatchOperationRequest"}}}},"PermissionOverridePayload":{"type":"object","required":["permission","allowed"],"properties":{"allowed":{"type":"boolean"},"permission":{"type":"string"}}},"PlaceholderItemPayload":{"type":"object","required":["kind","id","code"],"properties":{"code":{"type":"string"},"id":{"type":"string"},"kind":{"type":"string"}}},"PublicDocumentSummary":{"type":"object","required":["id","title","updated_at","published_at"],"properties":{"id":{"type":"string","format":"uuid"},"published_at":{"type":"string","format":"date-time"},"title":{"type":"string"},"updated_at":{"type":"string","format":"date-time"}}},"PublishResponse":{"type":"object","required":["slug","public_url"],"properties":{"public_url":{"type":"string"},"slug":{"type":"string"}}},"RecordsResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{}}}},"RefreshResponse":{"type":"object","required":["access_token"],"properties":{"access_token":{"type":"string"}}},"RegisterRequest":{"type":"object","required":["email","name","password"],"properties":{"email":{"type":"string"},"name":{"type":"string"},"password":{"type":"string"}}},"RenderManyRequest":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/RenderRequest"}}}},"RenderManyResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/RenderResponseBody"}}}},"RenderOptionsPayload":{"type":"object","properties":{"absolute_attachments":{"type":"boolean","default":null,"nullable":true},"base_origin":{"type":"string","default":null,"nullable":true},"doc_id":{"type":"string","format":"uuid","default":null,"nullable":true},"features":{"type":"array","items":{"type":"string"},"default":null,"nullable":true},"flavor":{"type":"string","default":null,"nullable":true},"hardbreaks":{"type":"boolean","default":null,"nullable":true},"sanitize":{"type":"boolean","default":null,"nullable":true},"theme":{"type":"string","default":null,"nullable":true},"token":{"type":"string","default":null,"nullable":true}}},"RenderRequest":{"type":"object","required":["text"],"properties":{"options":{"$ref":"#/components/schemas/RenderOptionsPayload"},"text":{"type":"string"}}},"RenderResponseBody":{"type":"object","required":["html","hash"],"properties":{"hash":{"type":"string"},"html":{"type":"string"},"placeholders":{"type":"array","items":{"$ref":"#/components/schemas/PlaceholderItemPayload"}}}},"SearchResult":{"type":"object","required":["id","title","document_type","updated_at"],"properties":{"document_type":{"type":"string"},"id":{"type":"string","format":"uuid"},"path":{"type":"string","nullable":true},"title":{"type":"string"},"updated_at":{"type":"string","format":"date-time"}}},"SessionResponse":{"type":"object","required":["id","workspace_id","remember_me","created_at","last_seen_at","expires_at","current"],"properties":{"created_at":{"type":"string","format":"date-time"},"current":{"type":"boolean"},"expires_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"ip_address":{"type":"string","nullable":true},"last_seen_at":{"type":"string","format":"date-time"},"remember_me":{"type":"boolean"},"user_agent":{"type":"string","nullable":true},"workspace_id":{"type":"string","format":"uuid"}}},"ShareBrowseResponse":{"type":"object","required":
["tree"],"properties":{"tree":{"type":"array","items":{"$ref":"#/components/schemas/ShareBrowseTreeItem"}}}},"ShareBrowseTreeItem":{"type":"object","required":["id","title","type","created_at","updated_at"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"parent_id":{"type":"string","format":"uuid","nullable":true},"title":{"type":"string"},"type":{"type":"string","example":"document"},"updated_at":{"type":"string","format":"date-time"}}},"ShareDocumentResponse":{"type":"object","required":["id","title","permission"],"properties":{"content":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"permission":{"type":"string"},"title":{"type":"string"}}},"ShareItem":{"type":"object","required":["id","token","permission","url","scope"],"properties":{"expires_at":{"type":"string","format":"date-time","nullable":true},"id":{"type":"string","format":"uuid"},"parent_share_id":{"type":"string","format":"uuid","description":"If present, this document share was materialized from a folder share","nullable":true},"permission":{"type":"string"},"scope":{"type":"string","description":"document | folder"},"token":{"type":"string"},"url":{"type":"string"}}},"ShareMountItem":{"type":"object","required":["id","token","target_document_id","target_document_type","target_title","permission","created_at"],"properties":{"created_at":{"type":"string","format":"date-time"},"id":{"type":"string","format":"uuid"},"parent_folder_id":{"type":"string","format":"uuid","nullable":true},"permission":{"type":"string"},"target_document_id":{"type":"string","format":"uuid"},"target_document_type":{"type":"string"},"target_title":{"type":"string"},"token":{"type":"string"}}},"SnapshotDiffBaseParam":{"type":"string","enum":["auto","current","previous"]},"SnapshotDiffKind":{"type":"string","enum":["current","snapshot"]},"SnapshotDiffResponse":{"type":"object","required":["base","target","diff"],"properties":{"base":{"$ref":"#/components/schemas/SnapshotDiffSideResponse"},"diff":{"$ref":"#/components/schemas/TextDiffResult"},"target":{"$ref":"#/components/schemas/SnapshotDiffSideResponse"}}},"SnapshotDiffSideResponse":{"type":"object","required":["kind","markdown"],"properties":{"kind":{"$ref":"#/components/schemas/SnapshotDiffKind"},"markdown":{"type":"string"},"snapshot":{"allOf":[{"$ref":"#/components/schemas/SnapshotSummary"}],"nullable":true}}},"SnapshotListResponse":{"type":"object","required":["items"],"properties":{"items":{"type":"array","items":{"$ref":"#/components/schemas/SnapshotSummary"}}}},"SnapshotRestoreResponse":{"type":"object","required":["snapshot"],"properties":{"snapshot":{"$ref":"#/components/schemas/SnapshotSummary"}}},"SnapshotSummary":{"type":"object","required":["id","document_id","label","kind","created_at","byte_size","content_hash"],"properties":{"byte_size":{"type":"integer","format":"int64"},"content_hash":{"type":"string"},"created_at":{"type":"string","format":"date-time"},"created_by":{"type":"string","format":"uuid","nullable":true},"document_id":{"type":"string","format":"uuid"},"id":{"type":"string","format":"uuid"},"kind":{"type":"string"},"label":{"type":"string"},"notes":{"type":"string","nullable":true}}},"SwitchWorkspaceResponse":{"type":"object","required":["access_token"],"properties":{"access_token":{"type":"string"}}},"TagItem":{"type":"object","required":["name","count"],"properties":{"count":{"type":"integer","format":"int64"},"name":{"type":"string"}}},"TextDiffLine":{"type":"object","required":["line_type
","content"],"properties":{"content":{"type":"string"},"line_type":{"$ref":"#/components/schemas/TextDiffLineType"},"new_line_number":{"type":"integer","format":"int32","nullable":true,"minimum":0},"old_line_number":{"type":"integer","format":"int32","nullable":true,"minimum":0}}},"TextDiffLineType":{"type":"string","enum":["added","deleted","context"]},"TextDiffResult":{"type":"object","required":["file_path","diff_lines"],"properties":{"diff_lines":{"type":"array","items":{"$ref":"#/components/schemas/TextDiffLine"}},"file_path":{"type":"string"},"new_content":{"type":"string","nullable":true},"old_content":{"type":"string","nullable":true}}},"UninstallBody":{"type":"object","required":["id"],"properties":{"id":{"type":"string"}}},"UpdateDocumentContentRequest":{"type":"object","required":["content"],"properties":{"content":{"type":"string"}}},"UpdateDocumentRequest":{"type":"object","properties":{"parent_id":{"type":"string","nullable":true},"title":{"type":"string","nullable":true}}},"UpdateGitConfigRequest":{"type":"object","properties":{"auth_data":{"nullable":true},"auth_type":{"type":"string","nullable":true},"auto_sync":{"type":"boolean","nullable":true},"branch_name":{"type":"string","nullable":true},"repository_url":{"type":"string","nullable":true}}},"UpdateMemberRoleRequest":{"type":"object","required":["role_kind"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true}}},"UpdateRecordBody":{"type":"object","required":["patch"],"properties":{"patch":{}}},"UpdateUserShortcutRequest":{"type":"object","properties":{"bindings":{"type":"object"},"leader_key":{"type":"string","example":"","nullable":true}}},"UpdateWorkspaceRequest":{"type":"object","properties":{"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"name":{"type":"string","nullable":true}}},"UpdateWorkspaceRoleRequest":{"type":"object","properties":{"base_role":{"type":"string","nullable":true},"description":{"type":"string","nullable":true},"name":{"type":"string","nullable":true},"overrides":{"type":"array","items":{"$ref":"#/components/schemas/PermissionOverridePayload"},"nullable":true},"priority":{"type":"integer","format":"int32","nullable":true}}},"UploadFileMultipart":{"type":"object","required":["file","document_id"],"properties":{"document_id":{"type":"string","format":"uuid","description":"Target document ID"},"file":{"type":"string","format":"binary","description":"File to 
upload"}}},"UploadFileResponse":{"type":"object","required":["id","url","filename","size"],"properties":{"content_type":{"type":"string","nullable":true},"filename":{"type":"string"},"id":{"type":"string","format":"uuid"},"size":{"type":"integer","format":"int64"},"url":{"type":"string"}}},"UserResponse":{"type":"object","required":["id","email","name","workspaces"],"properties":{"active_workspace":{"allOf":[{"$ref":"#/components/schemas/WorkspaceMembershipResponse"}],"nullable":true},"active_workspace_id":{"type":"string","format":"uuid","nullable":true},"active_workspace_permissions":{"type":"array","items":{"type":"string"}},"email":{"type":"string"},"id":{"type":"string","format":"uuid"},"name":{"type":"string"},"workspaces":{"type":"array","items":{"$ref":"#/components/schemas/WorkspaceMembershipResponse"}}}},"UserShortcutResponse":{"type":"object","required":["bindings"],"properties":{"bindings":{"type":"object"},"leader_key":{"type":"string","example":"","nullable":true},"updated_at":{"type":"string","format":"date-time","nullable":true}}},"WorkspaceInvitationResponse":{"type":"object","required":["id","workspace_id","email","role_kind","invited_by","token","created_at"],"properties":{"accepted_at":{"type":"string","format":"date-time","nullable":true},"accepted_by":{"type":"string","format":"uuid","nullable":true},"created_at":{"type":"string","format":"date-time"},"custom_role_id":{"type":"string","format":"uuid","nullable":true},"email":{"type":"string"},"expires_at":{"type":"string","format":"date-time","nullable":true},"id":{"type":"string","format":"uuid"},"invited_by":{"type":"string","format":"uuid"},"revoked_at":{"type":"string","format":"date-time","nullable":true},"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true},"token":{"type":"string"},"workspace_id":{"type":"string","format":"uuid"}}},"WorkspaceMemberResponse":{"type":"object","required":["workspace_id","user_id","email","name","role_kind","is_default"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"email":{"type":"string"},"is_default":{"type":"boolean"},"name":{"type":"string"},"role_kind":{"type":"string"},"system_role":{"type":"string","nullable":true},"user_id":{"type":"string","format":"uuid"},"workspace_id":{"type":"string","format":"uuid"}}},"WorkspaceMembershipResponse":{"type":"object","required":["id","name","slug","is_personal","role_kind","is_default"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"is_default":{"type":"boolean"},"is_personal":{"type":"boolean"},"name":{"type":"string"},"role_kind":{"type":"string"},"slug":{"type":"string"},"system_role":{"type":"string","nullable":true}}},"WorkspacePermissionsResponse":{"type":"object","required":["workspace_id","permissions"],"properties":{"permissions":{"type":"array","items":{"type":"string"}},"workspace_id":{"type":"string","format":"uuid"}}},"WorkspaceResponse":{"type":"object","required":["id","name","slug","is_personal","role_kind","is_default"],"properties":{"custom_role_id":{"type":"string","format":"uuid","nullable":true},"description":{"type":"string","nullable":true},"icon":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"is_default":{"type":"boolean"},"is_personal":{"type":"boolean"},"name":{"type":"string"},"role_kind":{"type":"string"},"slug":{"type":"string"},"system_role":{"type":"string","nul
lable":true}}},"WorkspaceRoleResponse":{"type":"object","required":["id","workspace_id","name","base_role","priority","overrides"],"properties":{"base_role":{"type":"string"},"description":{"type":"string","nullable":true},"id":{"type":"string","format":"uuid"},"name":{"type":"string"},"overrides":{"type":"array","items":{"$ref":"#/components/schemas/PermissionOverridePayload"}},"priority":{"type":"integer","format":"int32"},"workspace_id":{"type":"string","format":"uuid"}}}}},"tags":[{"name":"Auth","description":"Authentication"},{"name":"Documents","description":"Documents management"},{"name":"Files","description":"File management"},{"name":"Sharing","description":"Document sharing"},{"name":"Public Documents","description":"Public pages"},{"name":"Realtime","description":"Yjs WebSocket endpoint (/yjs/:id)"},{"name":"Git","description":"Git integration"},{"name":"Markdown","description":"Markdown rendering"},{"name":"Plugins","description":"Plugins management & data APIs"},{"name":"Health","description":"System health checks"}]} diff --git a/api/rust-toolchain.toml b/api/rust-toolchain.toml new file mode 100644 index 00000000..1a216558 --- /dev/null +++ b/api/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +channel = "1.92.0" +components = ["rustfmt", "clippy"] diff --git a/api/src/application/access/mod.rs b/api/src/application/access/mod.rs deleted file mode 100644 index e8d6c9ce..00000000 --- a/api/src/application/access/mod.rs +++ /dev/null @@ -1,148 +0,0 @@ -use uuid::Uuid; - -use crate::application::ports::access_repository::AccessRepository; -use crate::application::ports::share_access_port::ShareAccessPort; -use crate::domain::workspaces::permissions::{PERM_DOC_EDIT, PERM_DOC_VIEW}; - -#[derive(Debug, Clone)] -pub enum Actor { - User(Uuid), - ShareToken(String), - Public, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub enum Capability { - None, - View, - Edit, -} - -// Presentation layer is responsible for building Actor from HTTP inputs. -// This module intentionally avoids depending on presentation types. 
-
-pub async fn resolve_document<A, R>(
-    access_repo: &A,
-    shares_repo: &R,
-    actor: &Actor,
-    doc_id: Uuid,
-) -> Capability
-where
-    A: AccessRepository + ?Sized,
-    R: ShareAccessPort + ?Sized,
-{
-    match actor {
-        Actor::User(uid) => {
-            let access = match access_repo.resolve_user_document_access(doc_id, *uid).await {
-                Ok(Some(access)) => access,
-                _ => return Capability::None,
-            };
-            if !access.permissions.allows(PERM_DOC_VIEW) {
-                return Capability::None;
-            }
-            if access.is_archived {
-                Capability::View
-            } else if access.permissions.allows(PERM_DOC_EDIT) {
-                Capability::Edit
-            } else {
-                Capability::View
-            }
-        }
-        Actor::ShareToken(t) => {
-            // Resolve token target and then decide access when document matches token scope
-            if let Ok(Some((share_id, perm, expires_at, shared_id, shared_type, _workspace_id))) =
-                shares_repo.resolve_share_by_token(t).await
-            {
-                if access_repo
-                    .is_document_archived(doc_id)
-                    .await
-                    .unwrap_or(false)
-                {
-                    return Capability::None;
-                }
-                // Check expiration
-                if let Some(exp) = expires_at {
-                    if exp < chrono::Utc::now() {
-                        return Capability::None;
-                    }
-                }
-                if shared_type != "folder" {
-                    if shared_id == doc_id {
-                        if perm == "edit" {
-                            Capability::Edit
-                        } else {
-                            Capability::View
-                        }
-                    } else {
-                        Capability::None
-                    }
-                } else {
-                    // Need a materialized child share for this doc
-                    match shares_repo
-                        .get_materialized_permission(share_id, doc_id)
-                        .await
-                    {
-                        Ok(Some(p)) => {
-                            if p == "edit" {
-                                Capability::Edit
-                            } else {
-                                Capability::View
-                            }
-                        }
-                        _ => Capability::None,
-                    }
-                }
-            } else {
-                Capability::None
-            }
-        }
-        Actor::Public => {
-            let is_pub = access_repo
-                .is_document_public(doc_id)
-                .await
-                .unwrap_or(false);
-            if is_pub {
-                // Public documents remain view-only even when archived.
-                Capability::View
-            } else {
-                Capability::None
-            }
-        }
-    }
-}
-
-pub async fn require_view<A, R>(
-    access_repo: &A,
-    shares_repo: &R,
-    actor: &Actor,
-    doc_id: Uuid,
-) -> anyhow::Result<Capability>
-where
-    A: AccessRepository + ?Sized,
-    R: ShareAccessPort + ?Sized,
-{
-    let cap = resolve_document(access_repo, shares_repo, actor, doc_id).await;
-    if cap >= Capability::View {
-        Ok(cap)
-    } else {
-        anyhow::bail!("unauthorized")
-    }
-}
-
-pub async fn require_edit<A, R>(
-    access_repo: &A,
-    shares_repo: &R,
-    actor: &Actor,
-    doc_id: Uuid,
-) -> anyhow::Result<()>
-where
-    A: AccessRepository + ?Sized,
-    R: ShareAccessPort + ?Sized,
-{
-    let cap = resolve_document(access_repo, shares_repo, actor, doc_id).await;
-    if cap >= Capability::Edit {
-        Ok(())
-    } else {
-        anyhow::bail!("forbidden")
-    }
-}
diff --git a/api/src/application/dto/mod.rs b/api/src/application/dto/mod.rs
deleted file mode 100644
index c3423ad9..00000000
--- a/api/src/application/dto/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-pub mod api_tokens;
-pub mod auth;
-pub mod diff;
-pub mod document_export;
-pub mod documents;
-pub mod git;
-pub mod plugins;
-pub mod public;
-pub mod shares;
-pub mod tags;
-pub mod user_shortcuts;
-// pub mod files; // add if/when needed
diff --git a/api/src/application/mod.rs b/api/src/application/mod.rs
deleted file mode 100644
index ef51706e..00000000
--- a/api/src/application/mod.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-pub mod access;
-pub mod dto;
-pub mod linkgraph;
-pub mod ports;
-pub mod services;
-pub mod use_cases;
-pub mod utils;
diff --git a/api/src/application/ports/doc_event_log.rs b/api/src/application/ports/doc_event_log.rs
deleted file mode 100644
index 9066dadb..00000000
--- a/api/src/application/ports/doc_event_log.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use async_trait::async_trait;
-use serde_json::Value;
-use sqlx::{Postgres, Transaction};
-use uuid::Uuid;
-
-#[async_trait]
-pub trait DocEventLog: Send + Sync {
-    async fn append(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        event_type: &str,
-        payload: Option<Value>,
-    ) -> anyhow::Result<()>;
-
-    async fn append_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        event_type: &str,
-        payload: Option<Value>,
-    ) -> anyhow::Result<()>;
-}
diff --git a/api/src/application/ports/document_repository.rs b/api/src/application/ports/document_repository.rs
deleted file mode 100644
index 5868481d..00000000
--- a/api/src/application/ports/document_repository.rs
+++ /dev/null
@@ -1,207 +0,0 @@
-use async_trait::async_trait;
-use sqlx::{Postgres, Transaction};
-use uuid::Uuid;
-
-use crate::domain::documents::document::Document as DomainDocument;
-use crate::domain::documents::document::{
-    BacklinkInfo as DomBacklinkInfo, OutgoingLink as DomOutgoingLink, SearchHit,
-};
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub struct DocumentPathConflictError;
-
-impl std::fmt::Display for DocumentPathConflictError {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "document path conflict")
-    }
-}
-
-impl std::error::Error for DocumentPathConflictError {}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum DocumentListState {
-    Active,
-    Archived,
-    All,
-}
-
-impl Default for DocumentListState {
-    fn default() -> Self {
-        DocumentListState::Active
-    }
-}
-
-#[async_trait]
-pub trait DocumentRepository: Send + Sync {
-    async fn list_for_user(
-        &self,
-        workspace_id: Uuid,
-        query: Option<String>,
-        tag: Option<String>,
-        state: DocumentListState,
-    ) -> anyhow::Result<Vec<DomainDocument>>;
-
-    async fn list_ids_for_user(&self, workspace_id: Uuid) -> anyhow::Result<Vec<Uuid>>;
-
-    async fn list_paths_for_user(&self, workspace_id: Uuid) -> anyhow::Result<Vec<String>>;
-
-    async fn list_workspace_documents(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<DomainDocument>>;
-
-    async fn get_by_id(&self, id: Uuid) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn search_for_user(
-        &self,
-        workspace_id: Uuid,
-        query: Option<String>,
-        limit: i64,
-    ) -> anyhow::Result<Vec<SearchHit>>;
-
-    async fn create_for_user(
-        &self,
-        workspace_id: Uuid,
-        created_by: Uuid,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> anyhow::Result<DomainDocument>;
-
-    async fn create_for_user_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        created_by: Uuid,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> anyhow::Result<DomainDocument>;
-
-    // parent_id: None => not provided; Some(None) => set NULL; Some(Some(uuid)) => set to value
-    async fn update_title_and_parent_for_user(
-        &self,
-        id: Uuid,
-        workspace_id: Uuid,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn update_title_and_parent_for_user_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        id: Uuid,
-        workspace_id: Uuid,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    // Returns Some(type) if deleted, None if not found/unauthorized
-    async fn delete_owned(&self, id: Uuid, workspace_id: Uuid) -> anyhow::Result<Option<String>>;
-
-    async fn delete_owned_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<String>>;
-
-    async fn backlinks_for(
-        &self,
-        workspace_id: Uuid,
-        target_id: Uuid,
-    ) -> anyhow::Result<Vec<DomBacklinkInfo>>;
-
-    async fn outgoing_links_for(
-        &self,
-        workspace_id: Uuid,
-        source_id: Uuid,
-    ) -> anyhow::Result<Vec<DomOutgoingLink>>;
-
-    // Lightweight meta for ownership-scoped queries
-    async fn get_meta_for_owner(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DocMeta>>;
-
-    async fn get_meta_for_owner_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DocMeta>>;
-
-    async fn archive_subtree(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-        archived_by: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn archive_subtree_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-        archived_by: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn unarchive_subtree(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn unarchive_subtree_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn list_owned_subtree_documents(
-        &self,
-        workspace_id: Uuid,
-        root_id: Uuid,
-    ) -> anyhow::Result<Vec<SubtreeDocument>>;
-
-    async fn list_owned_subtree_documents_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        root_id: Uuid,
-    ) -> anyhow::Result<Vec<SubtreeDocument>>;
-
-    async fn get_by_owner_and_path(
-        &self,
-        workspace_id: Uuid,
-        relative_path: &str,
-    ) -> anyhow::Result<Option<DomainDocument>>;
-
-    async fn update_repo_path(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-        relative_path: &str,
-    ) -> anyhow::Result<()>;
-}
-
-#[derive(Debug, Clone)]
-pub struct DocMeta {
-    pub workspace_id: Uuid,
-    pub doc_type: String,
-    pub path: Option<String>,
-    pub slug: String,
-    pub desired_path: String,
-    pub title: String,
-    pub archived_at: Option<chrono::DateTime<chrono::Utc>>,
-}
-
-#[derive(Debug, Clone)]
-pub struct SubtreeDocument {
-    pub id: Uuid,
-    pub doc_type: String,
-}
diff --git a/api/src/application/ports/files_repository.rs b/api/src/application/ports/files_repository.rs
deleted file mode 100644
index 238a173a..00000000
--- a/api/src/application/ports/files_repository.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-use async_trait::async_trait;
-use sqlx::{Postgres, Transaction};
-use uuid::Uuid;
-
-#[async_trait]
-pub trait FilesRepository: Send + Sync {
-    async fn is_workspace_document(&self, doc_id: Uuid, workspace_id: Uuid)
-        -> anyhow::Result<bool>;
-    async fn insert_file(
-        &self,
-        doc_id: Uuid,
-        filename: &str,
-        content_type: Option<&str>,
-        size: i64,
-        storage_path: &str,
-        content_hash: &str,
-    ) -> anyhow::Result<Uuid>;
-    async fn get_file_meta(
-        &self,
-        file_id: Uuid,
-    ) -> anyhow::Result<Option<(String, Option<String>, Uuid)>>; // storage_path, content_type, workspace_id
-    async fn get_file_path_by_doc_and_name(
-        &self,
-        doc_id: Uuid,
-        filename: &str,
-    ) -> anyhow::Result<Option<(String, Option<String>)>>;
-
-    async fn list_storage_paths_for_document(&self, doc_id: Uuid) -> anyhow::Result<Vec<String>>;
-    async fn list_storage_paths_for_document_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Vec<String>>;
-
-    async fn list_files_for_document(&self, doc_id: Uuid) -> anyhow::Result<Vec<FileRecord>>;
-
-    async fn list_storage_paths_for_workspace(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<String>>;
-
-    async fn find_by_storage_path(
-        &self,
-        storage_path: &str,
-    ) -> anyhow::Result<Option<(Uuid, Uuid, Uuid)>>; // (file_id, document_id, workspace_id)
-
-    async fn update_storage_path(&self, file_id: Uuid, storage_path: &str) -> anyhow::Result<()>;
-
-    async fn update_hash_and_size(
-        &self,
-        file_id: Uuid,
-        size: i64,
-        content_hash: &str,
-    ) -> anyhow::Result<()>;
-
-    async fn delete_by_id(&self, file_id: Uuid) -> anyhow::Result<()>;
-}
-
-#[derive(Debug, Clone)]
-pub struct FileRecord {
-    pub id: Uuid,
-    pub filename: String,
-    pub content_type: Option<String>,
-    pub size: i64,
-    pub storage_path: String,
-    pub content_hash: String,
-}
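[Editor's note] The deleted `DocumentRepository` encodes "unset vs. set-to-NULL vs. set-to-value" for `parent_id` as `Option<Option<Uuid>>`, per its own comment. A minimal sketch of that tri-state convention (the `describe_parent_patch` helper and the `u64` id are illustrative simplifications, not part of this diff):

```rust
// None            => parent_id not provided; leave the current value alone.
// Some(None)      => explicit request to clear: SET parent_id = NULL.
// Some(Some(id))  => explicit request to re-parent: SET parent_id = id.
fn describe_parent_patch(parent_id: Option<Option<u64>>) -> String {
    match parent_id {
        None => "parent_id not provided; keep current value".into(),
        Some(None) => "SET parent_id = NULL".into(),
        Some(Some(id)) => format!("SET parent_id = {id}"),
    }
}

fn main() {
    assert_eq!(describe_parent_patch(None), "parent_id not provided; keep current value");
    assert_eq!(describe_parent_patch(Some(None)), "SET parent_id = NULL");
    assert_eq!(describe_parent_patch(Some(Some(42))), "SET parent_id = 42");
}
```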
diff --git a/api/src/application/ports/git_pull_session_repository.rs b/api/src/application/ports/git_pull_session_repository.rs
deleted file mode 100644
index 1a330926..00000000
--- a/api/src/application/ports/git_pull_session_repository.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-use crate::application::dto::git::GitPullSessionDto;
-
-#[async_trait]
-pub trait GitPullSessionRepository: Send + Sync {
-    async fn upsert(&self, session: GitPullSessionDto) -> anyhow::Result<()>;
-    async fn get(&self, workspace_id: Uuid, id: Uuid) -> anyhow::Result<Option<GitPullSessionDto>>;
-}
diff --git a/api/src/application/ports/git_repository.rs b/api/src/application/ports/git_repository.rs
deleted file mode 100644
index a60291b8..00000000
--- a/api/src/application/ports/git_repository.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-#[derive(Debug, Clone)]
-pub struct UserGitCfg {
-    pub repository_url: String,
-    pub branch_name: String,
-    pub auth_type: Option<String>,
-    pub auth_data: Option<serde_json::Value>,
-    pub auto_sync: bool,
-}
-
-#[async_trait]
-pub trait GitRepository: Send + Sync {
-    async fn get_config(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            String,
-            String,
-            bool,
-            chrono::DateTime<chrono::Utc>,
-            chrono::DateTime<chrono::Utc>,
-        )>,
-    >;
-    async fn upsert_config(
-        &self,
-        workspace_id: Uuid,
-        repository_url: &str,
-        branch_name: Option<&str>,
-        auth_type: &str,
-        auth_data: &serde_json::Value,
-        auto_sync: Option<bool>,
-    ) -> anyhow::Result<(
-        Uuid,
-        String,
-        String,
-        String,
-        bool,
-        chrono::DateTime<chrono::Utc>,
-        chrono::DateTime<chrono::Utc>,
-    )>;
-    async fn delete_config(&self, workspace_id: Uuid) -> anyhow::Result<bool>;
-    async fn load_user_git_cfg(&self, workspace_id: Uuid) -> anyhow::Result<Option<UserGitCfg>>;
-    async fn get_last_sync_log(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<
-        Option<(
-            Option<chrono::DateTime<chrono::Utc>>,
-            Option<String>,
-            Option<String>,
-            Option<String>,
-        )>,
-    >;
-    async fn log_sync_operation(
-        &self,
-        workspace_id: Uuid,
-        operation: &str,
-        status: &str,
-        message: Option<&str>,
-        commit_hash: Option<&str>,
-    ) -> anyhow::Result<()>;
-
-    async fn delete_sync_logs(&self, workspace_id: Uuid) -> anyhow::Result<()>;
-
-    async fn delete_repository_state(&self, workspace_id: Uuid) -> anyhow::Result<()>;
-
-    async fn list_auto_sync_workspaces(&self) -> anyhow::Result<Vec<Uuid>>;
-}
diff --git a/api/src/application/ports/git_workspace.rs b/api/src/application/ports/git_workspace.rs
deleted file mode 100644
index 6c3b5ea3..00000000
--- a/api/src/application/ports/git_workspace.rs
+++ /dev/null
@@ -1,66 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-use crate::application::dto::diff::TextDiffResult;
-use crate::application::dto::git::{
-    GitChangeItem, GitCommitInfo, GitImportOutcome, GitPullRequestDto, GitPullResultDto,
-    GitRemoteCheckDto, GitSyncOutcome, GitSyncRequestDto, GitWorkspaceStatus,
-};
-use crate::application::ports::git_repository::UserGitCfg;
-
-#[async_trait]
-pub trait GitWorkspacePort: Send + Sync {
-    async fn ensure_repository(
-        &self,
-        workspace_id: Uuid,
-        default_branch: &str,
-    ) -> anyhow::Result<()>;
-    async fn remove_repository(&self, workspace_id: Uuid) -> anyhow::Result<()>;
-    async fn status(&self, workspace_id: Uuid) -> anyhow::Result<GitWorkspaceStatus>;
-    async fn list_changes(&self, workspace_id: Uuid) -> anyhow::Result<Vec<GitChangeItem>>;
-    async fn working_diff(&self, workspace_id: Uuid) -> anyhow::Result<Vec<TextDiffResult>>;
-    async fn commit_diff(
-        &self,
-        workspace_id: Uuid,
-        from: &str,
-        to: &str,
-    ) -> anyhow::Result<Vec<TextDiffResult>>;
-    async fn history(&self, workspace_id: Uuid) -> anyhow::Result<Vec<GitCommitInfo>>;
-    async fn sync(
-        &self,
-        workspace_id: Uuid,
-        req: &GitSyncRequestDto,
-        cfg: Option<&UserGitCfg>,
-    ) -> anyhow::Result<GitSyncOutcome>;
-    async fn import_repository(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        cfg: &UserGitCfg,
-    ) -> anyhow::Result<GitImportOutcome>;
-    async fn pull(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        req: &GitPullRequestDto,
-        cfg: &UserGitCfg,
-    ) -> anyhow::Result<GitPullResultDto>;
-    async fn head_commit(&self, workspace_id: Uuid) -> anyhow::Result<Option<Vec<u8>>>;
-    async fn remote_head(
-        &self,
-        workspace_id: Uuid,
-        cfg: &UserGitCfg,
-    ) -> anyhow::Result<Option<Vec<u8>>>;
-    async fn has_pending_changes(&self, workspace_id: Uuid) -> anyhow::Result<bool>;
-    async fn drift_since_commit(
-        &self,
-        workspace_id: Uuid,
-        base_commit: &[u8],
-    ) -> anyhow::Result<bool>;
-
-    async fn check_remote(
-        &self,
-        workspace_id: Uuid,
-        cfg: &UserGitCfg,
-    ) -> anyhow::Result<GitRemoteCheckDto>;
-}
diff --git a/api/src/application/ports/gitignore_port.rs b/api/src/application/ports/gitignore_port.rs
deleted file mode 100644
index 82d986a9..00000000
--- a/api/src/application/ports/gitignore_port.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use async_trait::async_trait;
-
-#[async_trait]
-pub trait GitignorePort: Send + Sync {
-    async fn ensure_gitignore(&self, dir: &str) -> anyhow::Result<bool>;
-    async fn upsert_gitignore_patterns(
-        &self,
-        dir: &str,
-        patterns: &[String],
-    ) -> anyhow::Result<bool>;
-    async fn read_gitignore_patterns(&self, dir: &str) -> anyhow::Result<Vec<String>>;
-}
diff --git a/api/src/application/ports/linkgraph_repository.rs b/api/src/application/ports/linkgraph_repository.rs
deleted file mode 100644
index be7a4371..00000000
--- a/api/src/application/ports/linkgraph_repository.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-#[async_trait]
-pub trait LinkGraphRepository: Send + Sync {
-    async fn clear_links_for_source(&self, source_id: Uuid) -> anyhow::Result<()>;
-    async fn exists_doc_for_owner(&self, doc_id: Uuid, owner_id: Uuid) -> anyhow::Result<bool>;
-    async fn find_doc_id_by_owner_and_title(
-        &self,
-        owner_id: Uuid,
-        title: &str,
-    ) -> anyhow::Result<Option<Uuid>>;
-    async fn upsert_link(
-        &self,
-        source_id: Uuid,
-        target_id: Uuid,
-        link_type: &str,
-        link_text: Option<String>,
-        position_start: i32,
-        position_end: i32,
-    ) -> anyhow::Result<()>;
-}
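[Editor's note] A hedged sketch of how a caller might compose the `GitWorkspacePort` methods deleted above to decide whether an auto-sync is warranted; the `needs_sync` helper is an assumption for illustration, assuming the trait is in scope and `#[async_trait]` semantics:

```rust
// Hypothetical helper: a workspace needs a sync when it has uncommitted
// changes, or when the working tree has drifted since the last known HEAD.
async fn needs_sync(
    port: &dyn GitWorkspacePort,
    workspace_id: uuid::Uuid,
) -> anyhow::Result<bool> {
    // Uncommitted changes always warrant a sync.
    if port.has_pending_changes(workspace_id).await? {
        return Ok(true);
    }
    // Otherwise compare against the last known HEAD commit, if any.
    match port.head_commit(workspace_id).await? {
        Some(base) => port.drift_since_commit(workspace_id, &base).await,
        None => Ok(false), // no commits yet, nothing to sync
    }
}
```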
diff --git a/api/src/application/ports/mod.rs b/api/src/application/ports/mod.rs
deleted file mode 100644
index 5dbff5b8..00000000
--- a/api/src/application/ports/mod.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-pub mod access_repository;
-pub mod api_token_repository;
-pub mod awareness_port;
-pub mod doc_event_log;
-pub mod document_exporter;
-pub mod document_repository;
-pub mod document_snapshot_archive_repository;
-pub mod files_repository;
-pub mod git_pull_session_repository;
-pub mod git_rebuild_job_queue;
-pub mod git_repository;
-pub mod git_storage;
-pub mod git_workspace;
-pub mod gitignore_port;
-pub mod health_probe;
-pub mod linkgraph_repository;
-pub mod plugin_asset_store;
-pub mod plugin_event_publisher;
-pub mod plugin_event_subscriber;
-pub mod plugin_installation_repository;
-pub mod plugin_installer;
-pub mod plugin_package_fetcher;
-pub mod plugin_repository;
-pub mod plugin_runtime;
-pub mod public_repository;
-pub mod realtime_hydration_port;
-pub mod realtime_persistence_port;
-pub mod realtime_port;
-pub mod realtime_types;
-pub mod share_access_port;
-pub mod shares_repository;
-pub mod storage_ingest_queue;
-pub mod storage_port;
-pub mod storage_projection_queue;
-pub mod storage_reconcile_backend;
-pub mod storage_reconcile_jobs;
-pub mod tag_repository;
-pub mod tagging_repository;
-pub mod user_repository;
-pub mod user_session_repository;
-pub mod user_shortcut_repository;
-pub mod workspace_repository;
diff --git a/api/src/application/ports/plugin_event_subscriber.rs b/api/src/application/ports/plugin_event_subscriber.rs
deleted file mode 100644
index 0fd4ef62..00000000
--- a/api/src/application/ports/plugin_event_subscriber.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-use async_trait::async_trait;
-use futures_util::stream::BoxStream;
-
-use crate::application::ports::plugin_event_publisher::PluginScopedEvent;
-
-#[async_trait]
-pub trait PluginEventSubscriber: Send + Sync {
-    async fn subscribe(&self) -> anyhow::Result<BoxStream<'static, PluginScopedEvent>>;
-}
diff --git a/api/src/application/ports/plugin_package_fetcher.rs b/api/src/application/ports/plugin_package_fetcher.rs
deleted file mode 100644
index f3f140c9..00000000
--- a/api/src/application/ports/plugin_package_fetcher.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-use async_trait::async_trait;
-
-#[async_trait]
-pub trait PluginPackageFetcher: Send + Sync {
-    async fn fetch(&self, url: &str, token: Option<&str>) -> anyhow::Result<Vec<u8>>;
-}
diff --git a/api/src/application/ports/public_repository.rs b/api/src/application/ports/public_repository.rs
deleted file mode 100644
index 9b83c5ac..00000000
--- a/api/src/application/ports/public_repository.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-use crate::domain::documents::document::Document;
-
-#[async_trait]
-pub trait PublicRepository: Send + Sync {
-    async fn ensure_workspace_title_and_slug(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<(String, String)>>; // (title, workspace_slug)
-    async fn upsert_public_document(&self, doc_id: Uuid, slug: &str) -> anyhow::Result<()>;
-    async fn slug_exists(&self, slug: &str) -> anyhow::Result<bool>;
-    async fn is_workspace_document(&self, doc_id: Uuid, workspace_id: Uuid)
-        -> anyhow::Result<bool>;
-    async fn delete_public_document(&self, doc_id: Uuid) -> anyhow::Result<bool>;
-    async fn get_publish_status(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<(String, String)>>; // (slug, workspace_slug)
-    async fn list_workspace_public_documents(
-        &self,
-        workspace_slug: &str,
-    ) -> anyhow::Result<
-        Vec<(
-            Uuid,
-            String,
-            chrono::DateTime<chrono::Utc>,
-            chrono::DateTime<chrono::Utc>,
-        )>,
-    >;
-    async fn get_public_meta_by_workspace_and_id(
-        &self,
-        workspace_slug: &str,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<Document>>;
-    async fn public_exists_by_workspace_and_id(
-        &self,
-        workspace_slug: &str,
-        doc_id: Uuid,
-    ) -> anyhow::Result<bool>;
-}
diff --git a/api/src/application/ports/share_access_port.rs b/api/src/application/ports/share_access_port.rs
deleted file mode 100644
index fd0e5bc3..00000000
--- a/api/src/application/ports/share_access_port.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-#[async_trait]
-pub trait ShareAccessPort: Send + Sync {
-    async fn resolve_share_by_token(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            Option<chrono::DateTime<chrono::Utc>>,
-            Uuid,
-            String,
-            Uuid,
-        )>,
-    >;
-
-    async fn get_materialized_permission(
-        &self,
-        parent_share_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<String>>;
-}
diff --git a/api/src/application/ports/shares_repository.rs b/api/src/application/ports/shares_repository.rs
deleted file mode 100644
index 52b54ae9..00000000
--- a/api/src/application/ports/shares_repository.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-#[derive(Debug, Clone)]
-pub struct ShareRow {
-    pub id: Uuid,
-    pub token: String,
-    pub permission: String,
-    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
-    pub parent_share_id: Option<Uuid>,
-    pub document_id: Uuid,
-    pub document_type: String,
-    pub document_title: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-}
-
-#[derive(Debug, Clone)]
-pub struct ShareMountRow {
-    pub id: Uuid,
-    pub token: String,
-    pub target_document_id: Uuid,
-    pub target_document_type: String,
-    pub target_title: String,
-    pub permission: String,
-    pub parent_folder_id: Option<Uuid>,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-}
-
-#[async_trait]
-pub trait SharesRepository: Send + Sync {
-    async fn create_share(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        document_id: Uuid,
-        permission: &str,
-        expires_at: Option<chrono::DateTime<chrono::Utc>>,
-    ) -> anyhow::Result<(String, Uuid, String)>; // (token_saved, share_id, document_type)
-
-    async fn list_document_shares(
-        &self,
-        workspace_id: Uuid,
-        document_id: Uuid,
-    ) -> anyhow::Result<Vec<ShareRow>>;
-
-    async fn delete_share(&self, workspace_id: Uuid, token: &str) -> anyhow::Result<bool>;
-
-    async fn validate_share_token(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<Option<(Uuid, String, Option<chrono::DateTime<chrono::Utc>>, String)>>; // (document_id, permission, expires_at, title)
-
-    async fn list_applicable_shares_for_doc(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Vec<(String, String, Option<chrono::DateTime<chrono::Utc>>)>>; // (token, permission, expires)
-
-    async fn list_active_shares(&self, workspace_id: Uuid) -> anyhow::Result<Vec<ShareRow>>;
-
-    async fn resolve_share_by_token(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            Option<chrono::DateTime<chrono::Utc>>,
-            Uuid,
-            String,
-            Uuid,
-        )>,
-    >; // (share_id, permission, expires_at, shared_id, shared_type, workspace_id)
-
-    async fn list_share_mounts(&self, workspace_id: Uuid) -> anyhow::Result<Vec<ShareMountRow>>;
-
-    async fn create_share_mount(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        token: &str,
-        target_document_id: Uuid,
-        target_document_type: &str,
-        target_title: &str,
-        permission: &str,
-        parent_folder_id: Option<Uuid>,
-    ) -> anyhow::Result<Uuid>;
-
-    async fn delete_share_mount(&self, workspace_id: Uuid, mount_id: Uuid) -> anyhow::Result<bool>;
-
-    async fn get_share_document_meta(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<Option<(Uuid, Uuid, Uuid)>>; // (document_id, owner_id, workspace_id)
-
-    async fn list_subtree_nodes(
-        &self,
-        root_id: Uuid,
-    ) -> anyhow::Result<
-        Vec<(
-            Uuid,
-            String,
-            String,
-            Option<Uuid>,
-            chrono::DateTime<chrono::Utc>,
-            chrono::DateTime<chrono::Utc>,
-        )>,
-    >; // (id,title,type,parent_id,created_at,updated_at)
-
-    async fn list_materialized_children(&self, parent_share_id: Uuid) -> anyhow::Result<Vec<Uuid>>;
-
-    async fn materialize_folder_share(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        token: &str,
-    ) -> anyhow::Result<u64>;
-
-    async fn revoke_subtree_shares(&self, workspace_id: Uuid, root_id: Uuid)
-        -> anyhow::Result<u64>;
-}
diff --git a/api/src/application/ports/storage_reconcile_backend.rs b/api/src/application/ports/storage_reconcile_backend.rs
deleted file mode 100644
index 03b45124..00000000
--- a/api/src/application/ports/storage_reconcile_backend.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-#[async_trait]
-pub trait StorageReconcileBackend: Send + Sync {
-    async fn list_paths(&self, user_id: Uuid) -> anyhow::Result<Vec<String>>;
-}
diff --git a/api/src/application/ports/tagging_repository.rs b/api/src/application/ports/tagging_repository.rs
deleted file mode 100644
index 42bbd001..00000000
--- a/api/src/application/ports/tagging_repository.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-#[async_trait]
-pub trait TaggingRepository: Send + Sync {
-    async fn clear_document_tags(&self, doc_id: Uuid) -> anyhow::Result<()>;
-    async fn upsert_tag_return_id(&self, name: &str) -> anyhow::Result<i64>;
-    async fn owner_doc_exists(&self, doc_id: Uuid, owner_id: Uuid) -> anyhow::Result<bool>;
-    async fn associate_document_tag(&self, doc_id: Uuid, tag_id: i64) -> anyhow::Result<()>;
-}
diff --git a/api/src/application/services/api_tokens.rs b/api/src/application/services/api_tokens.rs
deleted file mode 100644
index f6c6a8ec..00000000
--- a/api/src/application/services/api_tokens.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-use std::sync::Arc;
-
-use argon2::{
-    Argon2,
-    password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
-};
-use rand::{Rng, distributions::Alphanumeric, rngs::OsRng};
-use uuid::Uuid;
-
-use crate::application::dto::api_tokens::{ApiTokenDto, CreatedApiTokenDto};
-use crate::application::ports::api_token_repository::ApiTokenRepository;
-use crate::application::services::errors::ServiceError;
-use crate::application::use_cases::api_tokens::create_token::CreateApiToken;
-use crate::application::use_cases::api_tokens::list_tokens::ListApiTokens;
-use crate::application::use_cases::api_tokens::revoke_token::RevokeApiToken;
-use crate::application::utils::hash::sha256_hex_str;
-use crate::domain::workspaces::permissions::{PERM_API_TOKEN_MANAGE, PermissionSet};
-
-pub struct ApiTokenService {
-    repo: Arc<dyn ApiTokenRepository>,
-}
-
-impl ApiTokenService {
-    pub fn new(repo: Arc<dyn ApiTokenRepository>) -> Self {
-        Self { repo }
-    }
-
-    pub async fn list(
-        &self,
-        workspace_id: Uuid,
-        permissions: &PermissionSet,
-    ) -> Result<Vec<ApiTokenDto>, ServiceError> {
-        ensure_api_token_permission(workspace_id, permissions)?;
-        let uc = ListApiTokens {
-            repo: self.repo.as_ref(),
-        };
-        uc.execute(workspace_id).await.map_err(ServiceError::from)
-    }
-
-    pub async fn create(
-        &self,
-        workspace_id: Uuid,
-        user_id: Uuid,
-        permissions: &PermissionSet,
-        name: Option<&str>,
-    ) -> Result<CreatedApiTokenDto, ServiceError> {
-        ensure_api_token_permission(workspace_id, permissions)?;
-        let uc = CreateApiToken {
-            repo: self.repo.as_ref(),
-        };
-        uc.execute(workspace_id, user_id, name)
-            .await
-            .map_err(ServiceError::from)
-    }
-
-    pub async fn revoke(
-        &self,
-        workspace_id: Uuid,
-        id: Uuid,
-        permissions: &PermissionSet,
-    ) -> Result<bool, ServiceError> {
-        ensure_api_token_permission(workspace_id, permissions)?;
-        let uc = RevokeApiToken {
-            repo: self.repo.as_ref(),
-        };
-        uc.execute(workspace_id, id)
-            .await
-            .map_err(ServiceError::from)
-    }
-}
-
-fn ensure_api_token_permission(
-    _workspace_id: Uuid,
-    permissions: &PermissionSet,
-) -> Result<(), ServiceError> {
-    if permissions.allows(PERM_API_TOKEN_MANAGE) {
-        Ok(())
-    } else {
-        Err(ServiceError::Forbidden)
-    }
-}
-
-pub struct GeneratedApiToken {
-    pub plaintext: String,
-    pub token_hash: String,
-    pub token_digest: String,
-}
-
-pub fn generate_api_token() -> anyhow::Result<GeneratedApiToken> {
-    let random: String = OsRng
-        .sample_iter(&Alphanumeric)
-        .take(48)
-        .map(char::from)
-        .collect();
-    let plaintext = format!("rmd_{random}");
-
-    let salt = SaltString::generate(&mut OsRng);
-    let argon = Argon2::default();
-    let hash = argon
-        .hash_password(plaintext.as_bytes(), &salt)
-        .map_err(|e| anyhow::anyhow!(e.to_string()))?
-        .to_string();
-    let digest = compute_digest(&plaintext);
-
-    Ok(GeneratedApiToken {
-        plaintext,
-        token_hash: hash,
-        token_digest: digest,
-    })
-}
-
-pub fn compute_digest(token: &str) -> String {
-    sha256_hex_str(token)
-}
-
-pub fn verify_token(token: &str, token_hash: &str) -> anyhow::Result<bool> {
-    let parsed = PasswordHash::new(token_hash).map_err(|e| anyhow::anyhow!(e.to_string()))?;
-    Ok(Argon2::default()
-        .verify_password(token.as_bytes(), &parsed)
-        .is_ok())
-}
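[Editor's note] The token helpers deleted above use a two-hash design: a SHA-256 digest for indexed lookup plus an Argon2 hash for verification. A usage sketch built only from those functions (`issue_and_check` is an illustrative wrapper, not part of this diff):

```rust
// Sketch: generate_api_token() yields the plaintext exactly once, alongside
// an Argon2 hash (stored, used for verification) and a SHA-256 digest
// (stored, used to locate the record without scanning every Argon2 hash).
fn issue_and_check() -> anyhow::Result<()> {
    let generated = generate_api_token()?;
    assert!(generated.plaintext.starts_with("rmd_"));

    // Later, an incoming token is found by digest, then verified with Argon2.
    let digest = compute_digest(&generated.plaintext);
    assert_eq!(digest, generated.token_digest);
    assert!(verify_token(&generated.plaintext, &generated.token_hash)?);
    Ok(())
}
```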
diff --git a/api/src/application/services/auth/service.rs b/api/src/application/services/auth/service.rs
deleted file mode 100644
index b54d269d..00000000
--- a/api/src/application/services/auth/service.rs
+++ /dev/null
@@ -1,136 +0,0 @@
-use std::sync::Arc;
-
-use anyhow::Error as AnyError;
-use chrono::Utc;
-use jsonwebtoken::errors::ErrorKind;
-use jsonwebtoken::{DecodingKey, EncodingKey, Header, Validation};
-use serde::{Deserialize, Serialize};
-use uuid::Uuid;
-
-use crate::application::services::auth::token_validation::TokenValidationService;
-use crate::application::services::errors::ServiceError;
-
-#[derive(Clone)]
-pub struct AuthService {
-    jwt_secret: String,
-    tokens: Arc<TokenValidationService>,
-    jwt_expires_secs: usize,
-}
-
-#[derive(Debug, Clone)]
-pub struct IssuedSession {
-    pub token: String,
-    pub expires_at: usize,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-struct Claims {
-    sub: String,
-    #[serde(default)]
-    workspace_id: Option<String>,
-    #[serde(default)]
-    iat: usize,
-    #[allow(dead_code)]
-    exp: usize,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    sid: Option<String>,
-}
-
-impl AuthService {
-    fn decode_claims(&self, token: &str) -> Result<Claims, jsonwebtoken::errors::Error> {
-        jsonwebtoken::decode::<Claims>(
-            token,
-            &DecodingKey::from_secret(self.jwt_secret.as_bytes()),
-            &Validation::default(),
-        )
-        .map(|data| data.claims)
-    }
-
-    pub fn new(
-        jwt_secret: impl Into<String>,
-        tokens: Arc<TokenValidationService>,
-        jwt_expires_secs: usize,
-    ) -> Self {
-        Self {
-            jwt_secret: jwt_secret.into(),
-            tokens,
-            jwt_expires_secs,
-        }
-    }
-
-    pub async fn subject_from_token(&self, token: &str) -> Result<Option<String>, ServiceError> {
-        match self.decode_claims(token) {
-            Ok(claims) => {
-                return Ok(Some(claims.sub));
-            }
-            Err(err) => {
-                if matches!(err.kind(), ErrorKind::ExpiredSignature) {
-                    return Err(ServiceError::TokenExpired);
-                }
-            }
-        }
-
-        self.tokens
-            .validate(token)
-            .await
-            .map(|opt| opt.map(|(user_id, _)| user_id.to_string()))
-    }
-
-    pub fn workspace_from_token_claim(&self, token: &str) -> Option<Uuid> {
-        self.decode_claims(token)
-            .ok()
-            .and_then(|claims| claims.workspace_id)
-            .and_then(|raw| Uuid::parse_str(&raw).ok())
-    }
-
-    pub fn session_id_from_token_claim(&self, token: &str) -> Option<Uuid> {
-        self.decode_claims(token)
-            .ok()
-            .and_then(|claims| claims.sid)
-            .and_then(|raw| Uuid::parse_str(&raw).ok())
-    }
-
-    pub async fn workspace_from_token_async(
-        &self,
-        token: &str,
-    ) -> Result<Option<Uuid>, ServiceError> {
-        if let Some(id) = self.workspace_from_token_claim(token) {
-            return Ok(Some(id));
-        }
-        self.tokens
-            .validate(token)
-            .await
-            .map(|opt| opt.map(|(_, workspace_id)| workspace_id))
-    }
-
-    pub fn issue_session(
-        &self,
-        user_id: Uuid,
-        workspace_id: Uuid,
-        session_id: Option<Uuid>,
-    ) -> Result<IssuedSession, ServiceError> {
-        let now = Utc::now().timestamp() as usize;
-        let exp = now + self.jwt_expires_secs;
-        let claims = Claims {
-            sub: user_id.to_string(),
-            workspace_id: Some(workspace_id.to_string()),
-            iat: now,
-            exp,
-            sid: session_id.map(|id| id.to_string()),
-        };
-        let token = jsonwebtoken::encode(
-            &Header::default(),
-            &claims,
-            &EncodingKey::from_secret(self.jwt_secret.as_bytes()),
-        )
-        .map_err(|err| ServiceError::Unexpected(AnyError::new(err)))?;
-        Ok(IssuedSession {
-            token,
-            expires_at: exp,
-        })
-    }
-
-    pub fn session_ttl_secs(&self) -> usize {
-        self.jwt_expires_secs
-    }
-}
diff --git a/api/src/application/services/auth/token_validation.rs b/api/src/application/services/auth/token_validation.rs
deleted file mode 100644
index 4465b455..00000000
--- a/api/src/application/services/auth/token_validation.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use std::sync::Arc;
-
-use uuid::Uuid;
-
-use crate::application::ports::api_token_repository::ApiTokenRepository;
-use crate::application::services::api_tokens::{compute_digest, verify_token};
-use crate::application::services::errors::ServiceError;
-
-pub struct TokenValidationService {
-    repo: Arc<dyn ApiTokenRepository>,
-}
-
-impl TokenValidationService {
-    pub fn new(repo: Arc<dyn ApiTokenRepository>) -> Self {
-        Self { repo }
-    }
-
-    pub async fn validate(&self, token: &str) -> Result<Option<(Uuid, Uuid)>, ServiceError> {
-        let digest = compute_digest(token);
-        let record = self
-            .repo
-            .find_by_digest(&digest)
-            .await
-            .map_err(ServiceError::from)?;
-        let Some(secret) = record else {
-            return Ok(None);
-        };
-        if secret.token.revoked_at.is_some() {
-            return Ok(None);
-        }
-        let ok = verify_token(token, &secret.token_hash).map_err(ServiceError::from)?;
-        if !ok {
-            return Ok(None);
-        }
-        self.repo
-            .touch_last_used(secret.token.id)
-            .await
-            .map_err(ServiceError::from)?;
-        Ok(Some((secret.token.owner_id, secret.token.workspace_id)))
-    }
-}
diff --git a/api/src/application/services/authorization.rs b/api/src/application/services/authorization.rs
deleted file mode 100644
index 546a03c1..00000000
--- a/api/src/application/services/authorization.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-use std::sync::Arc;
-
-use uuid::Uuid;
-
-use crate::application::access::{self, Actor, Capability};
-use crate::application::ports::access_repository::AccessRepository;
-use crate::application::ports::share_access_port::ShareAccessPort;
-use crate::application::services::errors::ServiceError;
-
-#[derive(Clone)]
-pub struct AuthorizationService {
-    access_repo: Arc<dyn AccessRepository>,
-    share_access: Arc<dyn ShareAccessPort>,
-}
-
-impl AuthorizationService {
-    pub fn new(
-        access_repo: Arc<dyn AccessRepository>,
-        share_access: Arc<dyn ShareAccessPort>,
-    ) -> Self {
-        Self {
-            access_repo,
-            share_access,
-        }
-    }
-
-    pub async fn resolve_document(&self, actor: &Actor, doc_id: Uuid) -> Capability {
-        access::resolve_document(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-    }
-
-    pub async fn require_view(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-    ) -> Result<Capability, ServiceError> {
-        access::require_view(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Forbidden)
-    }
-
-    pub async fn require_edit(&self, actor: &Actor, doc_id: Uuid) -> Result<(), ServiceError> {
-        access::require_edit(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Forbidden)
-    }
-}
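[Editor's note] A sketch of the issue/decode round trip the deleted `AuthService` supports, using only methods visible in this diff; the `demo` wrapper and the random ids are illustrative assumptions:

```rust
// Hypothetical caller: issue a session JWT, then read its claims back.
// subject_from_token falls back to API-token validation when JWT decoding
// fails for any reason other than expiry (expiry maps to TokenExpired).
async fn demo(auth: &AuthService) -> Result<(), ServiceError> {
    let user = uuid::Uuid::new_v4();
    let workspace = uuid::Uuid::new_v4();

    let issued = auth.issue_session(user, workspace, None)?;

    // The `sub` claim round-trips as the user id string.
    let sub = auth.subject_from_token(&issued.token).await?;
    assert_eq!(sub.as_deref(), Some(user.to_string().as_str()));

    // The workspace claim parses back to the original Uuid.
    assert_eq!(auth.workspace_from_token_claim(&issued.token), Some(workspace));
    Ok(())
}
```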
diff --git a/api/src/application/services/documents.rs b/api/src/application/services/documents.rs
deleted file mode 100644
index 1df89a05..00000000
--- a/api/src/application/services/documents.rs
+++ /dev/null
@@ -1,1546 +0,0 @@
-use std::path::Path;
-use std::sync::Arc;
-
-use sqlx::{Pool, Postgres, Transaction};
-use tracing::{error, warn};
-use uuid::Uuid;
-
-use crate::application::access::{self, Actor};
-use crate::application::dto::document_export::{DocumentDownload, DocumentDownloadFormat};
-use crate::application::dto::documents::{
-    DocumentListFilter, SnapshotDiffBaseMode, SnapshotDiffDto, SnapshotDiffSideDto,
-    SnapshotSummaryDto,
-};
-use crate::application::ports::access_repository::AccessRepository;
-use crate::application::ports::doc_event_log::DocEventLog;
-use crate::application::ports::document_exporter::DocumentExporter;
-use crate::application::ports::document_repository::{
-    DocMeta, DocumentListState, DocumentPathConflictError, DocumentRepository,
-};
-use crate::application::ports::files_repository::FilesRepository;
-use crate::application::ports::realtime_port::RealtimeEngine;
-use crate::application::ports::share_access_port::ShareAccessPort;
-use crate::application::ports::storage_port::StorageResolverPort;
-use crate::application::ports::storage_projection_queue::{
-    StorageDeleteJobMetadata, StorageJobReason, StorageProjectionJobKind, StorageProjectionQueue,
-    WorkspaceJobMetadata,
-};
-use crate::application::services::errors::ServiceError;
-use crate::application::services::realtime::snapshot::{SnapshotService, snapshot_from_markdown};
-use crate::application::use_cases::documents::archive_document::ArchiveDocument;
-use crate::application::use_cases::documents::create_document::CreateDocument;
-use crate::application::use_cases::documents::delete_document::DeleteDocument;
-use crate::application::use_cases::documents::download_document::{
-    DownloadDocument as DownloadDocumentUseCase, FolderDownloadUnsupportedFormat,
-};
-use crate::application::use_cases::documents::get_backlinks::GetBacklinks;
-use crate::application::use_cases::documents::get_document::GetDocument;
-use crate::application::use_cases::documents::get_outgoing_links::GetOutgoingLinks;
-use crate::application::use_cases::documents::list_documents::ListDocuments;
-use crate::application::use_cases::documents::list_snapshots::ListSnapshots;
-use crate::application::use_cases::documents::restore_snapshot::RestoreSnapshot;
-use crate::application::use_cases::documents::search_documents::SearchDocuments;
-use crate::application::use_cases::documents::snapshot_diff::{
-    SnapshotDiff, SnapshotDiffResult, SnapshotDiffSide,
-};
-use crate::application::use_cases::documents::snapshot_download::{
-    DownloadSnapshot, SnapshotDownload,
-};
-use crate::application::use_cases::documents::unarchive_document::UnarchiveDocument;
-use crate::application::use_cases::documents::update_document::UpdateDocument;
-use crate::application::utils::hash::sha256_hex;
-use crate::domain::documents::document::{
-    BacklinkInfo as DomainBacklink, Document as DomainDocument, OutgoingLink as DomainOutgoingLink,
-    SearchHit,
-};
-use crate::domain::workspaces::permissions::{
-    PERM_DOC_ARCHIVE, PERM_DOC_CREATE, PERM_DOC_DELETE, PERM_DOC_EDIT, PERM_DOC_MOVE,
-    PERM_FOLDER_CREATE, PERM_FOLDER_DELETE, PermissionSet,
-};
-use serde_json::json;
-
-pub struct DocumentService {
-    db: Pool<Postgres>,
-    document_repo: Arc<dyn DocumentRepository>,
-    files_repo: Arc<dyn FilesRepository>,
-    access_repo: Arc<dyn AccessRepository>,
-    share_access: Arc<dyn ShareAccessPort>,
-    storage: Arc<dyn StorageResolverPort>,
-    events: Arc<dyn DocEventLog>,
-    storage_jobs: Arc<dyn StorageProjectionQueue>,
-    realtime: Arc<dyn RealtimeEngine>,
-    snapshot_service: Arc<SnapshotService>,
-    exporter: Arc<dyn DocumentExporter>,
-}
-
-impl DocumentService {
-    #[allow(clippy::too_many_arguments)]
-    pub fn new(
-        db: Pool<Postgres>,
-        document_repo: Arc<dyn DocumentRepository>,
-        files_repo: Arc<dyn FilesRepository>,
-        access_repo: Arc<dyn AccessRepository>,
-        share_access: Arc<dyn ShareAccessPort>,
-        storage: Arc<dyn StorageResolverPort>,
-        events: Arc<dyn DocEventLog>,
-        storage_jobs: Arc<dyn StorageProjectionQueue>,
-        realtime: Arc<dyn RealtimeEngine>,
-        snapshot_service: Arc<SnapshotService>,
-        exporter: Arc<dyn DocumentExporter>,
-    ) -> Self {
-        Self {
-            db,
-            document_repo,
-            files_repo,
-            access_repo,
-            share_access,
-            storage,
-            events,
-            storage_jobs,
-            realtime,
-            snapshot_service,
-            exporter,
-        }
-    }
-
-    async fn begin_transaction(&self) -> Result<Transaction<'static, Postgres>, ServiceError> {
-        self.db.begin().await.map_err(map_sqlx_error)
-    }
-
-    pub async fn list_for_user(
-        &self,
-        workspace_id: Uuid,
-        query: Option<String>,
-        tag: Option<String>,
-        state: DocumentListFilter,
-    ) -> Result<Vec<DomainDocument>, ServiceError> {
-        let uc = ListDocuments {
-            repo: self.document_repo.as_ref(),
-        };
-        uc.execute(workspace_id, query, tag, to_repo_state(state))
-            .await
-            .map_err(ServiceError::from)
-    }
-
-    pub async fn create_for_user(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        permissions: &PermissionSet,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> Result<DomainDocument, ServiceError> {
-        ensure_can_create(permissions, doc_type)?;
-        if let Some(parent_id) = parent_id {
-            self.ensure_active_parent(workspace_id, parent_id).await?;
-        }
-        let uc = CreateDocument {
-            repo: self.document_repo.as_ref(),
-        };
-        let mut tx = self.begin_transaction().await?;
-        let doc = match uc
-            .execute_tx(
-                &mut tx,
-                workspace_id,
-                actor_id,
-                title,
-                parent_id,
-                doc_type,
-                created_by_plugin,
-            )
-            .await
-        {
-            Ok(doc) => doc,
-            Err(err) => {
-                if err.downcast_ref::<DocumentPathConflictError>().is_some() {
-                    tx.rollback().await.ok();
-                    return Err(ServiceError::Conflict);
-                }
-                error!(error = ?err, "document_create_repo_failed");
-                tx.rollback().await.ok();
-                return Err(ServiceError::from(err));
-            }
-        };
-        self.enqueue_projection_for_document_tx(&mut tx, &doc, "create_document")
-            .await?;
-        let repo_path = doc.desired_path.clone();
-        let event_payload = json!({
-            "title": doc.title,
-            "parent_id": doc.parent_id,
-            "doc_type": doc.doc_type,
-            "repo_path": repo_path,
-            "slug": doc.slug,
-            "desired_path": doc.desired_path,
-            "owner_id": doc.workspace_id,
-            "actor_id": actor_id,
-        });
-        tx.commit().await.map_err(map_sqlx_error)?;
-        self.record_event(
-            doc.workspace_id,
-            doc.id,
-            "document.created",
-            Some(event_payload),
-        )
-        .await;
-        Ok(doc)
-    }
-
-    pub async fn duplicate_document(
-        &self,
-        workspace_id: Uuid,
-        source_id: Uuid,
-        actor_id: Uuid,
-        permissions: &PermissionSet,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> Result<DomainDocument, ServiceError> {
-        let actor = Actor::User(actor_id);
-        access::require_view(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            &actor,
-            source_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Forbidden)?;
-
-        let source = self
-            .document_repo
-            .get_by_id(source_id)
-            .await
-            .map_err(ServiceError::from)?
-            .ok_or(ServiceError::NotFound)?;
-        if source.workspace_id != workspace_id {
-            return Err(ServiceError::NotFound);
-        }
-        if source.doc_type == "folder" {
-            return Err(ServiceError::BadRequest("cannot_duplicate_folder"));
-        }
-
-        let target_parent = match parent_id {
-            Some(explicit) => explicit,
-            None => source.parent_id.or(source.archived_parent_id),
-        };
-
-        let source_content = self
-            .realtime
-            .get_content(&source_id.to_string())
-            .await
-            .map_err(ServiceError::from)?
-            .unwrap_or_default();
-
-        let attachments = self.snapshot_attachments(source.id).await?;
-        let new_title = duplicate_title(&source.title, title);
-        let new_doc = self
-            .create_for_user(
-                workspace_id,
-                actor_id,
-                permissions,
-                &new_title,
-                target_parent,
-                &source.doc_type,
-                source.created_by_plugin.as_deref(),
-            )
-            .await?;
-
-        let result = async {
-            let updated_doc = self
-                .update_content(&actor, new_doc.id, &source_content)
-                .await?;
-
-            self.copy_attachments(&updated_doc, &attachments, actor_id)
-                .await?;
-
-            Ok::<_, ServiceError>(updated_doc)
-        }
-        .await;
-
-        match result {
-            Ok(doc) => Ok(doc),
-            Err(err) => {
-                if let Err(clean_err) = self
-                    .delete_for_user_internal(
-                        workspace_id,
-                        new_doc.id,
-                        Some(actor_id),
-                        permissions,
-                        false,
-                    )
-                    .await
-                {
-                    warn!(
-                        document_id = %new_doc.id,
-                        error = ?clean_err,
-                        "duplicate_cleanup_failed"
-                    );
-                }
-                Err(err)
-            }
-        }
-    }
-
-    pub async fn get_for_actor(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-    ) -> Result<DomainDocument, ServiceError> {
-        let uc = GetDocument {
-            repo: self.document_repo.as_ref(),
-            shares: self.share_access.as_ref(),
-            access: self.access_repo.as_ref(),
-        };
-        uc.execute(actor, doc_id)
-            .await
-            .map_err(ServiceError::from)?
-            .ok_or(ServiceError::NotFound)
-    }
-
-    pub async fn delete_for_user(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        actor_id: Option<Uuid>,
-        permissions: &PermissionSet,
-    ) -> Result<bool, ServiceError> {
-        self.delete_for_user_internal(workspace_id, doc_id, actor_id, permissions, true)
-            .await
-    }
-
-    async fn delete_for_user_internal(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        actor_id: Option<Uuid>,
-        permissions: &PermissionSet,
-        enforce_permissions: bool,
-    ) -> Result<bool, ServiceError> {
-        let mut tx = self.begin_transaction().await?;
-        let root_meta = self
-            .document_repo
-            .get_meta_for_owner_tx(&mut tx, doc_id, workspace_id)
-            .await
-            .map_err(ServiceError::from)?
-            .ok_or(ServiceError::NotFound)?;
-        if enforce_permissions {
-            ensure_can_delete(permissions, &root_meta.doc_type)?;
-        }
-        let delete_plan = self
-            .build_delete_plan(&mut tx, doc_id, workspace_id, root_meta.clone())
-            .await?;
-        if delete_plan.is_empty() {
-            tx.rollback().await.map_err(map_sqlx_error)?;
-            return Ok(false);
-        }
-        let permission_snapshot = if enforce_permissions {
-            permissions.to_vec()
-        } else {
-            // Cleanup flows (e.g., duplicate rollback) bypass user permissions so storage delete
-            // jobs always have authority to remove docs and attachments.
-            PermissionSet::all().to_vec()
-        };
-        let uc = DeleteDocument {
-            repo: self.document_repo.as_ref(),
-        };
-        let mut deleted = false;
-        let mut delete_events = Vec::new();
-        for entry in delete_plan {
-            if uc
-                .execute_tx(&mut tx, entry.doc_id, workspace_id)
-                .await
-                .map_err(ServiceError::from)?
-                .is_some()
-            {
-                deleted = true;
-                self.enqueue_delete_job_for_entry(
-                    &mut tx,
-                    workspace_id,
-                    &entry,
-                    &permission_snapshot,
-                    actor_id,
-                )
-                .await?;
-                delete_events.push(entry.clone());
-            }
-        }
-        if deleted {
-            tx.commit().await.map_err(map_sqlx_error)?;
-            for entry in delete_events {
-                self.record_delete_event(workspace_id, &entry, actor_id)
-                    .await;
-            }
-            Ok(true)
-        } else {
-            tx.rollback().await.map_err(map_sqlx_error)?;
-            Ok(false)
-        }
-    }
-
-    pub async fn get_content(&self, actor: &Actor, doc_id: Uuid) -> Result<String, ServiceError> {
-        access::require_view(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::NotFound)?;
-
-        let content = self
-            .realtime
-            .get_content(&doc_id.to_string())
-            .await
-            .map_err(ServiceError::from)?
-            .unwrap_or_default();
-        Ok(content)
-    }
-
-    pub async fn update_content(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-        content: &str,
-    ) -> Result<DomainDocument, ServiceError> {
-        access::require_edit(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Unauthorized)?;
-
-        let snapshot_bytes = snapshot_from_markdown(content);
-        self.realtime
-            .apply_snapshot(&doc_id.to_string(), snapshot_bytes.as_slice())
-            .await
-            .map_err(ServiceError::from)?;
-
-        if let Err(err) = self.realtime.force_persist(&doc_id.to_string()).await {
-            warn!(document_id = %doc_id, error = ?err, "document_force_persist_after_update_failed");
-        }
-
-        let doc = self
-            .document_repo
-            .get_by_id(doc_id)
-            .await
-            .map_err(ServiceError::from)?
-            .ok_or(ServiceError::NotFound)?;
-        let mut tx = self.begin_transaction().await?;
-        self.enqueue_doc_sync_tx(&mut tx, doc.workspace_id, doc.id, "update_content")
-            .await?;
-        let repo_path = doc.desired_path.clone();
-        let event_payload = json!({
-            "repo_path": repo_path,
-            "desired_path": doc.desired_path,
-            "slug": doc.slug,
-            "doc_type": doc.doc_type,
-            "owner_id": doc.workspace_id,
-        });
-        tx.commit().await.map_err(map_sqlx_error)?;
-        self.record_event(
-            doc.workspace_id,
-            doc.id,
-            "document.content_updated",
-            Some(event_payload),
-        )
-        .await;
-        Ok(doc)
-    }
-
-    pub async fn patch_content(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-        operations: &[DocumentPatchOperation],
-    ) -> Result<DomainDocument, ServiceError> {
-        if operations.is_empty() {
-            return Err(ServiceError::BadRequest("patch_operations_required"));
-        }
-
-        access::require_edit(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Unauthorized)?;
-
-        let current = self
-            .realtime
-            .get_content(&doc_id.to_string())
-            .await
-            .map_err(ServiceError::from)?
-            .unwrap_or_default();
-        let updated = apply_patch_operations(&current, operations)?;
-
-        self.update_content(actor, doc_id, &updated).await
-    }
-
-    pub async fn download_document(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-        format: DocumentDownloadFormat,
-    ) -> Result<DocumentDownload, ServiceError> {
-        let uc = DownloadDocumentUseCase {
-            documents: self.document_repo.as_ref(),
-            files: self.files_repo.as_ref(),
-            storage: self.storage.as_ref(),
-            access: self.access_repo.as_ref(),
-            shares: self.share_access.as_ref(),
-            snapshot: self.snapshot_service.as_ref(),
-            exporter: self.exporter.as_ref(),
-        };
-        uc.execute(actor, doc_id, format)
-            .await
-            .map_err(|err| {
-                if err
-                    .downcast_ref::<FolderDownloadUnsupportedFormat>()
-                    .is_some()
-                {
-                    ServiceError::BadRequest("folder_archive_only")
-                } else {
-                    ServiceError::from(err)
-                }
-            })?
-            .ok_or(ServiceError::NotFound)
-    }
-
-    pub async fn download_workspace_root(
-        &self,
-        actor: &Actor,
-        workspace_id: Uuid,
-        workspace_name: &str,
-        format: DocumentDownloadFormat,
-    ) -> Result<DocumentDownload, ServiceError> {
-        let uc = DownloadDocumentUseCase {
-            documents: self.document_repo.as_ref(),
-            files: self.files_repo.as_ref(),
-            storage: self.storage.as_ref(),
-            access: self.access_repo.as_ref(),
-            shares: self.share_access.as_ref(),
-            snapshot: self.snapshot_service.as_ref(),
-            exporter: self.exporter.as_ref(),
-        };
-        uc.download_workspace_root(actor, workspace_id, workspace_name, format)
-            .await
-            .map_err(|err| {
-                if err
-                    .downcast_ref::<FolderDownloadUnsupportedFormat>()
-                    .is_some()
-                {
-                    ServiceError::BadRequest("folder_archive_only")
-                } else {
-                    ServiceError::from(err)
-                }
-            })?
-            .ok_or(ServiceError::NotFound)
-    }
-
-    pub async fn update_metadata(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        actor_id: Uuid,
-        permissions: &PermissionSet,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> Result<DomainDocument, ServiceError> {
-        let meta = self.load_owner_meta(workspace_id, doc_id).await?;
-        if meta.archived_at.is_some() {
-            return Err(ServiceError::Conflict);
-        }
-        let rename_requested = title.is_some();
-        let move_requested = parent_id.is_some();
-        if rename_requested {
-            ensure_can_edit(permissions, &meta.doc_type)?;
-        }
-        if move_requested {
-            ensure_can_move(permissions, &meta.doc_type)?;
-        }
-        if let Some(Some(parent)) = parent_id {
-            self.ensure_active_parent(workspace_id, parent).await?;
-        }
-        let previous_repo_path = workspace_repo_relative(workspace_id, meta.path.as_deref());
-        let uc = UpdateDocument {
-            repo: self.document_repo.as_ref(),
-        };
-        let mut tx = self.begin_transaction().await?;
-        let doc = match uc
-            .execute_tx(&mut tx, doc_id, workspace_id, title, parent_id)
-            .await
-        {
-            Ok(Some(doc)) => doc,
-            Ok(None) => {
-                tx.rollback().await.ok();
-                return Err(ServiceError::NotFound);
-            }
-            Err(err) => {
-                if err.downcast_ref::<DocumentPathConflictError>().is_some() {
-                    tx.rollback().await.ok();
-                    return Err(ServiceError::Conflict);
-                }
-                error!(error = ?err, "document_update_repo_failed");
-                return Err(ServiceError::from(err));
-            }
-        };
-        self.enqueue_projection_for_document_tx(&mut tx, &doc, "update_metadata")
-            .await?;
-        let repo_path = doc.desired_path.clone();
-        let event_payload = json!({
-            "title": doc.title,
-            "parent_id": doc.parent_id,
-            "repo_path": repo_path,
-            "doc_type": doc.doc_type,
-            "slug": doc.slug,
-            "desired_path": doc.desired_path,
-            "owner_id": doc.workspace_id,
-            "actor_id": actor_id,
-            "previous_path": previous_repo_path,
-            "previous_desired_path": meta.desired_path,
-        });
-        tx.commit().await.map_err(map_sqlx_error)?;
-        self.record_event(
-            doc.workspace_id,
-            doc.id,
-            "document.metadata_updated",
-            Some(event_payload),
-        )
-        .await;
-        Ok(doc)
-    }
-
-    pub async fn archive_document(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        actor_id: Uuid,
-        permissions: &PermissionSet,
-    ) -> Result<DomainDocument, ServiceError> {
-        let meta = self.load_owner_meta(workspace_id, doc_id).await?;
-        if meta.archived_at.is_some() {
-            return Err(ServiceError::Conflict);
-        }
-        ensure_can_archive(permissions, &meta.doc_type)?;
-        let previous_repo_path = workspace_repo_relative(workspace_id, meta.path.as_deref());
-        let uc = ArchiveDocument {
-            repo: self.document_repo.as_ref(),
-            realtime: self.realtime.as_ref(),
-        };
-        let mut tx = self.begin_transaction().await?;
-        let doc = uc
-            .execute_tx(&mut tx, workspace_id, doc_id, actor_id)
-            .await
-            .map_err(ServiceError::from)?
-            .ok_or(ServiceError::NotFound)?;
-        self.enqueue_projection_for_document_tx(&mut tx, &doc, "archive_document")
-            .await?;
-        let repo_path = doc.desired_path.clone();
-        let event_payload = json!({
-            "repo_path": repo_path,
-            "doc_type": doc.doc_type,
-            "slug": doc.slug,
-            "desired_path": doc.desired_path,
-            "owner_id": doc.workspace_id,
-            "actor_id": actor_id,
-            "previous_path": previous_repo_path,
-            "previous_desired_path": meta.desired_path,
-        });
-        tx.commit().await.map_err(map_sqlx_error)?;
-        self.record_event(
-            doc.workspace_id,
-            doc.id,
-            "document.archived",
-            Some(event_payload),
-        )
-        .await;
-        Ok(doc)
-    }
-
-    pub async fn unarchive_document(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        actor_id: Uuid,
-        permissions: &PermissionSet,
-    ) -> Result<DomainDocument, ServiceError> {
-        let meta = self.load_owner_meta(workspace_id, doc_id).await?;
-        if meta.archived_at.is_none() {
-            return Err(ServiceError::Conflict);
-        }
-        ensure_can_archive(permissions, &meta.doc_type)?;
-        let previous_repo_path = workspace_repo_relative(workspace_id, meta.path.as_deref());
-        let uc = UnarchiveDocument {
-            repo: self.document_repo.as_ref(),
-            realtime: self.realtime.as_ref(),
-        };
-        let mut tx = self.begin_transaction().await?;
-        let doc = uc
-            .execute_tx(&mut tx, workspace_id, doc_id)
-            .await
-            .map_err(ServiceError::from)?
-            .ok_or(ServiceError::NotFound)?;
-        self.enqueue_projection_for_document_tx(&mut tx, &doc, "unarchive_document")
-            .await?;
-        let repo_path = doc.desired_path.clone();
-        let event_payload = json!({
-            "repo_path": repo_path,
-            "doc_type": doc.doc_type,
-            "slug": doc.slug,
-            "desired_path": doc.desired_path,
-            "owner_id": doc.workspace_id,
-            "actor_id": actor_id,
-            "previous_path": previous_repo_path,
-            "previous_desired_path": meta.desired_path,
-        });
-        tx.commit().await.map_err(map_sqlx_error)?;
-        self.record_event(
-            doc.workspace_id,
-            doc.id,
-            "document.unarchived",
-            Some(event_payload),
-        )
-        .await;
-        Ok(doc)
-    }
-
-    pub async fn list_snapshots(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-        limit: i64,
-        offset: i64,
-    ) -> Result<Vec<SnapshotSummaryDto>, ServiceError> {
-        access::require_view(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Unauthorized)?;
-
-        let uc = ListSnapshots {
-            snapshots: self.snapshot_service.as_ref(),
-        };
-        let records = uc
-            .execute(doc_id, limit, offset)
-            .await
-            .map_err(ServiceError::from)?;
-        Ok(records.into_iter().map(SnapshotSummaryDto::from).collect())
-    }
-
-    pub async fn snapshot_diff(
-        &self,
-        actor: &Actor,
-        doc_id: Uuid,
-        snapshot_id: Uuid,
-        compare: Option<Uuid>,
-        base_mode: SnapshotDiffBaseMode,
-    ) -> Result<SnapshotDiffDto, ServiceError> {
-        access::require_view(
-            self.access_repo.as_ref(),
-            self.share_access.as_ref(),
-            actor,
-            doc_id,
-        )
-        .await
-        .map_err(|_| ServiceError::Unauthorized)?;
-
-        let uc = SnapshotDiff {
-            snapshots: self.snapshot_service.as_ref(),
-            realtime: self.realtime.as_ref(),
-        };
-        let result = uc
-            .execute(doc_id, snapshot_id, compare, base_mode)
-            .await
-            .map_err(ServiceError::from)?
- .ok_or(ServiceError::NotFound)?; - - Ok(snapshot_diff_dto_from_result(result)) - } - - pub async fn restore_snapshot( - &self, - actor: &Actor, - doc_id: Uuid, - snapshot_id: Uuid, - ) -> Result { - access::require_edit( - self.access_repo.as_ref(), - self.share_access.as_ref(), - actor, - doc_id, - ) - .await - .map_err(|_| ServiceError::Unauthorized)?; - - let created_by = match actor { - Actor::User(uid) => Some(*uid), - _ => None, - }; - - let uc = RestoreSnapshot { - snapshots: self.snapshot_service.as_ref(), - realtime: self.realtime.as_ref(), - }; - let record = uc - .execute(doc_id, snapshot_id, created_by) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound)?; - - Ok(SnapshotSummaryDto::from(record)) - } - - pub async fn download_snapshot( - &self, - actor: &Actor, - doc_id: Uuid, - snapshot_id: Uuid, - ) -> Result { - access::require_view( - self.access_repo.as_ref(), - self.share_access.as_ref(), - actor, - doc_id, - ) - .await - .map_err(|_| ServiceError::Unauthorized)?; - - let uc = DownloadSnapshot { - files: self.files_repo.as_ref(), - storage: self.storage.as_ref(), - snapshots: self.snapshot_service.as_ref(), - }; - uc.execute(doc_id, snapshot_id) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound) - } - - pub async fn search_for_user( - &self, - workspace_id: Uuid, - query: Option, - limit: i64, - ) -> Result, ServiceError> { - let uc = SearchDocuments { - repo: self.document_repo.as_ref(), - }; - uc.execute(workspace_id, query, limit) - .await - .map_err(ServiceError::from) - } - - pub async fn backlinks( - &self, - actor: &Actor, - workspace_id: Uuid, - doc_id: Uuid, - ) -> Result, ServiceError> { - access::require_view( - self.access_repo.as_ref(), - self.share_access.as_ref(), - actor, - doc_id, - ) - .await - .map_err(|_| ServiceError::NotFound)?; - - let uc = GetBacklinks { - repo: self.document_repo.as_ref(), - }; - uc.execute(workspace_id, doc_id) - .await - .map_err(ServiceError::from) - } - - pub async fn outgoing_links( - &self, - actor: &Actor, - workspace_id: Uuid, - doc_id: Uuid, - ) -> Result, ServiceError> { - access::require_view( - self.access_repo.as_ref(), - self.share_access.as_ref(), - actor, - doc_id, - ) - .await - .map_err(|_| ServiceError::NotFound)?; - - let uc = GetOutgoingLinks { - repo: self.document_repo.as_ref(), - }; - uc.execute(workspace_id, doc_id) - .await - .map_err(ServiceError::from) - } - - async fn snapshot_attachments( - &self, - doc_id: Uuid, - ) -> Result, ServiceError> { - let files = self - .files_repo - .list_files_for_document(doc_id) - .await - .map_err(ServiceError::from)?; - let mut snapshots = Vec::new(); - for file in files { - let abs_path = self.storage.absolute_from_relative(&file.storage_path); - let exists = self - .storage - .exists(&abs_path) - .await - .map_err(ServiceError::from)?; - if !exists { - warn!( - document_id = %doc_id, - storage_path = %file.storage_path, - "duplicate_attachment_missing" - ); - continue; - } - let bytes = self - .storage - .read_bytes(&abs_path) - .await - .map_err(ServiceError::from)?; - let content_hash = hash_bytes(&bytes); - snapshots.push(AttachmentSnapshot { - filename: file.filename, - content_type: file.content_type, - bytes, - content_hash, - }); - } - Ok(snapshots) - } - - async fn copy_attachments( - &self, - target_doc: &DomainDocument, - attachments: &[AttachmentSnapshot], - actor_id: Uuid, - ) -> Result<(), ServiceError> { - if attachments.is_empty() { - return Ok(()); - } - let base_dir = self - .storage - 
.build_doc_dir(target_doc.id) - .await - .map_err(ServiceError::from)?; - for attachment in attachments { - let filename = Path::new(&attachment.filename) - .file_name() - .and_then(|f| f.to_str()) - .map(str::to_string) - .filter(|f| !f.is_empty()) - .unwrap_or_else(|| attachment.filename.clone()); - let target_path = base_dir.join("attachments").join(&filename); - self.storage - .write_bytes(&target_path, &attachment.bytes) - .await - .map_err(ServiceError::from)?; - let storage_path = self - .storage - .relative_from_uploads(&target_path) - .replace('\\', "/"); - self.files_repo - .insert_file( - target_doc.id, - &filename, - attachment.content_type.as_deref(), - attachment.bytes.len() as i64, - &storage_path, - &attachment.content_hash, - ) - .await - .map_err(ServiceError::from)?; - if let Some(repo_path) = - repo_relative_from_storage(target_doc.workspace_id, &storage_path) - { - let payload = json!({ - "repo_path": repo_path, - "storage_path": storage_path, - "backend": "api", - "size": attachment.bytes.len() as i64, - "content_hash": attachment.content_hash, - "workspace_id": target_doc.workspace_id.to_string(), - "actor_id": actor_id.to_string(), - }); - self.record_event( - target_doc.workspace_id, - target_doc.id, - "attachment.ingest_upsert", - Some(payload), - ) - .await; - } - } - Ok(()) - } - - async fn ensure_active_parent( - &self, - workspace_id: Uuid, - parent_id: Uuid, - ) -> Result<(), ServiceError> { - match self - .document_repo - .get_meta_for_owner(parent_id, workspace_id) - .await - .map_err(ServiceError::from)? - { - Some(meta) => { - if meta.archived_at.is_some() { - Err(ServiceError::Conflict) - } else { - Ok(()) - } - } - None => Err(ServiceError::NotFound), - } - } - - async fn load_owner_meta( - &self, - workspace_id: Uuid, - doc_id: Uuid, - ) -> Result { - self.document_repo - .get_meta_for_owner(doc_id, workspace_id) - .await - .map_err(ServiceError::from)? 
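// Aside: `copy_attachments` above reduces each stored filename to its final
// path component before writing, so filenames carrying directory parts are
// flattened to their basename. The same logic in isolation:
use std::path::Path;

fn sanitize(filename: &str) -> String {
    Path::new(filename)
        .file_name()
        .and_then(|f| f.to_str())
        .map(str::to_string)
        .filter(|f| !f.is_empty())
        .unwrap_or_else(|| filename.to_string())
}

fn main() {
    assert_eq!(sanitize("report.pdf"), "report.pdf");
    assert_eq!(sanitize("nested/dir/report.pdf"), "report.pdf");
    // ".." has no final component, so the original value falls through unchanged.
    assert_eq!(sanitize(".."), "..");
}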
- .ok_or(ServiceError::NotFound) - } - - async fn enqueue_projection_for_document_tx( - &self, - tx: &mut Transaction<'_, Postgres>, - doc: &DomainDocument, - reason: &'static str, - ) -> Result<(), ServiceError> { - if doc.doc_type == "folder" { - self.enqueue_folder_sync_tx(tx, doc.workspace_id, doc.id, reason) - .await - } else { - self.enqueue_doc_sync_tx(tx, doc.workspace_id, doc.id, reason) - .await - } - } - - async fn enqueue_doc_sync_tx( - &self, - tx: &mut Transaction<'_, Postgres>, - workspace_id: Uuid, - doc_id: Uuid, - reason: &'static str, - ) -> Result<(), ServiceError> { - let encoded_reason = serde_json::to_string(&StorageJobReason { - reason: reason.to_string(), - metadata: Some(WorkspaceJobMetadata { workspace_id }), - }) - .ok(); - self.storage_jobs - .enqueue_doc_job_tx( - tx, - workspace_id, - doc_id, - StorageProjectionJobKind::DocSync, - encoded_reason.as_deref(), - ) - .await - .map_err(|err| { - warn!( - error = ?err, - doc_id = %doc_id, - "storage_projection_enqueue_failed" - ); - ServiceError::Unexpected(err) - }) - } - - async fn enqueue_doc_delete_tx( - &self, - tx: &mut Transaction<'_, Postgres>, - workspace_id: Uuid, - doc_id: Uuid, - reason: &'static str, - metadata: Option, - ) -> Result<(), ServiceError> { - let encoded_reason = metadata.and_then(|meta| { - serde_json::to_string(&StorageJobReason { - reason: reason.to_string(), - metadata: Some(meta), - }) - .ok() - }); - let reason_str = encoded_reason.as_deref().unwrap_or(reason); - self.storage_jobs - .enqueue_doc_job_tx( - tx, - workspace_id, - doc_id, - StorageProjectionJobKind::DeleteDoc, - Some(reason_str), - ) - .await - .map_err(|err| { - warn!( - error = ?err, - doc_id = %doc_id, - "storage_projection_enqueue_failed" - ); - ServiceError::Unexpected(err) - }) - } - - async fn enqueue_folder_sync_tx( - &self, - tx: &mut Transaction<'_, Postgres>, - workspace_id: Uuid, - folder_id: Uuid, - reason: &'static str, - ) -> Result<(), ServiceError> { - self.storage_jobs - .enqueue_folder_job_tx( - tx, - workspace_id, - folder_id, - StorageProjectionJobKind::FolderSync, - Some(reason), - ) - .await - .map_err(|err| { - warn!( - error = ?err, - folder_id = %folder_id, - "storage_projection_enqueue_failed" - ); - ServiceError::Unexpected(err) - }) - } - - async fn enqueue_folder_delete_tx( - &self, - tx: &mut Transaction<'_, Postgres>, - workspace_id: Uuid, - folder_id: Uuid, - reason: &'static str, - metadata: Option, - ) -> Result<(), ServiceError> { - let encoded_reason = metadata.and_then(|meta| { - serde_json::to_string(&StorageJobReason { - reason: reason.to_string(), - metadata: Some(meta), - }) - .ok() - }); - let reason_str = encoded_reason.as_deref().unwrap_or(reason); - self.storage_jobs - .enqueue_folder_job_tx( - tx, - workspace_id, - folder_id, - StorageProjectionJobKind::DeleteFolder, - Some(reason_str), - ) - .await - .map_err(|err| { - warn!( - error = ?err, - folder_id = %folder_id, - "storage_projection_enqueue_failed" - ); - ServiceError::Unexpected(err) - }) - } - - async fn record_event( - &self, - workspace_id: Uuid, - doc_id: Uuid, - event_type: &'static str, - payload: Option, - ) { - if let Err(err) = self - .events - .append(workspace_id, doc_id, event_type, payload) - .await - { - warn!( - error = ?err, - doc_id = %doc_id, - event_type, - "doc_event_log_append_failed" - ); - } - } - - async fn build_delete_plan( - &self, - tx: &mut Transaction<'_, Postgres>, - doc_id: Uuid, - workspace_id: Uuid, - root_meta: DocMeta, - ) -> Result, ServiceError> { - if root_meta.doc_type != 
"folder" { - let attachments = self - .files_repo - .list_storage_paths_for_document_tx(tx, doc_id) - .await - .map_err(ServiceError::from)?; - return Ok(vec![PendingDelete { - doc_id, - doc_type: root_meta.doc_type.clone(), - meta: root_meta, - attachments, - reason: "delete_document", - }]); - } - - let subtree = self - .document_repo - .list_owned_subtree_documents_tx(tx, workspace_id, doc_id) - .await - .map_err(ServiceError::from)?; - let mut entries = Vec::new(); - for node in subtree { - let meta = if node.id == doc_id { - root_meta.clone() - } else { - self.document_repo - .get_meta_for_owner_tx(tx, node.id, workspace_id) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound)? - }; - let attachments = if node.doc_type != "folder" { - self.files_repo - .list_storage_paths_for_document_tx(tx, node.id) - .await - .map_err(ServiceError::from)? - } else { - Vec::new() - }; - let reason = if node.id == doc_id { - "delete_folder" - } else if node.doc_type == "folder" { - "delete_folder_descendant" - } else { - "delete_document_descendant" - }; - entries.push(PendingDelete { - doc_id: node.id, - doc_type: node.doc_type, - meta, - attachments, - reason, - }); - } - entries.sort_by(|a, b| { - let depth_a = path_depth(&a.meta.desired_path); - let depth_b = path_depth(&b.meta.desired_path); - depth_b - .cmp(&depth_a) - .then_with(|| is_folder(&a.doc_type).cmp(&is_folder(&b.doc_type))) - }); - Ok(entries) - } - - async fn enqueue_delete_job_for_entry( - &self, - tx: &mut Transaction<'_, Postgres>, - workspace_id: Uuid, - entry: &PendingDelete, - permission_snapshot: &[String], - actor_id: Option, - ) -> Result<(), ServiceError> { - let repo_path = entry.repo_path(workspace_id); - let metadata = StorageDeleteJobMetadata { - workspace_id, - repo_path: Some(repo_path), - doc_type: entry.doc_type.clone(), - attachment_paths: if entry.attachments.is_empty() { - None - } else { - Some(entry.attachments.clone()) - }, - permission_snapshot: permission_snapshot.to_vec(), - actor_id, - }; - if entry.doc_type == "folder" { - self.enqueue_folder_delete_tx( - tx, - workspace_id, - entry.doc_id, - entry.reason, - Some(metadata), - ) - .await - } else { - self.enqueue_doc_delete_tx(tx, workspace_id, entry.doc_id, entry.reason, Some(metadata)) - .await - } - } - - async fn record_delete_event( - &self, - workspace_id: Uuid, - entry: &PendingDelete, - actor_id: Option, - ) { - let repo_path = entry.repo_path(workspace_id); - let previous_repo_path = workspace_repo_relative(workspace_id, entry.meta.path.as_deref()); - let mut payload = json!({ - "doc_type": entry.doc_type, - "repo_path": repo_path, - "slug": entry.meta.slug, - "desired_path": entry.meta.desired_path, - "owner_id": workspace_id, - "previous_path": previous_repo_path, - }); - if let Some(actor) = actor_id { - if let serde_json::Value::Object(ref mut map) = payload { - map.insert("actor_id".into(), json!(actor)); - } - } - self.record_event( - workspace_id, - entry.doc_id, - "document.deleted", - Some(payload), - ) - .await; - } -} - -#[derive(Debug, Clone)] -pub enum DocumentPatchOperation { - Insert { - offset: usize, - text: String, - }, - Delete { - offset: usize, - length: usize, - }, - Replace { - offset: usize, - length: usize, - text: String, - }, -} - -#[derive(Debug, Clone)] -struct AttachmentSnapshot { - filename: String, - content_type: Option, - bytes: Vec, - content_hash: String, -} - -fn apply_patch_operations( - initial: &str, - operations: &[DocumentPatchOperation], -) -> Result { - let mut chars: Vec = 
initial.chars().collect();
-    for operation in operations {
-        match operation {
-            DocumentPatchOperation::Insert { offset, text } => {
-                splice_chars(&mut chars, *offset, 0, text)?;
-            }
-            DocumentPatchOperation::Delete { offset, length } => {
-                splice_chars(&mut chars, *offset, *length, "")?;
-            }
-            DocumentPatchOperation::Replace {
-                offset,
-                length,
-                text,
-            } => {
-                splice_chars(&mut chars, *offset, *length, text)?;
-            }
-        }
-    }
-    Ok(chars.into_iter().collect())
-}
-
-fn splice_chars(
-    chars: &mut Vec<char>,
-    offset: usize,
-    length: usize,
-    replacement: &str,
-) -> Result<(), ServiceError> {
-    if offset > chars.len() {
-        return Err(ServiceError::BadRequest("patch_offset_out_of_bounds"));
-    }
-    let end = offset
-        .checked_add(length)
-        .ok_or(ServiceError::BadRequest("patch_length_overflow"))?;
-    if end > chars.len() {
-        return Err(ServiceError::BadRequest("patch_range_out_of_bounds"));
-    }
-    chars.splice(offset..end, replacement.chars());
-    Ok(())
-}
-
-fn duplicate_title(source_title: &str, override_title: Option<String>) -> String {
-    if let Some(custom) = override_title {
-        let trimmed = custom.trim();
-        if !trimmed.is_empty() {
-            return trimmed.to_string();
-        }
-    }
-    let base = source_title.trim();
-    let fallback = if base.is_empty() { "Untitled" } else { base };
-    format!("{fallback} (Copy)")
-}
-
-fn hash_bytes(bytes: &[u8]) -> String {
-    sha256_hex(bytes)
-}
-
-fn path_depth(path: &str) -> usize {
-    path.split('/')
-        .filter(|segment| !segment.is_empty())
-        .count()
-}
-
-fn is_folder(doc_type: &str) -> usize {
-    if doc_type == "folder" { 1 } else { 0 }
-}
-
-#[derive(Clone)]
-struct PendingDelete {
-    doc_id: Uuid,
-    doc_type: String,
-    meta: DocMeta,
-    attachments: Vec<String>,
-    reason: &'static str,
-}
-
-impl PendingDelete {
-    fn repo_path(&self, workspace_id: Uuid) -> String {
-        workspace_repo_relative(workspace_id, self.meta.path.as_deref())
-            .unwrap_or_else(|| self.meta.desired_path.clone())
-    }
-}
-
-fn snapshot_diff_dto_from_result(result: SnapshotDiffResult) -> SnapshotDiffDto {
-    SnapshotDiffDto {
-        base: snapshot_diff_side_from_use_case(result.base),
-        target: snapshot_diff_side_from_use_case(result.target),
-        diff: result.diff,
-    }
-}
-
-fn snapshot_diff_side_from_use_case(side: SnapshotDiffSide) -> SnapshotDiffSideDto {
-    match side {
-        SnapshotDiffSide::Current { markdown } => SnapshotDiffSideDto::Current { markdown },
-        SnapshotDiffSide::Snapshot { record, markdown } => SnapshotDiffSideDto::Snapshot {
-            snapshot: SnapshotSummaryDto::from(record),
-            markdown,
-        },
-    }
-}
-
-fn repo_relative_from_storage(workspace_id: Uuid, storage_path: &str) -> Option<String> {
-    let trimmed = storage_path.trim_start_matches('/');
-    let owner_prefix = workspace_id.to_string();
-    let remainder = trimmed
-        .strip_prefix(&owner_prefix)
-        .map(|rest| rest.trim_start_matches('/'))
-        .unwrap_or(trimmed);
-    if remainder.is_empty() {
-        None
-    } else {
-        Some(remainder.to_string())
-    }
-}
-
-fn workspace_repo_relative(workspace_id: Uuid, stored_path: Option<&str>) -> Option<String> {
-    let stored = stored_path?.trim_start_matches('/');
-    if stored.is_empty() {
-        return None;
-    }
-    let owner_prefix = workspace_id.to_string();
-    let repo = if let Some(rest) = stored.strip_prefix(&owner_prefix) {
-        rest.trim_start_matches('/')
-    } else {
-        stored
-    };
-    if repo.is_empty() {
-        None
-    } else {
-        Some(repo.to_string())
-    }
-}
-
-fn ensure_can_create(permissions: &PermissionSet, doc_type: &str) -> Result<(), ServiceError> {
-    ensure_folder_sensitive_permission(
-        permissions,
-        doc_type,
-        PERM_DOC_CREATE,
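// Aside: usage-level sketch of the patch helpers above. Offsets and lengths
// are counted in Unicode scalar values (`char`s), not bytes, which is why the
// working buffer is a `Vec<char>`:
fn main() {
    // "héllo" is 5 chars but 6 bytes; char-based offsets stay stable.
    let mut chars: Vec<char> = "héllo".chars().collect();
    // Equivalent to DocumentPatchOperation::Replace { offset: 1, length: 2, text: "ey" }.
    chars.splice(1..3, "ey".chars());
    assert_eq!(chars.into_iter().collect::<String>(), "heylo");
}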
Some(PERM_FOLDER_CREATE), - ) -} - -fn ensure_can_delete(permissions: &PermissionSet, doc_type: &str) -> Result<(), ServiceError> { - ensure_folder_sensitive_permission( - permissions, - doc_type, - PERM_DOC_DELETE, - Some(PERM_FOLDER_DELETE), - ) -} - -fn ensure_can_edit(permissions: &PermissionSet, doc_type: &str) -> Result<(), ServiceError> { - ensure_folder_sensitive_permission(permissions, doc_type, PERM_DOC_EDIT, None) -} - -fn ensure_can_move(permissions: &PermissionSet, doc_type: &str) -> Result<(), ServiceError> { - ensure_folder_sensitive_permission(permissions, doc_type, PERM_DOC_MOVE, None) -} - -fn ensure_can_archive(permissions: &PermissionSet, doc_type: &str) -> Result<(), ServiceError> { - ensure_folder_sensitive_permission(permissions, doc_type, PERM_DOC_ARCHIVE, None) -} - -fn ensure_folder_sensitive_permission( - permissions: &PermissionSet, - doc_type: &str, - doc_permission: &'static str, - folder_permission: Option<&'static str>, -) -> Result<(), ServiceError> { - let required = if doc_type == "folder" { - folder_permission.unwrap_or(doc_permission) - } else { - doc_permission - }; - if permissions.allows(required) { - Ok(()) - } else { - Err(ServiceError::Forbidden) - } -} - -fn to_repo_state(filter: DocumentListFilter) -> DocumentListState { - match filter { - DocumentListFilter::Active => DocumentListState::Active, - DocumentListFilter::Archived => DocumentListState::Archived, - DocumentListFilter::All => DocumentListState::All, - } -} - -fn map_sqlx_error(err: sqlx::Error) -> ServiceError { - error!(error = ?err, "document_sql_error"); - ServiceError::Unexpected(err.into()) -} diff --git a/api/src/application/services/mod.rs b/api/src/application/services/mod.rs deleted file mode 100644 index 546d6e45..00000000 --- a/api/src/application/services/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -pub mod api_tokens; -pub mod auth; -pub mod authorization; -pub mod diff; -pub mod doc_events; -pub mod documents; -pub mod errors; -pub mod files; -pub mod git; -pub mod git_rebuild; -pub mod git_rebuild_scheduler; -pub mod health; -pub mod markdown; -pub mod markdown_render; -pub mod metrics; -pub mod plugins; -pub mod public; -pub mod realtime; -pub mod shares; -pub mod storage_ingest; -pub mod storage_projection_cache; -pub mod storage_reconcile; -pub mod storage_reconcile_scheduler; -pub mod tagging; -pub mod tags; -pub mod user_shortcuts; -pub mod workspaces; diff --git a/api/src/application/services/plugins/data.rs b/api/src/application/services/plugins/data.rs deleted file mode 100644 index ed862d63..00000000 --- a/api/src/application/services/plugins/data.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::sync::Arc; - -use uuid::Uuid; - -use crate::application::ports::plugin_repository::{PluginRecord, PluginRepository}; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::plugins::kv::{GetPluginKv, PutPluginKv}; -use crate::application::use_cases::plugins::records::{ - CreatePluginRecord, DeletePluginRecord, GetPluginRecord, ListPluginRecords, UpdatePluginRecord, -}; - -pub struct PluginDataService { - repo: Arc, -} - -impl PluginDataService { - pub fn new(repo: Arc) -> Self { - Self { repo } - } - - pub async fn list_records( - &self, - plugin: &str, - scope: &str, - scope_id: Uuid, - kind: &str, - limit: i64, - offset: i64, - ) -> Result, ServiceError> { - let uc = ListPluginRecords { - repo: self.repo.as_ref(), - }; - uc.execute(plugin, scope, scope_id, kind, limit, offset) - .await - .map_err(ServiceError::from) - } - - pub async fn 
create_record( - &self, - plugin: &str, - scope: &str, - scope_id: Uuid, - kind: &str, - data: &serde_json::Value, - ) -> Result { - let uc = CreatePluginRecord { - repo: self.repo.as_ref(), - }; - uc.execute(plugin, scope, scope_id, kind, data) - .await - .map_err(ServiceError::from) - } - - pub async fn get_record(&self, record_id: Uuid) -> Result, ServiceError> { - let uc = GetPluginRecord { - repo: self.repo.as_ref(), - }; - uc.execute(record_id).await.map_err(ServiceError::from) - } - - pub async fn update_record( - &self, - record_id: Uuid, - patch: &serde_json::Value, - ) -> Result, ServiceError> { - let uc = UpdatePluginRecord { - repo: self.repo.as_ref(), - }; - uc.execute(record_id, patch) - .await - .map_err(ServiceError::from) - } - - pub async fn delete_record(&self, record_id: Uuid) -> Result { - let uc = DeletePluginRecord { - repo: self.repo.as_ref(), - }; - uc.execute(record_id).await.map_err(ServiceError::from) - } - - pub async fn get_kv( - &self, - plugin: &str, - scope: &str, - scope_id: Option, - key: &str, - ) -> Result, ServiceError> { - let uc = GetPluginKv { - repo: self.repo.as_ref(), - }; - uc.execute(plugin, scope, scope_id, key) - .await - .map_err(ServiceError::from) - } - - pub async fn put_kv( - &self, - plugin: &str, - scope: &str, - scope_id: Option, - key: &str, - value: &serde_json::Value, - ) -> Result<(), ServiceError> { - let uc = PutPluginKv { - repo: self.repo.as_ref(), - }; - uc.execute(plugin, scope, scope_id, key, value) - .await - .map_err(ServiceError::from) - } -} diff --git a/api/src/application/services/plugins/execution.rs b/api/src/application/services/plugins/execution.rs deleted file mode 100644 index 513f2810..00000000 --- a/api/src/application/services/plugins/execution.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::sync::Arc; - -use uuid::Uuid; - -use crate::application::dto::plugins::ExecResult; -use crate::application::ports::document_repository::DocumentRepository; -use crate::application::ports::plugin_repository::PluginRepository; -use crate::application::ports::plugin_runtime::PluginRuntime; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::plugins::exec_action::ExecutePluginAction; -use crate::domain::workspaces::permissions::PermissionSet; - -pub struct PluginExecutionService { - plugin_repo: Arc, - document_repo: Arc, - runtime: Arc, - authorization: Arc, -} - -impl PluginExecutionService { - pub fn new( - plugin_repo: Arc, - document_repo: Arc, - runtime: Arc, - authorization: Arc, - ) -> Self { - Self { - plugin_repo, - document_repo, - runtime, - authorization, - } - } - - pub async fn execute_action( - &self, - workspace_id: Uuid, - user_id: Uuid, - permissions: &PermissionSet, - plugin: &str, - action: &str, - payload: Option, - allowed_doc_id: Option, - actor: &crate::application::access::Actor, - ) -> Result, ServiceError> { - let uc = ExecutePluginAction { - runtime: self.runtime.as_ref(), - plugin_repo: self.plugin_repo.as_ref(), - document_repo: self.document_repo.as_ref(), - authorization: self.authorization.as_ref(), - }; - uc.execute( - workspace_id, - user_id, - permissions, - plugin, - action, - payload, - allowed_doc_id, - actor, - ) - .await - .map_err(ServiceError::from) - } -} diff --git a/api/src/application/services/public.rs b/api/src/application/services/public.rs deleted file mode 100644 index 0d39ac52..00000000 --- a/api/src/application/services/public.rs +++ /dev/null @@ -1,142 +0,0 @@ -use std::sync::Arc; - -use uuid::Uuid; - -use 
crate::application::dto::public::PublicDocumentSummaryDto; -use crate::application::ports::public_repository::PublicRepository; -use crate::application::ports::realtime_port::RealtimeEngine; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::public::get_public::GetPublicByWorkspaceAndId; -use crate::application::use_cases::public::get_status::{GetPublishStatus, PublishStatusDto}; -use crate::application::use_cases::public::list_workspace::ListWorkspacePublic; -use crate::application::use_cases::public::publish::{PublishDocument, PublishResponseDto}; -use crate::application::use_cases::public::unpublish::UnpublishDocument; -use crate::domain::documents::document::Document; -use crate::domain::workspaces::permissions::{ - PERM_PUBLIC_PUBLISH, PERM_PUBLIC_UNPUBLISH, PermissionSet, -}; - -pub struct PublicService { - repo: Arc, - realtime: Arc, -} - -impl PublicService { - pub fn new(repo: Arc, realtime: Arc) -> Self { - Self { repo, realtime } - } - - pub async fn publish_document( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - doc_id: Uuid, - ) -> Result { - ensure_public_publish_permission(permissions)?; - let uc = PublishDocument { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id, doc_id) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound) - } - - pub async fn unpublish_document( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - doc_id: Uuid, - ) -> Result { - ensure_public_unpublish_permission(permissions)?; - let uc = UnpublishDocument { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id, doc_id) - .await - .map_err(ServiceError::from) - } - - pub async fn get_publish_status( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - doc_id: Uuid, - ) -> Result { - ensure_public_publish_permission(permissions)?; - let uc = GetPublishStatus { - repo: self.repo.as_ref(), - }; - let status: PublishStatusDto = uc - .execute(workspace_id, doc_id) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound)?; - Ok(PublishResponseDto { - slug: status.slug, - public_url: status.public_url, - }) - } - - pub async fn list_workspace_public_documents( - &self, - workspace_slug: &str, - ) -> Result, ServiceError> { - let uc = ListWorkspacePublic { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_slug).await.map_err(ServiceError::from) - } - - pub async fn get_public_by_workspace_and_id( - &self, - workspace_slug: &str, - doc_id: Uuid, - ) -> Result { - let uc = GetPublicByWorkspaceAndId { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_slug, doc_id) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound) - } - - pub async fn get_public_content_by_workspace_and_id( - &self, - workspace_slug: &str, - doc_id: Uuid, - ) -> Result { - let exists = self - .repo - .public_exists_by_workspace_and_id(workspace_slug, doc_id) - .await - .map_err(ServiceError::from)?; - if !exists { - return Err(ServiceError::NotFound); - } - let content = self - .realtime - .get_content(&doc_id.to_string()) - .await - .map_err(ServiceError::from)? 
- .unwrap_or_default(); - Ok(content) - } -} - -fn ensure_public_publish_permission(permissions: &PermissionSet) -> Result<(), ServiceError> { - if permissions.allows(PERM_PUBLIC_PUBLISH) { - Ok(()) - } else { - Err(ServiceError::Forbidden) - } -} - -fn ensure_public_unpublish_permission(permissions: &PermissionSet) -> Result<(), ServiceError> { - if permissions.allows(PERM_PUBLIC_UNPUBLISH) { - Ok(()) - } else { - Err(ServiceError::Forbidden) - } -} diff --git a/api/src/application/services/shares.rs b/api/src/application/services/shares.rs deleted file mode 100644 index fedda596..00000000 --- a/api/src/application/services/shares.rs +++ /dev/null @@ -1,319 +0,0 @@ -use std::sync::Arc; - -use uuid::Uuid; - -use crate::application::dto::shares::{ - ActiveShareItemDto, ApplicableShareDto, CreatedShareDto, ShareBrowseResponseDto, - ShareDocumentDto, ShareItemDto, ShareMountDto, -}; -use crate::application::ports::shares_repository::SharesRepository; -use crate::application::services::errors::ServiceError; -use crate::application::use_cases::shares::browse_share::BrowseShare; -use crate::application::use_cases::shares::create_share::CreateShare; -use crate::application::use_cases::shares::delete_share::DeleteShare; -use crate::application::use_cases::shares::list_active::ListActiveShares; -use crate::application::use_cases::shares::list_applicable::ListApplicableShares; -use crate::application::use_cases::shares::list_document_shares::ListDocumentShares; -use crate::application::use_cases::shares::validate_share::ValidateShare; -use crate::domain::workspaces::permissions::{ - PERM_DOC_VIEW, PERM_SHARE_CREATE, PERM_SHARE_DELETE, PermissionSet, -}; - -pub struct ShareService { - repo: Arc, -} - -pub struct ShareDocumentMeta { - pub document_id: Uuid, - pub owner_id: Uuid, - pub workspace_id: Uuid, -} - -impl ShareService { - pub fn new(repo: Arc) -> Self { - Self { repo } - } - - pub async fn create_share( - &self, - workspace_id: Uuid, - actor_id: Uuid, - permissions: &PermissionSet, - document_id: Uuid, - permission: &str, - expires_at: Option>, - ) -> Result { - ensure_share_create_permission(permissions)?; - let uc = CreateShare { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id, actor_id, document_id, permission, expires_at) - .await - .map(|res| CreatedShareDto { - token: res.token, - document_id: res.document_id, - document_type: res.document_type, - }) - .map_err(ServiceError::from) - } - - pub async fn list_document_shares( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - document_id: Uuid, - ) -> Result, ServiceError> { - ensure_share_create_permission(permissions)?; - let uc = ListDocumentShares { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id, document_id) - .await - .map_err(ServiceError::from) - } - - pub async fn delete_share( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - token: &str, - ) -> Result { - ensure_share_delete_permission(permissions)?; - let uc = DeleteShare { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id, token) - .await - .map_err(ServiceError::from) - } - - pub async fn list_applicable( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - doc_id: Uuid, - ) -> Result, ServiceError> { - ensure_share_create_permission(permissions)?; - let uc = ListApplicableShares { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id, doc_id) - .await - .map_err(ServiceError::from) - } - - pub async fn validate_token( - &self, - token: &str, - ) -> Result, ServiceError> { - let uc = ValidateShare { - 
repo: self.repo.as_ref(), - }; - uc.execute(token).await.map_err(ServiceError::from) - } - - pub async fn resolve_share_context( - &self, - token: &str, - ) -> Result< - Option<( - Uuid, - String, - Option>, - Uuid, - String, - Uuid, - )>, - ServiceError, - > { - self.repo - .resolve_share_by_token(token) - .await - .map_err(ServiceError::from) - } - - pub async fn list_active( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - ) -> Result, ServiceError> { - ensure_share_create_permission(permissions)?; - let uc = ListActiveShares { - repo: self.repo.as_ref(), - }; - uc.execute(workspace_id).await.map_err(ServiceError::from) - } - - pub async fn browse_share( - &self, - token: &str, - ) -> Result, ServiceError> { - let uc = BrowseShare { - repo: self.repo.as_ref(), - }; - uc.execute(token).await.map_err(ServiceError::from) - } - - pub async fn materialize_folder_share( - &self, - workspace_id: Uuid, - actor_id: Uuid, - permissions: &PermissionSet, - token: &str, - ) -> Result { - ensure_share_create_permission(permissions)?; - self.repo - .materialize_folder_share(workspace_id, actor_id, token) - .await - .map_err(|err| match err.to_string().as_str() { - "not_found" => ServiceError::NotFound, - "forbidden" => ServiceError::Forbidden, - "bad_request" => ServiceError::BadRequest("invalid_share_scope"), - _ => ServiceError::Unexpected(err), - }) - } - - pub async fn save_share_mount( - &self, - workspace_id: Uuid, - actor_id: Uuid, - permissions: &PermissionSet, - token: &str, - parent_folder_id: Option, - ) -> Result { - ensure_doc_view_permission(permissions)?; - let resolved = self - .repo - .resolve_share_by_token(token) - .await - .map_err(ServiceError::from)? - .ok_or(ServiceError::NotFound)?; - let ( - _share_id, - permission, - expires_at, - target_document_id, - target_document_type, - _workspace_id, - ) = resolved; - if let Some(exp) = expires_at { - if exp < chrono::Utc::now() { - return Err(ServiceError::NotFound); - } - } - let target_title = self - .repo - .validate_share_token(token) - .await - .map_err(ServiceError::from)? 
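// Aside: `save_share_mount` above treats an expired share token the same as a
// missing one. The expiry rule, isolated:
use chrono::{DateTime, Utc};

fn share_expired(expires_at: Option<DateTime<Utc>>, now: DateTime<Utc>) -> bool {
    // A share with no expiry never expires.
    expires_at.is_some_and(|exp| exp < now)
}

fn main() {
    assert!(!share_expired(None, Utc::now()));
}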
- .map(|(_, _, _, title)| title) - .unwrap_or_else(|| "Shared document".to_string()); - let row = self - .repo - .create_share_mount( - workspace_id, - actor_id, - token, - target_document_id, - &target_document_type, - &target_title, - &permission, - parent_folder_id, - ) - .await - .map_err(|err| match err.to_string().as_str() { - "invalid_parent" => ServiceError::BadRequest("invalid_parent"), - _ => ServiceError::Unexpected(err), - })?; - Ok(ShareMountDto { - id: row.id, - token: row.token, - target_document_id: row.target_document_id, - target_document_type: row.target_document_type, - target_title: row.target_title, - permission: row.permission, - parent_folder_id: row.parent_folder_id, - created_at: row.created_at, - }) - } - - pub async fn list_share_mounts( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - ) -> Result, ServiceError> { - ensure_doc_view_permission(permissions)?; - let rows = self - .repo - .list_share_mounts(workspace_id) - .await - .map_err(ServiceError::from)?; - Ok(rows - .into_iter() - .map(|row| ShareMountDto { - id: row.id, - token: row.token, - target_document_id: row.target_document_id, - target_document_type: row.target_document_type, - target_title: row.target_title, - permission: row.permission, - parent_folder_id: row.parent_folder_id, - created_at: row.created_at, - }) - .collect()) - } - - pub async fn delete_share_mount( - &self, - workspace_id: Uuid, - permissions: &PermissionSet, - mount_id: Uuid, - ) -> Result { - ensure_doc_view_permission(permissions)?; - self.repo - .delete_share_mount(workspace_id, mount_id) - .await - .map_err(ServiceError::from) - } - - pub async fn share_document_meta( - &self, - token: &str, - ) -> Result, ServiceError> { - let meta = self - .repo - .get_share_document_meta(token) - .await - .map_err(ServiceError::from)? 
- .map(|(document_id, owner_id, workspace_id)| ShareDocumentMeta { - document_id, - owner_id, - workspace_id, - }); - Ok(meta) - } -} - -fn ensure_share_create_permission(permissions: &PermissionSet) -> Result<(), ServiceError> { - if permissions.allows(PERM_SHARE_CREATE) { - Ok(()) - } else { - Err(ServiceError::Forbidden) - } -} - -fn ensure_share_delete_permission(permissions: &PermissionSet) -> Result<(), ServiceError> { - if permissions.allows(PERM_SHARE_DELETE) { - Ok(()) - } else { - Err(ServiceError::Forbidden) - } -} - -fn ensure_doc_view_permission(permissions: &PermissionSet) -> Result<(), ServiceError> { - if permissions.allows(PERM_DOC_VIEW) { - Ok(()) - } else { - Err(ServiceError::Forbidden) - } -} diff --git a/api/src/application/services/storage_ingest.rs b/api/src/application/services/storage_ingest.rs deleted file mode 100644 index fbb8c62b..00000000 --- a/api/src/application/services/storage_ingest.rs +++ /dev/null @@ -1,837 +0,0 @@ -use std::io; -use std::path::{Component, Path, PathBuf}; -use std::sync::Arc; - -use async_trait::async_trait; -use serde::Deserialize; -use serde_json::{Value, json}; -use tracing::{debug, info, warn}; -use uuid::Uuid; - -use crate::application::ports::doc_event_log::DocEventLog; -use crate::application::ports::document_repository::DocumentRepository; -use crate::application::ports::files_repository::FilesRepository; -use crate::application::ports::realtime_port::RealtimeEngine; -use crate::application::ports::storage_ingest_queue::{StorageIngestEvent, StorageIngestKind}; -use crate::application::ports::storage_port::{StorageProjectionPort, StorageResolverPort}; -use crate::application::services::documents::DocumentService; -use crate::application::services::errors::ServiceError; -use crate::application::services::realtime::snapshot::snapshot_from_markdown; -use crate::application::services::storage_projection_cache::RecentProjectionCache; -use crate::application::services::workspaces::{ - WorkspacePermissionResolver, permission_snapshot::permission_set_from_snapshot, -}; -use crate::application::utils::hash::sha256_hex; -use crate::domain::documents::document::Document as DomainDocument; -use crate::domain::workspaces::permissions::PermissionSet; - -pub fn normalize_repo_path(repo_path: &str) -> Option { - let trimmed = repo_path.trim().trim_start_matches('/'); - if trimmed.is_empty() { - return None; - } - let mut normalized = PathBuf::new(); - for component in Path::new(trimmed).components() { - match component { - Component::Normal(part) => normalized.push(part), - Component::CurDir => continue, - _ => return None, - } - } - if normalized.as_os_str().is_empty() { - return None; - } - Some(normalized.to_string_lossy().replace('\\', "/")) -} - -fn previous_path_from_payload(payload: Option<&Value>) -> Option { - payload - .and_then(|p| p.get("previous_path")) - .and_then(|v| v.as_str()) - .map(|s| s.to_string()) -} - -#[async_trait] -pub trait StorageIngestHandler: Send + Sync { - async fn handle_event(&self, event: &StorageIngestEvent) -> anyhow::Result<()>; -} - -pub struct StorageIngestService { - document_repo: Arc, - files_repo: Arc, - realtime: Arc, - storage: Arc, - storage_projection: Arc, - events: Arc, - document_service: Arc, - permission_resolver: Arc, - recent_exports: Arc, -} - -impl StorageIngestService { - pub fn new( - document_repo: Arc, - files_repo: Arc, - realtime: Arc, - storage: Arc, - storage_projection: Arc, - events: Arc, - document_service: Arc, - permission_resolver: Arc, - recent_exports: Arc, - ) -> Self { - 
Self { - document_repo, - files_repo, - realtime, - storage, - storage_projection, - events, - document_service, - permission_resolver, - recent_exports, - } - } - - fn relative_path(user_id: Uuid, repo_path: &str) -> String { - let mut path = PathBuf::from(user_id.to_string()); - path.push(repo_path); - path.to_string_lossy().replace('\\', "/") - } - - async fn permissions_for_event( - &self, - event: &StorageIngestEvent, - ) -> anyhow::Result { - let set = permission_set_from_snapshot(&event.permission_snapshot); - if !set.is_empty() { - return Ok(set); - } - let mut candidates = Vec::new(); - if let Some(actor_id) = event.actor_id { - candidates.push(("actor", actor_id, true)); - } - let warn_on_user_miss = event.user_id != event.workspace_id; - candidates.push(("user", event.user_id, warn_on_user_miss)); - for (source, user_id, warn_on_missing) in candidates { - match self - .permission_resolver - .load_permission_set(event.workspace_id, user_id) - .await - { - Ok(Some(resolved)) => { - info!( - workspace_id = %event.workspace_id, - user_id = %user_id, - source, - "storage_ingest_permissions_rehydrated" - ); - return Ok(resolved); - } - Ok(None) => { - if warn_on_missing { - warn!( - workspace_id = %event.workspace_id, - user_id = %user_id, - source, - "storage_ingest_member_missing_for_permissions" - ); - } else { - debug!( - workspace_id = %event.workspace_id, - user_id = %user_id, - source, - "storage_ingest_member_missing_for_permissions" - ); - } - } - Err(err) => { - warn!( - error = ?err, - workspace_id = %event.workspace_id, - user_id = %user_id, - source, - "storage_ingest_permission_resolve_failed" - ); - } - } - } - warn!( - workspace_id = %event.workspace_id, - "storage_ingest_permissions_fallback_all" - ); - Ok(PermissionSet::all()) - } - - async fn handle_doc_upsert( - &self, - doc: &ResolvedDocument, - repo_path: &str, - event: &StorageIngestEvent, - payload: MarkdownIngestPayload, - previous_repo_path: Option<&str>, - ) -> anyhow::Result<()> { - if event.backend == "fs_watcher" - && event.actor_id.is_none() - && self.recent_exports.is_recent_match( - event.workspace_id, - repo_path, - &payload.content_hash, - ) - { - debug!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_doc_upsert_skipped_recent_projection" - ); - return Ok(()); - } - let snapshot = snapshot_from_markdown(&payload.body); - self.realtime - .apply_snapshot(&doc.id.to_string(), snapshot.as_slice()) - .await?; - // Persist back to storage only for API/actor initiated ingests; fs_watcher/reconcile events - // originate from the filesystem itself and writing would re-trigger the watcher endlessly. 
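// Aside: `RecentProjectionCache` is not part of this diff. A minimal sketch of
// the echo-suppression idea behind `is_recent_match` above, assuming a TTL'd
// map keyed by workspace and repo path (field names and TTL are guesses):
use std::collections::HashMap;
use std::sync::Mutex;
use std::time::{Duration, Instant};
use uuid::Uuid;

struct RecentProjectionCache {
    ttl: Duration,
    entries: Mutex<HashMap<(Uuid, String), (String, Instant)>>,
}

impl RecentProjectionCache {
    // Called when the API projects a doc to disk, before the watcher fires.
    fn record(&self, workspace: Uuid, repo_path: &str, content_hash: &str) {
        self.entries.lock().unwrap().insert(
            (workspace, repo_path.to_string()),
            (content_hash.to_string(), Instant::now()),
        );
    }

    // A watcher event matching a recent projection is our own write echoing back.
    fn is_recent_match(&self, workspace: Uuid, repo_path: &str, content_hash: &str) -> bool {
        self.entries
            .lock()
            .unwrap()
            .get(&(workspace, repo_path.to_string()))
            .is_some_and(|(hash, at)| hash == content_hash && at.elapsed() < self.ttl)
    }
}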
- if event.actor_id.is_some() { - if let Err(err) = self.realtime.force_persist(&doc.id.to_string()).await { - warn!( - error = ?err, - doc_id = %doc.id, - "storage_ingest_force_persist_failed" - ); - } - } - let mut payload_obj = serde_json::Map::new(); - payload_obj.insert("repo_path".into(), json!(repo_path)); - payload_obj.insert("backend".into(), json!(event.backend)); - payload_obj.insert("content_hash".into(), json!(payload.content_hash)); - payload_obj.insert("doc_type".into(), json!(doc.doc_type)); - if let Some(prev) = previous_repo_path { - payload_obj.insert("previous_path".into(), json!(prev)); - } - self.events - .append( - event.workspace_id, - doc.id, - "document.ingest_upsert", - Some(Value::Object(payload_obj)), - ) - .await?; - info!( - doc_id = %doc.id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_doc_upsert_applied" - ); - Ok(()) - } - - async fn handle_attachment_upsert( - &self, - file_id: Uuid, - doc_id: Uuid, - rel_path: &str, - repo_path: &str, - event: &StorageIngestEvent, - previous_repo_path: Option<&str>, - ) -> anyhow::Result<()> { - let abs = self.storage.absolute_from_relative(rel_path); - let bytes = match self.storage.read_bytes(abs.as_path()).await { - Ok(bytes) => bytes, - Err(err) if is_not_found_error(&err) => { - warn!( - file_id = %file_id, - doc_id = %doc_id, - repo_path = repo_path, - "storage_ingest_attachment_missing_skipped" - ); - self.storage_projection - .delete_relative_path(rel_path) - .await?; - return Ok(()); - } - Err(err) => return Err(err), - }; - let size = bytes.len() as i64; - let hash = sha256_hex(&bytes); - self.files_repo - .update_hash_and_size(file_id, size, &hash) - .await?; - let mut payload_obj = serde_json::Map::new(); - payload_obj.insert("repo_path".into(), json!(repo_path)); - payload_obj.insert("storage_path".into(), json!(rel_path)); - payload_obj.insert("backend".into(), json!(event.backend)); - payload_obj.insert("size".into(), json!(size)); - payload_obj.insert("content_hash".into(), json!(hash)); - if let Some(prev) = previous_repo_path { - payload_obj.insert("previous_path".into(), json!(prev)); - } - self.events - .append( - event.workspace_id, - doc_id, - "attachment.ingest_upsert", - Some(Value::Object(payload_obj)), - ) - .await?; - info!( - doc_id = %doc_id, - file_id = %file_id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_attachment_upsert_applied" - ); - Ok(()) - } - - async fn handle_attachment_delete( - &self, - file_id: Uuid, - doc_id: Uuid, - repo_path: &str, - event: &StorageIngestEvent, - ) -> anyhow::Result<()> { - self.files_repo.delete_by_id(file_id).await?; - self.events - .append( - event.workspace_id, - doc_id, - "attachment.ingest_delete", - Some(json!({ - "repo_path": repo_path, - "backend": event.backend, - })), - ) - .await?; - info!( - doc_id = %doc_id, - file_id = %file_id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_attachment_deleted" - ); - Ok(()) - } - - async fn load_markdown_payload(&self, rel_path: &str) -> anyhow::Result { - let abs = self.storage.absolute_from_relative(rel_path); - let bytes = self.storage.read_bytes(abs.as_path()).await?; - parse_markdown_payload(bytes) - } - - async fn resolve_doc_from_front_matter( - &self, - user_id: Uuid, - payload: &MarkdownIngestPayload, - ) -> anyhow::Result> { - let Some(doc_id) = payload.doc_id_hint else { - return Ok(None); - }; - let Some(meta) = self - .document_repo - .get_meta_for_owner(doc_id, user_id) - .await? 
- else { - return Ok(None); - }; - Ok(Some(ResolvedDocument::new( - doc_id, - meta.doc_type, - meta.path, - meta.archived_at.is_some(), - ))) - } - - async fn handle_doc_delete( - &self, - doc: &ResolvedDocument, - repo_path: &str, - event: &StorageIngestEvent, - permissions: &PermissionSet, - ) -> anyhow::Result<()> { - let actor_id = event.actor_id; - match self - .document_service - .delete_for_user(event.workspace_id, doc.id, actor_id, &permissions) - .await - { - Ok(true) => { - info!( - doc_id = %doc.id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_doc_delete_applied" - ); - Ok(()) - } - Ok(false) => Ok(()), - Err(ServiceError::NotFound) => Ok(()), - Err(err) => Err(err.into()), - } - } - - async fn handle_folder_upsert( - &self, - doc: &ResolvedDocument, - rel_path: &str, - repo_path: &str, - event: &StorageIngestEvent, - previous_repo_path: Option<&str>, - ) -> anyhow::Result<()> { - if !self - .reconcile_repo_path(doc, event.workspace_id, rel_path) - .await? - { - warn!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_folder_repo_path_rejected" - ); - return Ok(()); - } - let mut payload_obj = serde_json::Map::new(); - payload_obj.insert("repo_path".into(), json!(repo_path)); - payload_obj.insert("doc_type".into(), json!(doc.doc_type)); - payload_obj.insert("owner_id".into(), json!(event.workspace_id)); - payload_obj.insert("backend".into(), json!(event.backend)); - if let Some(prev) = previous_repo_path { - payload_obj.insert("previous_path".into(), json!(prev)); - } - self.events - .append( - event.workspace_id, - doc.id, - "document.metadata_updated", - Some(Value::Object(payload_obj)), - ) - .await?; - info!( - doc_id = %doc.id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_folder_upsert_applied" - ); - Ok(()) - } - - async fn reconcile_repo_path( - &self, - doc: &ResolvedDocument, - owner_id: Uuid, - rel_path: &str, - ) -> anyhow::Result { - if doc.path.as_deref() == Some(rel_path) { - return Ok(true); - } - match self - .document_repo - .update_repo_path(doc.id, owner_id, rel_path) - .await - { - Ok(()) => Ok(true), - Err(err) => { - warn!( - doc_id = %doc.id, - error = ?err, - "storage_ingest_repo_path_update_failed" - ); - Ok(false) - } - } - } -} - -#[async_trait] -impl StorageIngestHandler for StorageIngestService { - async fn handle_event(&self, event: &StorageIngestEvent) -> anyhow::Result<()> { - let Some(repo_path) = normalize_repo_path(&event.repo_path) else { - warn!( - user_id = %event.workspace_id, - repo_path = event.repo_path.as_str(), - "storage_ingest_invalid_repo_path" - ); - return Ok(()); - }; - let rel_path = Self::relative_path(event.workspace_id, &repo_path); - let payload_previous_repo_path = previous_path_from_payload(event.payload.as_ref()); - - let mut doc_previous_repo_path: Option = None; - let mut doc = self - .document_repo - .get_by_owner_and_path(event.workspace_id, &rel_path) - .await? - .map(ResolvedDocument::from); - - if doc.is_none() { - if let Some(prev_repo) = payload_previous_repo_path.as_deref() { - let prev_rel = Self::relative_path(event.workspace_id, prev_repo); - if let Some(prev_doc) = self - .document_repo - .get_by_owner_and_path(event.workspace_id, &prev_rel) - .await? 
- .map(ResolvedDocument::from) - { - if let Err(err) = self - .document_repo - .update_repo_path(prev_doc.id, event.workspace_id, &rel_path) - .await - { - warn!( - doc_id = %prev_doc.id, - error = ?err, - "storage_ingest_repo_path_update_failed" - ); - } else { - doc_previous_repo_path = Some(prev_repo.to_string()); - let mut updated = prev_doc.clone(); - updated.path = Some(rel_path.clone()); - doc = Some(updated); - } - } - } - } - - if let Some(doc) = doc { - if doc.is_archived() { - warn!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_archived_doc_skipped" - ); - return Ok(()); - } - match event.kind { - StorageIngestKind::Upsert => { - if doc.is_folder() { - self.handle_folder_upsert( - &doc, - &rel_path, - &repo_path, - event, - doc_previous_repo_path.as_deref(), - ) - .await?; - } else { - let payload = match self.load_markdown_payload(&rel_path).await { - Ok(payload) => payload, - Err(err) if is_not_found_error(&err) => { - warn!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_doc_payload_missing" - ); - self.storage_projection - .delete_relative_path(&rel_path) - .await?; - return Ok(()); - } - Err(err) => return Err(err), - }; - self.handle_doc_upsert( - &doc, - &repo_path, - event, - payload, - doc_previous_repo_path.as_deref(), - ) - .await?; - } - } - StorageIngestKind::Delete => { - let permissions = self.permissions_for_event(event).await?; - self.handle_doc_delete(&doc, &repo_path, event, &permissions) - .await?; - } - } - return Ok(()); - } - - let mut attachment_previous_repo_path: Option = None; - let mut attachment = self.files_repo.find_by_storage_path(&rel_path).await?; - - if attachment.is_none() { - if let Some(prev_repo) = payload_previous_repo_path.as_deref() { - let prev_rel = Self::relative_path(event.workspace_id, prev_repo); - if let Some(file) = self.files_repo.find_by_storage_path(&prev_rel).await? { - self.files_repo - .update_storage_path(file.0, &rel_path) - .await?; - attachment_previous_repo_path = Some(prev_repo.to_string()); - attachment = Some(file); - } - } - } - - if let Some((file_id, doc_id, owner_id)) = attachment { - info!( - doc_id = %doc_id, - owner_id = %owner_id, - repo_path = repo_path, - "storage_ingest_attachment_detected" - ); - match event.kind { - StorageIngestKind::Upsert => { - self.handle_attachment_upsert( - file_id, - doc_id, - &rel_path, - &repo_path, - event, - attachment_previous_repo_path.as_deref(), - ) - .await?; - } - StorageIngestKind::Delete => { - self.handle_attachment_delete(file_id, doc_id, &repo_path, event) - .await?; - } - } - return Ok(()); - } - - if event.kind == StorageIngestKind::Upsert && rel_path.ends_with(".md") { - let payload = match self.load_markdown_payload(&rel_path).await { - Ok(payload) => payload, - Err(err) if is_not_found_error(&err) => { - info!( - user_id = %event.workspace_id, - repo_path = repo_path, - "storage_ingest_missing_source_skipped" - ); - self.storage_projection - .delete_relative_path(&rel_path) - .await?; - return Ok(()); - } - Err(err) => return Err(err), - }; - if let Some(doc) = self - .resolve_doc_from_front_matter(event.workspace_id, &payload) - .await? - { - if doc.is_folder() { - warn!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_folder_event_skipped" - ); - } else if doc.is_archived() { - warn!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_archived_doc_skipped" - ); - } else { - if !self - .reconcile_repo_path(&doc, event.workspace_id, &rel_path) - .await? 
- { - warn!( - doc_id = %doc.id, - repo_path = repo_path, - "storage_ingest_repo_path_rejected" - ); - return Ok(()); - } - self.handle_doc_upsert( - &doc, - &repo_path, - event, - payload, - payload_previous_repo_path.as_deref(), - ) - .await?; - } - return Ok(()); - } - } - - if event.kind == StorageIngestKind::Delete { - self.storage_projection - .delete_relative_path(&rel_path) - .await?; - info!( - user_id = %event.workspace_id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_orphan_deleted" - ); - } else { - warn!( - user_id = %event.workspace_id, - repo_path = repo_path, - backend = event.backend, - "storage_ingest_no_target_found" - ); - } - Ok(()) - } -} - -#[derive(Debug, Clone)] -struct ResolvedDocument { - id: Uuid, - doc_type: String, - path: Option, - archived: bool, -} - -impl ResolvedDocument { - fn new(id: Uuid, doc_type: String, path: Option, archived: bool) -> Self { - Self { - id, - doc_type, - path, - archived, - } - } - - fn is_folder(&self) -> bool { - self.doc_type == "folder" - } - - fn is_archived(&self) -> bool { - self.archived - } -} - -impl From for ResolvedDocument { - fn from(value: DomainDocument) -> Self { - Self::new( - value.id, - value.doc_type, - value.path, - value.archived_at.is_some(), - ) - } -} - -#[derive(Debug, Clone)] -struct MarkdownIngestPayload { - doc_id_hint: Option, - body: String, - content_hash: String, -} - -#[derive(Debug, Deserialize)] -struct MarkdownFrontMatter { - id: Option, -} - -fn parse_markdown_payload(bytes: Vec) -> anyhow::Result { - let content_hash = sha256_hex(&bytes); - // Accept lossy UTF-8 to avoid retry storms on malformed files; non-UTF8 bytes become U+FFFD. - let text = String::from_utf8_lossy(&bytes).to_string(); - let trimmed = text.trim_start_matches('\u{feff}'); - if let Some((front, body)) = split_front_matter(trimmed) { - if let Ok(front_matter) = serde_yaml::from_str::(front) { - if let Some(doc_id) = front_matter.id { - return Ok(MarkdownIngestPayload { - doc_id_hint: Some(doc_id), - body: body.to_string(), - content_hash, - }); - } - } - } - Ok(MarkdownIngestPayload { - doc_id_hint: None, - body: trimmed.to_string(), - content_hash, - }) -} - -fn split_front_matter(input: &str) -> Option<(&str, &str)> { - let Some(after_open) = input - .strip_prefix("---\r\n") - .or_else(|| input.strip_prefix("---\n")) - else { - return None; - }; - if let Some((front_len, body_start)) = find_front_matter_end(after_open) { - let front = &after_open[..front_len]; - let body = &after_open[body_start..]; - return Some((front, body)); - } - None -} - -fn find_front_matter_end(s: &str) -> Option<(usize, usize)> { - let bytes = s.as_bytes(); - let mut idx = 0; - while idx < bytes.len() { - if bytes[idx] == b'\n' { - let after_newline = &s[idx + 1..]; - if after_newline.starts_with("---") { - let mut body_start = idx + 1 + 3; - let mut remainder = &s[body_start..]; - // Skip any trailing newlines so we don't feed extra blank lines - // back into the realtime layer when the projection re-imports. 
-                while remainder.starts_with("\r\n") || remainder.starts_with('\n') {
-                    if remainder.starts_with("\r\n") {
-                        body_start += 2;
-                        let (_, rest) = remainder.split_at(2);
-                        remainder = rest;
-                    } else {
-                        body_start += 1;
-                        let (_, rest) = remainder.split_at(1);
-                        remainder = rest;
-                    }
-                }
-                return Some((idx, body_start));
-            }
-        }
-        idx += 1;
-    }
-    None
-}
-
-fn is_not_found_error(err: &anyhow::Error) -> bool {
-    err.chain().any(|cause| {
-        cause
-            .downcast_ref::<io::Error>()
-            .is_some_and(|io_err| io_err.kind() == io::ErrorKind::NotFound)
-    })
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use uuid::Uuid;
-
-    #[test]
-    fn preserves_body_when_front_matter_has_no_id() {
-        let markdown = "---\ntitle: Foo\n---\n\nBody".to_string();
-        let payload = parse_markdown_payload(markdown.clone().into_bytes()).unwrap();
-        assert!(payload.doc_id_hint.is_none());
-        assert_eq!(payload.body, markdown);
-    }
-
-    #[test]
-    fn extracts_id_when_front_matter_is_valid() {
-        let doc_id = Uuid::new_v4();
-        let markdown = format!("---\nid: {}\n---\n\nHello", doc_id);
-        let payload = parse_markdown_payload(markdown.into_bytes()).unwrap();
-        assert_eq!(payload.doc_id_hint, Some(doc_id));
-        assert_eq!(payload.body.trim_start_matches('\n'), "Hello");
-    }
-
-    #[test]
-    fn normalize_repo_path_rejects_traversal() {
-        assert!(normalize_repo_path("../secret").is_none());
-        assert!(normalize_repo_path("foo/../bar").is_none());
-        assert!(normalize_repo_path("").is_none());
-    }
-
-    #[test]
-    fn normalize_repo_path_trims_and_standardizes() {
-        assert_eq!(
-            normalize_repo_path("//docs//foo.md"),
-            Some("docs/foo.md".to_string())
-        );
-        assert_eq!(
-            normalize_repo_path("notes/./bar.md"),
-            Some("notes/bar.md".to_string())
-        );
-    }
-}
diff --git a/api/src/application/services/storage_reconcile_scheduler.rs b/api/src/application/services/storage_reconcile_scheduler.rs
deleted file mode 100644
index d4cd22d2..00000000
--- a/api/src/application/services/storage_reconcile_scheduler.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use std::sync::Arc;
-use std::time::Duration;
-
-use crate::application::ports::storage_reconcile_jobs::StorageReconcileJobs;
-use crate::application::ports::workspace_repository::WorkspaceRepository;
-use tracing::{error, info};
-
-pub struct StorageReconcileScheduler {
-    jobs: Arc<dyn StorageReconcileJobs>,
-    workspaces: Arc<dyn WorkspaceRepository>,
-    interval: Duration,
-}
-
-impl StorageReconcileScheduler {
-    pub fn new(
-        jobs: Arc<dyn StorageReconcileJobs>,
-        workspaces: Arc<dyn WorkspaceRepository>,
-        interval: Duration,
-    ) -> Self {
-        Self {
-            jobs,
-            workspaces,
-            interval,
-        }
-    }
-
-    pub async fn run(self) {
-        loop {
-            match self.workspaces.list_all_workspace_ids().await {
-                Ok(ids) => {
-                    for id in ids {
-                        if let Err(err) = self.jobs.enqueue(id, "full").await {
-                            error!(
-                                error = ?err,
-                                workspace_id = %id,
-                                "storage_reconcile_enqueue_failed"
-                            );
-                        } else {
-                            info!(workspace_id = %id, "storage_reconcile_job_enqueued");
-                        }
-                    }
-                }
-                Err(err) => error!(
-                    error = ?err,
-                    "storage_reconcile_scheduler_workspace_list_failed"
-                ),
-            }
-            tokio::time::sleep(self.interval).await;
-        }
-    }
-}
diff --git a/api/src/application/services/tags.rs b/api/src/application/services/tags.rs
deleted file mode 100644
index 0d209027..00000000
--- a/api/src/application/services/tags.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use std::sync::Arc;
-
-use uuid::Uuid;
-
-use crate::application::dto::tags::TagItemDto;
-use crate::application::ports::tag_repository::TagRepository;
-use crate::application::services::errors::ServiceError;
-use crate::application::use_cases::tags::list_tags::ListTags;
-
-pub struct TagService {
-    repo: Arc<dyn TagRepository>,
-}
-
-impl TagService {
-    pub fn new(repo: Arc<dyn TagRepository>) -> Self {
-        Self { repo }
-    }
-
-    pub async fn list(
-        &self,
-        workspace_id: Uuid,
-        filter: Option<String>,
-    ) -> Result<Vec<TagItemDto>, ServiceError> {
-        let uc = ListTags {
-            repo: self.repo.as_ref(),
-        };
-        uc.execute(workspace_id, filter)
-            .await
-            .map_err(ServiceError::from)
-    }
-}
diff --git a/api/src/application/use_cases/documents/archive_document.rs b/api/src/application/use_cases/documents/archive_document.rs
deleted file mode 100644
index 31d83a91..00000000
--- a/api/src/application/use_cases/documents/archive_document.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use sqlx::{Postgres, Transaction};
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::realtime_port::RealtimeEngine;
-use crate::domain::documents::document::Document as DomainDocument;
-
-pub struct ArchiveDocument<'a, R, RT>
-where
-    R: DocumentRepository + ?Sized,
-    RT: RealtimeEngine + ?Sized,
-{
-    pub repo: &'a R,
-    pub realtime: &'a RT,
-}
-
-impl<'a, R, RT> ArchiveDocument<'a, R, RT>
-where
-    R: DocumentRepository + ?Sized,
-    RT: RealtimeEngine + ?Sized,
-{
-    pub async fn execute(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        archived_by: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let meta = match self.repo.get_meta_for_owner(doc_id, workspace_id).await? {
-            Some(meta) => meta,
-            None => return Ok(None),
-        };
-        if meta.archived_at.is_some() {
-            return Ok(None);
-        }
-
-        let subtree = self
-            .repo
-            .list_owned_subtree_documents(workspace_id, doc_id)
-            .await?;
-        for node in &subtree {
-            if node.doc_type != "folder" {
-                self.realtime.force_persist(&node.id.to_string()).await?;
-            }
-        }
-
-        let doc = self
-            .repo
-            .archive_subtree(doc_id, workspace_id, archived_by)
-            .await?;
-
-        if doc.is_some() {
-            for node in &subtree {
-                self.realtime
-                    .set_document_editable(&node.id.to_string(), false)
-                    .await?;
-            }
-        }
-
-        Ok(doc)
-    }
-
-    pub async fn execute_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        archived_by: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let meta = match self.repo.get_meta_for_owner(doc_id, workspace_id).await? {
-            Some(meta) => meta,
-            None => return Ok(None),
-        };
-        if meta.archived_at.is_some() {
-            return Ok(None);
-        }
-
-        let subtree = self
-            .repo
-            .list_owned_subtree_documents(workspace_id, doc_id)
-            .await?;
-        for node in &subtree {
-            if node.doc_type != "folder" {
-                self.realtime.force_persist(&node.id.to_string()).await?;
-            }
-        }
-
-        let doc = self
-            .repo
-            .archive_subtree_tx(tx, doc_id, workspace_id, archived_by)
-            .await?;
-
-        if doc.is_some() {
-            for node in &subtree {
-                self.realtime
-                    .set_document_editable(&node.id.to_string(), false)
-                    .await?;
-            }
-        }
-
-        Ok(doc)
-    }
-}
diff --git a/api/src/application/use_cases/documents/create_document.rs b/api/src/application/use_cases/documents/create_document.rs
deleted file mode 100644
index f0a828af..00000000
--- a/api/src/application/use_cases/documents/create_document.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::domain::documents::document::Document as DomainDocument;
-use sqlx::{Postgres, Transaction};
-
-pub struct CreateDocument<'a, R: DocumentRepository + ?Sized> {
-    pub repo: &'a R,
-}
-
-impl<'a, R: DocumentRepository + ?Sized> CreateDocument<'a, R> {
-    pub async fn execute(
-        &self,
-        workspace_id: Uuid,
-        created_by: Uuid,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> anyhow::Result<DomainDocument> {
-        self.repo
-            .create_for_user(
-                workspace_id,
-                created_by,
-                title,
-                parent_id,
-                doc_type,
-                created_by_plugin,
-            )
-            .await
-    }
-
-    pub async fn execute_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        created_by: Uuid,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> anyhow::Result<DomainDocument> {
-        self.repo
-            .create_for_user_tx(
-                tx,
-                workspace_id,
-                created_by,
-                title,
-                parent_id,
-                doc_type,
-                created_by_plugin,
-            )
-            .await
-    }
-}
diff --git a/api/src/application/use_cases/documents/delete_document.rs b/api/src/application/use_cases/documents/delete_document.rs
deleted file mode 100644
index 50e47919..00000000
--- a/api/src/application/use_cases/documents/delete_document.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use sqlx::{Postgres, Transaction};
-
-pub struct DeleteDocument<'a, R>
-where
-    R: DocumentRepository + ?Sized,
-{
-    pub repo: &'a R,
-}
-
-impl<'a, R> DeleteDocument<'a, R>
-where
-    R: DocumentRepository + ?Sized,
-{
-    pub async fn execute(&self, id: Uuid, workspace_id: Uuid) -> anyhow::Result<Option<Uuid>> {
-        self.repo.delete_owned(id, workspace_id).await
-    }
-
-    pub async fn execute_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<Uuid>> {
-        self.repo.delete_owned_tx(tx, id, workspace_id).await
-    }
-}
diff --git a/api/src/application/use_cases/documents/get_backlinks.rs b/api/src/application/use_cases/documents/get_backlinks.rs
deleted file mode 100644
index 25a73269..00000000
--- a/api/src/application/use_cases/documents/get_backlinks.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::domain::documents::document::BacklinkInfo;
-
-pub struct GetBacklinks<'a, R: DocumentRepository + ?Sized> {
-    pub repo: &'a R,
-}
-
-impl<'a, R: DocumentRepository + ?Sized> GetBacklinks<'a, R> {
-    pub async fn execute(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Vec<BacklinkInfo>> {
-        self.repo.backlinks_for(workspace_id, doc_id).await
-    }
-}
diff --git a/api/src/application/use_cases/documents/get_outgoing_links.rs b/api/src/application/use_cases/documents/get_outgoing_links.rs
deleted file mode 100644
index ee9e8ca2..00000000
--- a/api/src/application/use_cases/documents/get_outgoing_links.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::domain::documents::document::OutgoingLink;
-
-pub struct GetOutgoingLinks<'a, R: DocumentRepository + ?Sized> {
-    pub repo: &'a R,
-}
-
-impl<'a, R: DocumentRepository + ?Sized> GetOutgoingLinks<'a, R> {
-    pub async fn execute(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Vec<OutgoingLink>> {
-        self.repo.outgoing_links_for(workspace_id, doc_id).await
-    }
-}
diff --git a/api/src/application/use_cases/documents/unarchive_document.rs b/api/src/application/use_cases/documents/unarchive_document.rs
deleted file mode 100644
index 0deb9911..00000000
--- a/api/src/application/use_cases/documents/unarchive_document.rs
+++ /dev/null
@@ -1,97 +0,0 @@
-use sqlx::{Postgres, Transaction};
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::application::ports::realtime_port::RealtimeEngine;
-use crate::domain::documents::document::Document as DomainDocument;
-
-pub struct UnarchiveDocument<'a, R, RT>
-where
-    R: DocumentRepository + ?Sized,
-    RT: RealtimeEngine + ?Sized,
-{
-    pub repo: &'a R,
-    pub realtime: &'a RT,
-}
-
-impl<'a, R, RT> UnarchiveDocument<'a, R, RT>
-where
-    R: DocumentRepository + ?Sized,
-    RT: RealtimeEngine + ?Sized,
-{
-    pub async fn execute(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let meta = match self.repo.get_meta_for_owner(doc_id, workspace_id).await? {
-            Some(meta) => meta,
-            None => return Ok(None),
-        };
-        if meta.archived_at.is_none() {
-            return Ok(None);
-        }
-
-        let subtree = self
-            .repo
-            .list_owned_subtree_documents(workspace_id, doc_id)
-            .await?;
-
-        let doc = self.repo.unarchive_subtree(doc_id, workspace_id).await?;
-
-        if doc.is_some() {
-            for node in &subtree {
-                self.realtime
-                    .set_document_editable(&node.id.to_string(), true)
-                    .await?;
-            }
-            for node in &subtree {
-                if node.doc_type != "folder" {
-                    self.realtime.force_persist(&node.id.to_string()).await?;
-                }
-            }
-        }
-
-        Ok(doc)
-    }
-
-    pub async fn execute_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let meta = match self.repo.get_meta_for_owner(doc_id, workspace_id).await? {
-            Some(meta) => meta,
-            None => return Ok(None),
-        };
-        if meta.archived_at.is_none() {
-            return Ok(None);
-        }
-
-        let subtree = self
-            .repo
-            .list_owned_subtree_documents(workspace_id, doc_id)
-            .await?;
-
-        let doc = self
-            .repo
-            .unarchive_subtree_tx(tx, doc_id, workspace_id)
-            .await?;
-
-        if doc.is_some() {
-            for node in &subtree {
-                self.realtime
-                    .set_document_editable(&node.id.to_string(), true)
-                    .await?;
-            }
-            for node in &subtree {
-                if node.doc_type != "folder" {
-                    self.realtime.force_persist(&node.id.to_string()).await?;
-                }
-            }
-        }
-
-        Ok(doc)
-    }
-}
diff --git a/api/src/application/use_cases/documents/update_document.rs b/api/src/application/use_cases/documents/update_document.rs
deleted file mode 100644
index b355fa0d..00000000
--- a/api/src/application/use_cases/documents/update_document.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::DocumentRepository;
-use crate::domain::documents::document::Document as DomainDocument;
-use sqlx::{Postgres, Transaction};
-
-pub struct UpdateDocument<'a, R>
-where
-    R: DocumentRepository + ?Sized,
-{
-    pub repo: &'a R,
-}
-
-impl<'a, R> UpdateDocument<'a, R>
-where
-    R: DocumentRepository + ?Sized,
-{
-    // parent_id: None => not provided; Some(None) => set null; Some(Some(uuid)) => set value
-    pub async fn execute(
-        &self,
-        id: Uuid,
-        workspace_id: Uuid,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        self.repo
-            .update_title_and_parent_for_user(id, workspace_id, title, parent_id)
-            .await
-    }
-
-    pub async fn execute_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        id: Uuid,
-        workspace_id: Uuid,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        self.repo
-            .update_title_and_parent_for_user_tx(tx, id, workspace_id, title, parent_id)
-            .await
-    }
-}
diff --git a/api/src/application/use_cases/git/get_config.rs b/api/src/application/use_cases/git/get_config.rs
deleted file mode 100644
index c66abcdf..00000000
--- a/api/src/application/use_cases/git/get_config.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use crate::application::dto::git::GitConfigDto;
-use crate::application::ports::git_repository::GitRepository;
-use uuid::Uuid;
-
-pub struct GetGitConfig<'a, R: GitRepository + ?Sized> {
-    pub repo: &'a R,
-}
-
-impl<'a, R: GitRepository + ?Sized> GetGitConfig<'a, R> {
-    pub async fn execute(&self, workspace_id: Uuid) -> anyhow::Result<Option<GitConfigDto>> {
-        Ok(self.repo.get_config(workspace_id).await?.map(
-            |(id, repository_url, branch_name, auth_type, auto_sync, created_at, updated_at)| {
-                GitConfigDto {
-                    id,
-                    repository_url,
-                    branch_name,
-                    auth_type,
-                    auto_sync,
-                    created_at,
-                    updated_at,
-                }
-            },
-        ))
-    }
-}
diff --git a/api/src/application/use_cases/mod.rs b/api/src/application/use_cases/mod.rs
deleted file mode 100644
index af322d76..00000000
--- a/api/src/application/use_cases/mod.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-pub mod api_tokens;
-pub mod auth;
-pub mod documents;
-pub mod files;
-pub mod git;
-pub mod plugins;
-pub mod public;
-pub mod shares;
-pub mod tags;
-pub mod user_shortcuts;
diff --git a/api/src/application/use_cases/shares/browse_share.rs b/api/src/application/use_cases/shares/browse_share.rs
deleted file mode 100644
index eca4d949..00000000
--- a/api/src/application/use_cases/shares/browse_share.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use crate::application::dto::shares::{ShareBrowseResponseDto, ShareBrowseTreeItemDto};
-use crate::application::ports::shares_repository::SharesRepository;
-
-pub struct BrowseShare<'a, R: SharesRepository + ?Sized> {
-    pub repo: &'a R,
-}
-
-impl<'a, R: SharesRepository + ?Sized> BrowseShare<'a, R> {
-    pub async fn execute(&self, token: &str) -> anyhow::Result<Option<ShareBrowseResponseDto>> {
-        let row = self.repo.resolve_share_by_token(token).await?;
-        let (share_id, _perm, expires_at, shared_id, shared_type, _workspace_id) = match row {
-            Some(r) => r,
-            None => return Ok(None),
-        };
-        if let Some(exp) = expires_at {
-            if exp < chrono::Utc::now() {
-                return Ok(None);
-            }
-        }
-        // If token targets a document (not folder), return single node
-        if shared_type != "folder" {
-            let mut tree = Vec::new();
-            let doc_rows = self.repo.list_subtree_nodes(shared_id).await?;
-            if let Some((id, title, typ, _parent_id, created_at, updated_at)) = doc_rows
-                .into_iter()
-                .find(|(id, _, _, _, _, _)| *id == shared_id)
-            {
-                tree.push(ShareBrowseTreeItemDto {
-                    id,
-                    title,
-                    parent_id: None,
-                    r#type: typ,
-                    created_at,
-                    updated_at,
-                });
-            } else {
-                let fallback_title = self
-                    .repo
-                    .validate_share_token(token)
-                    .await?
-                    .map(|(_, _, _, title)| title)
-                    .unwrap_or_default();
-                tree.push(ShareBrowseTreeItemDto {
-                    id: shared_id,
-                    title: fallback_title,
-                    parent_id: None,
-                    r#type: shared_type.clone(),
-                    created_at: chrono::Utc::now(),
-                    updated_at: chrono::Utc::now(),
-                });
-            }
-            return Ok(Some(ShareBrowseResponseDto { tree }));
-        }
-        // Folder: list subtree and filter to materialized shares under this folder share
-        let rows = self.repo.list_subtree_nodes(shared_id).await?;
-        let allowed = self.repo.list_materialized_children(share_id).await?;
-        let tree: Vec<ShareBrowseTreeItemDto> = rows
-            .into_iter()
-            .filter_map(|(id, title, typ, parent_id, created_at, updated_at)| {
-                if typ == "document" && !allowed.contains(&id) {
-                    return None;
-                }
-                Some(ShareBrowseTreeItemDto {
-                    id,
-                    title,
-                    parent_id,
-                    r#type: typ,
-                    created_at,
-                    updated_at,
-                })
-            })
-            .collect();
-        Ok(Some(ShareBrowseResponseDto { tree }))
-    }
-}
diff --git a/api/src/application/use_cases/shares/validate_share.rs b/api/src/application/use_cases/shares/validate_share.rs
deleted file mode 100644
index 0bc33238..00000000
--- a/api/src/application/use_cases/shares/validate_share.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use crate::application::dto::shares::ShareDocumentDto;
-use crate::application::ports::shares_repository::SharesRepository;
-
-pub struct ValidateShare<'a, R: SharesRepository + ?Sized> {
-    pub repo: &'a R,
-}
-
-impl<'a, R: SharesRepository + ?Sized> ValidateShare<'a, R> {
-    pub async fn execute(&self, token: &str) -> anyhow::Result<Option<ShareDocumentDto>> {
-        if let Some((document_id, permission, expires_at, title)) =
-            self.repo.validate_share_token(token).await?
- { - if let Some(exp) = expires_at { - if exp < chrono::Utc::now() { - return Ok(None); - } - } - Ok(Some(ShareDocumentDto { - id: document_id, - title, - permission, - content: None, - })) - } else { - Ok(None) - } - } -} diff --git a/api/src/bin/api.rs b/api/src/bin/api.rs new file mode 100644 index 00000000..fce05034 --- /dev/null +++ b/api/src/bin/api.rs @@ -0,0 +1,4 @@ +#[tokio::main] +async fn main() -> anyhow::Result<()> { + api_app::run().await +} diff --git a/api/src/bin/cli.rs b/api/src/bin/cli.rs new file mode 100644 index 00000000..2f746c4e --- /dev/null +++ b/api/src/bin/cli.rs @@ -0,0 +1,4 @@ +#[tokio::main] +async fn main() -> anyhow::Result<()> { + cli_app::run().await +} diff --git a/api/src/bin/export-openapi.rs b/api/src/bin/export-openapi.rs deleted file mode 100644 index 581716f4..00000000 --- a/api/src/bin/export-openapi.rs +++ /dev/null @@ -1,256 +0,0 @@ -use api::presentation::{ - http::{ - api_tokens, auth, documents, files, git, health, markdown, plugins, public, shares, - shortcuts, tags, workspaces, - }, - ws, -}; -use utoipa::OpenApi; - -#[derive(OpenApi)] -#[openapi( - paths( - auth::register, - auth::login, - auth::oauth_state, - auth::oauth_login, - auth::list_oauth_providers, - auth::refresh_session, - auth::logout, - auth::me, - auth::list_sessions, - auth::revoke_session, - api_tokens::list_api_tokens, - api_tokens::create_api_token, - api_tokens::revoke_api_token, - shortcuts::get_user_shortcuts, - shortcuts::update_user_shortcuts, - auth::delete_account, - ws::axum_ws_entry, - tags::list_tags, - documents::list_documents, - documents::create_document, - documents::get_document, - documents::update_document, - documents::duplicate_document, - documents::delete_document, - documents::get_document_content, - documents::update_document_content, - documents::patch_document_content, - documents::archive_document, - documents::unarchive_document, - documents::download_document, - documents::list_document_snapshots, - documents::get_document_snapshot_diff, - documents::restore_document_snapshot, - documents::download_document_snapshot, - documents::search_documents, - documents::get_backlinks, - documents::get_outgoing_links, - files::upload_file, - files::get_file, - files::get_file_by_name, - shares::create_share, - shares::delete_share, - shares::list_document_shares, - shares::validate_share_token, - shares::browse_share, - shares::list_active_shares, - shares::create_share_mount, - shares::list_share_mounts, - shares::delete_share_mount, - shares::list_applicable_shares, - shares::materialize_folder_share, - public::publish_document, - public::unpublish_document, - public::get_publish_status, - public::list_workspace_public_documents, - public::get_public_by_workspace_and_id, - public::get_public_content_by_workspace_and_id, - git::get_config, - git::create_or_update_config, - git::delete_config, - git::get_status, - git::get_changes, - git::get_history, - git::get_working_diff, - git::get_commit_diff, - git::sync_now, - git::import_repository, - git::pull_repository, - git::start_pull_session, - git::get_pull_session, - git::resolve_pull_session, - git::finalize_pull_session, - git::init_repository, - git::deinit_repository, - git::ignore_document, - git::ignore_folder, - git::get_gitignore_patterns, - git::add_gitignore_patterns, - git::check_path_ignored, - markdown::render_markdown, - markdown::render_markdown_many, - plugins::get_manifest, - plugins::exec_action, - plugins::list_records, - plugins::create_record, - plugins::update_record, - 
plugins::delete_record, - plugins::get_kv_value, - plugins::put_kv_value, - plugins::install_from_url, - plugins::uninstall, - plugins::sse_updates, - workspaces::list_workspaces, - workspaces::create_workspace, - workspaces::get_workspace_detail, - workspaces::update_workspace, - workspaces::delete_workspace, - workspaces::switch_workspace, - workspaces::list_members, - workspaces::update_member_role, - workspaces::remove_member, - workspaces::get_workspace_permissions, - workspaces::list_roles, - workspaces::create_role, - workspaces::update_role, - workspaces::delete_role, - workspaces::list_invitations, - workspaces::create_invitation, - workspaces::revoke_invitation, - workspaces::accept_invitation, - workspaces::download_workspace_archive, - health::health, - ), - components(schemas( - auth::RegisterRequest, - auth::LoginRequest, - auth::LoginResponse, - auth::OAuthLoginRequest, - auth::OAuthStateResponse, - auth::AuthProvidersResponse, - auth::AuthProviderInfoResponse, - auth::UserResponse, - auth::WorkspaceMembershipResponse, - auth::SessionResponse, - auth::RefreshResponse, - api_tokens::ApiTokenItem, - api_tokens::ApiTokenCreateRequest, - api_tokens::ApiTokenCreateResponse, - shortcuts::UserShortcutResponse, - shortcuts::UpdateUserShortcutRequest, - tags::TagItem, - documents::Document, - documents::DocumentListResponse, - documents::CreateDocumentRequest, - documents::UpdateDocumentRequest, - documents::DuplicateDocumentRequest, - documents::UpdateDocumentContentRequest, - documents::DocumentPatchOperationRequest, - documents::PatchDocumentContentRequest, - documents::SearchResult, - documents::BacklinkInfo, - documents::BacklinksResponse, - documents::OutgoingLink, - documents::OutgoingLinksResponse, - documents::DocumentDownloadBinary, - documents::DocumentArchiveBinary, - documents::DownloadFormat, - documents::DownloadDocumentQuery, - documents::SnapshotSummary, - documents::SnapshotListResponse, - documents::SnapshotDiffKind, - documents::SnapshotDiffSideResponse, - documents::SnapshotDiffResponse, - documents::SnapshotDiffBaseParam, - documents::SnapshotRestoreResponse, - files::UploadFileResponse, - files::UploadFileMultipart, - shares::CreateShareRequest, - shares::CreateShareResponse, - shares::CreateShareMountRequest, - shares::ShareItem, - shares::ShareDocumentResponse, - shares::ShareBrowseTreeItem, - shares::ShareBrowseResponse, - shares::ApplicableShareItem, - shares::ActiveShareItem, - shares::ShareMountItem, - shares::MaterializeResponse, - public::PublishResponse, - public::PublicDocumentSummary, - git::GitConfigResponse, - git::GitRemoteCheckResponse, - git::CreateGitConfigRequest, - git::UpdateGitConfigRequest, - git::GitStatus, - git::GitSyncRequest, - git::GitSyncResponse, - git::GitPullRequest, - git::GitPullResponse, - git::GitImportResponse, - git::GitPullSessionResponse, - git::GitPullResolution, - git::GitPullConflictItem, - git::GitChangeItem, - git::GitChangesResponse, - git::GitCommitItem, - git::GitHistoryResponse, - api::application::dto::diff::TextDiffLineType, - api::application::dto::diff::TextDiffLine, - api::application::dto::diff::TextDiffResult, - git::AddPatternsRequest, - git::CheckIgnoredRequest, - markdown::RenderOptionsPayload, - markdown::PlaceholderItemPayload, - markdown::RenderResponseBody, - markdown::RenderRequest, - markdown::RenderManyRequest, - markdown::RenderManyResponse, - plugins::ManifestItem, - plugins::RecordsResponse, - plugins::CreateRecordBody, - plugins::UpdateRecordBody, - plugins::KvValueResponse, - 
plugins::KvValueBody, - plugins::ExecBody, - plugins::ExecResultResponse, - plugins::InstallFromUrlBody, - plugins::InstallResponse, - plugins::UninstallBody, - workspaces::WorkspaceResponse, - workspaces::CreateWorkspaceRequest, - workspaces::WorkspaceMemberResponse, - workspaces::UpdateMemberRoleRequest, - workspaces::UpdateWorkspaceRequest, - workspaces::WorkspaceRoleResponse, - workspaces::PermissionOverridePayload, - workspaces::CreateWorkspaceRoleRequest, - workspaces::UpdateWorkspaceRoleRequest, - workspaces::SwitchWorkspaceResponse, - workspaces::WorkspacePermissionsResponse, - workspaces::WorkspaceInvitationResponse, - workspaces::CreateWorkspaceInvitationRequest, - workspaces::DownloadWorkspaceQuery, - health::HealthResp, - )), - tags( - (name = "Auth", description = "Authentication"), - (name = "Documents", description = "Documents management"), - (name = "Files", description = "File management"), - (name = "Sharing", description = "Document sharing"), - (name = "Public Documents", description = "Public pages"), - (name = "Realtime", description = "Yjs WebSocket endpoint (/yjs/:id)"), - (name = "Git", description = "Git integration"), - (name = "Markdown", description = "Markdown rendering"), - (name = "Plugins", description = "Plugins management & data APIs"), - (name = "Health", description = "System health checks") - ) -)] -struct ApiDoc; - -fn main() { - let json = ApiDoc::openapi().to_json().expect("serialize OpenAPI JSON"); - println!("{}", json); -} diff --git a/api/src/bin/refmd.rs b/api/src/bin/refmd.rs deleted file mode 100644 index cbe3ad2e..00000000 --- a/api/src/bin/refmd.rs +++ /dev/null @@ -1,1498 +0,0 @@ -use std::path::PathBuf; -use std::sync::Arc; - -use anyhow::{Context, Result, anyhow, bail, ensure}; -use argon2::{ - Argon2, - password_hash::{PasswordHasher, SaltString}, -}; -use chrono::{DateTime, Utc}; -use clap::{Parser, Subcommand, ValueEnum}; -use password_hash::rand_core::OsRng; -use sqlx::{Row, types::Json}; -use uuid::Uuid; - -use api::application::ports::api_token_repository::ApiTokenRepository; -use api::application::ports::git_rebuild_job_queue::GitRebuildJobQueue; -use api::application::ports::git_storage::GitStorage; -use api::application::ports::git_workspace::GitWorkspacePort; -use api::application::ports::plugin_asset_store::PluginAssetStore; -use api::application::ports::shares_repository::SharesRepository; -use api::application::ports::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue}; -use api::application::ports::storage_reconcile_jobs::StorageReconcileJobs; -use api::application::ports::user_session_repository::UserSessionRepository; -use api::application::services::api_tokens::generate_api_token; -use api::application::services::workspaces::WorkspaceService; -use api::application::use_cases::auth::delete_account::DeleteAccount; -use api::application::use_cases::auth::register::{Register, RegisterRequest}; -use api::bootstrap::config::Config; -use api::domain::workspaces::permissions::PermissionSet; -use api::infrastructure::db; -use api::infrastructure::db::PgPool; -use api::infrastructure::db::repositories::api_token_repository_sqlx::SqlxApiTokenRepository; -use api::infrastructure::db::repositories::document_repository_sqlx::SqlxDocumentRepository; -use api::infrastructure::db::repositories::files_repository_sqlx::SqlxFilesRepository; -use api::infrastructure::db::repositories::plugin_installation_repository_sqlx::SqlxPluginInstallationRepository; -use 
api::infrastructure::db::repositories::plugin_repository_sqlx::SqlxPluginRepository;
-use api::infrastructure::db::repositories::shares_repository_sqlx::SqlxSharesRepository;
-use api::infrastructure::db::repositories::user_repository_sqlx::SqlxUserRepository;
-use api::infrastructure::db::repositories::user_session_repository_sqlx::SqlxUserSessionRepository;
-use api::infrastructure::db::repositories::workspace_repository_sqlx::SqlxWorkspaceRepository;
-use api::infrastructure::git::PgGitRebuildJobQueue;
-use api::infrastructure::git::storage::{GitStorageDriverConfig, build_git_storage};
-use api::infrastructure::plugins::filesystem_store::{
-    FilesystemPluginStore, PluginExecutionLimits,
-};
-use api::infrastructure::plugins::s3_store::{S3BackedPluginStore, S3PluginStoreConfig};
-use api::infrastructure::storage::PgStorageIngestQueue;
-use api::infrastructure::storage::PgStorageProjectionQueue;
-use api::infrastructure::storage::PgStorageReconcileJobs;
-
-#[derive(Parser)]
-#[command(name = "refmd", about = "Admin CLI for managing a refmd node", version)]
-struct Cli {
-    /// Override the database URL (defaults to DATABASE_URL env / config)
-    #[arg(long)]
-    database_url: Option<String>,
-
-    #[command(subcommand)]
-    command: Command,
-}
-
-#[derive(Subcommand)]
-enum Command {
-    /// User lifecycle and session management
-    Users {
-        #[command(subcommand)]
-        command: UserCommand,
-    },
-    /// Queue-level maintenance and enqueue helpers
-    Jobs {
-        #[command(subcommand)]
-        command: JobsCommand,
-    },
-    /// Workspace lifecycle and membership helpers
-    Workspaces {
-        #[command(subcommand)]
-        command: WorkspaceCommand,
-    },
-    /// Git workspace helpers
-    Git {
-        #[command(subcommand)]
-        command: GitCommand,
-    },
-    /// Plugin asset utilities
-    Plugins {
-        #[command(subcommand)]
-        command: PluginCommand,
-    },
-    /// API token management
-    Tokens {
-        #[command(subcommand)]
-        command: TokenCommand,
-    },
-    /// Share management
-    Shares {
-        #[command(subcommand)]
-        command: ShareCommand,
-    },
-}
-
-#[derive(Subcommand)]
-enum UserCommand {
-    /// List all users with their default workspace IDs
-    List,
-    /// Create a new user and provision their personal workspace
-    Create {
-        #[arg(long)]
-        email: String,
-        #[arg(long)]
-        name: String,
-        #[arg(long)]
-        password: String,
-        /// Optional explicit user ID (defaults to a new UUID v4)
-        #[arg(long)]
-        user_id: Option<Uuid>,
-    },
-    /// Update a user's password hash (optionally revoking active sessions)
-    SetPassword {
-        #[arg(long)]
-        user_id: Uuid,
-        #[arg(long)]
-        password: String,
-        #[arg(long, default_value_t = false)]
-        revoke_sessions: bool,
-    },
-    /// Delete a user (runs full account deletion path)
-    Delete {
-        #[arg(long)]
-        user_id: Uuid,
-    },
-    /// List sessions for a user
-    Sessions {
-        #[arg(long)]
-        user_id: Uuid,
-    },
-    /// Revoke all active sessions for a user
-    RevokeSessions {
-        #[arg(long)]
-        user_id: Uuid,
-    },
-}
-
-#[derive(Subcommand)]
-enum JobsCommand {
-    /// Storage ingest queue operations
-    Ingest {
-        #[command(subcommand)]
-        command: IngestCommand,
-    },
-    /// Storage projection job operations
-    Projection {
-        #[command(subcommand)]
-        command: ProjectionCommand,
-    },
-    /// Storage reconcile job operations
-    Reconcile {
-        #[command(subcommand)]
-        command: ReconcileCommand,
-    },
-    /// Git rebuild job operations
-    GitRebuild {
-        #[command(subcommand)]
-        command: GitRebuildCommand,
-    },
-}
-
-#[derive(Subcommand)]
-enum IngestCommand {
-    /// Print queue depth and age metrics
-    Stats,
-    /// Enqueue an ingest event for a workspace path
-    Enqueue {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        user_id: Uuid,
-        #[arg(long)]
-        repo_path: String,
-        #[arg(long, default_value = "fs")]
-        backend: String,
-        #[arg(long, value_enum)]
-        kind: IngestKindArg,
-        #[arg(long)]
-        content_hash: Option<String>,
-        /// Optional actor ID to attribute enqueueing
-        #[arg(long)]
-        actor_id: Option<Uuid>,
-    },
-}
-
-#[derive(Clone, Copy, ValueEnum, Debug)]
-enum IngestKindArg {
-    Upsert,
-    Delete,
-}
-
-impl From<IngestKindArg> for StorageIngestKind {
-    fn from(value: IngestKindArg) -> StorageIngestKind {
-        match value {
-            IngestKindArg::Upsert => StorageIngestKind::Upsert,
-            IngestKindArg::Delete => StorageIngestKind::Delete,
-        }
-    }
-}
-
-#[derive(Subcommand)]
-enum ProjectionCommand {
-    /// Print projection queue metrics
-    Stats,
-}
-
-#[derive(Subcommand)]
-enum ReconcileCommand {
-    /// Print reconcile queue metrics
-    Stats,
-    /// Enqueue a reconcile job for a workspace and scope (e.g. "full")
-    Enqueue {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        scope: String,
-    },
-}
-
-#[derive(Subcommand)]
-enum GitRebuildCommand {
-    /// Print git rebuild queue metrics
-    Stats,
-    /// Enqueue a git rebuild job for a workspace
-    Enqueue {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        actor_id: Option<Uuid>,
-    },
-}
-
-#[derive(Subcommand)]
-enum WorkspaceCommand {
-    /// List all workspaces
-    List,
-    /// Show members for a workspace
-    Members {
-        #[arg(long)]
-        workspace_id: Uuid,
-    },
-    /// Delete a workspace (cascades documents/files/shares)
-    Delete {
-        #[arg(long)]
-        workspace_id: Uuid,
-    },
-}
-
-#[derive(Subcommand)]
-enum TokenCommand {
-    /// List API tokens for a workspace
-    List {
-        #[arg(long)]
-        workspace_id: Uuid,
-    },
-    /// Create a new API token (prints plaintext once)
-    Create {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        owner_id: Uuid,
-        #[arg(long)]
-        name: Option<String>,
-    },
-    /// Revoke an API token
-    Revoke {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        token_id: Uuid,
-    },
-}
-
-#[derive(Subcommand)]
-enum ShareCommand {
-    /// List shares for a document
-    List {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        document_id: Uuid,
-    },
-    /// Revoke a share token
-    Revoke {
-        #[arg(long)]
-        workspace_id: Uuid,
-        #[arg(long)]
-        token: String,
-    },
-}
-
-#[derive(Subcommand)]
-enum GitCommand {
-    /// Show git workspace status summary
-    Status {
-        #[arg(long)]
-        workspace_id: Uuid,
-    },
-    /// List dirty changes tracked for a workspace
-    Changes {
-        #[arg(long)]
-        workspace_id: Uuid,
-    },
-    /// Remove git workspace data (DB + storage)
-    Remove {
-        #[arg(long)]
-        workspace_id: Uuid,
-    },
-}
-
-#[derive(Subcommand)]
-enum PluginCommand {
-    /// List latest global plugin manifests
-    ListGlobal,
-    /// Load a user-scoped plugin manifest
-    UserManifest {
-        #[arg(long)]
-        user_id: Uuid,
-        #[arg(long)]
-        plugin_id: String,
-        #[arg(long)]
-        version: String,
-    },
-    /// Remove a user's plugin directory for a plugin
-    RemoveUserDir {
-        #[arg(long)]
-        user_id: Uuid,
-        #[arg(long)]
-        plugin_id: String,
-    },
-}
-
-struct Deps {
-    pool: PgPool,
-    user_repo: SqlxUserRepository,
-    workspace_service: Arc<WorkspaceService>,
-    ingest_queue: PgStorageIngestQueue,
-    reconcile_jobs: PgStorageReconcileJobs,
-    git_rebuild_jobs: PgGitRebuildJobQueue,
-    session_repo: SqlxUserSessionRepository,
-    document_repo: SqlxDocumentRepository,
-    files_repo: SqlxFilesRepository,
-    plugin_installations: SqlxPluginInstallationRepository,
-    plugin_repo: SqlxPluginRepository,
-    api_tokens: SqlxApiTokenRepository,
-    shares_repo: SqlxSharesRepository,
-    plugin_assets: Arc<dyn PluginAssetStore>,
-    git_repo: api::infrastructure::db::repositories::git_repository_sqlx::SqlxGitRepository,
-    storage_jobs: PgStorageProjectionQueue,
-    git_workspace: Arc<CliGitWorkspace>,
-}
-
-struct CliGitWorkspace {
-    pool: PgPool,
-    git_storage: Arc<dyn GitStorage>,
-}
-
-impl CliGitWorkspace {
-    fn new(pool: PgPool, git_storage: Arc<dyn GitStorage>) -> Self {
-        Self { pool, git_storage }
-    }
-
-    async fn load_repository_state(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<(bool, String)>> {
-        let row = sqlx::query(
-            "SELECT initialized, default_branch FROM git_repository_state WHERE workspace_id = $1",
-        )
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| (r.get("initialized"), r.get("default_branch"))))
-    }
-
-    async fn latest_commit_meta(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<api::application::ports::git_storage::CommitMeta>> {
-        let row = sqlx::query(
-            r#"SELECT commit_id, parent_commit_id, message, author_name, author_email,
-                      committed_at, pack_key, file_hash_index
-               FROM git_commits
-               WHERE workspace_id = $1
-               ORDER BY committed_at DESC
-               LIMIT 1"#,
-        )
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        row.map(|r| row_to_commit_meta(r)).transpose()
-    }
-
-    async fn fetch_dirty(&self, workspace_id: Uuid) -> anyhow::Result<Vec<DirtyRow>> {
-        let rows = sqlx::query(
-            r#"SELECT path, is_text, op, content_hash
-               FROM git_dirty_files
-               WHERE workspace_id = $1
-               ORDER BY created_at ASC"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        let mut out = Vec::new();
-        for r in rows {
-            let path: String = r.get("path");
-            let op: String = r.get("op");
-            let content_hash: Option<String> = r.try_get("content_hash").ok();
-            out.push(DirtyRow {
-                path,
-                op,
-                content_hash,
-            });
-        }
-        Ok(out)
-    }
-}
-
-struct DirtyRow {
-    path: String,
-    op: String,
-    content_hash: Option<String>,
-}
-
-#[async_trait::async_trait]
-impl GitWorkspacePort for CliGitWorkspace {
-    async fn ensure_repository(
-        &self,
-        _workspace_id: Uuid,
-        _default_branch: &str,
-    ) -> anyhow::Result<()> {
-        bail!("ensure_repository not supported in refmd CLI");
-    }
-
-    async fn remove_repository(&self, workspace_id: Uuid) -> anyhow::Result<()> {
-        let mut tx = self.pool.begin().await?;
-        sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&mut *tx)
-            .await?;
-        sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&mut *tx)
-            .await?;
-        sqlx::query(
-            "UPDATE git_repository_state SET initialized = false, updated_at = now() WHERE workspace_id = $1",
-        )
-        .bind(workspace_id)
-        .execute(&mut *tx)
-        .await?;
-        tx.commit().await?;
-        self.git_storage.delete_all(workspace_id).await?;
-        Ok(())
-    }
-
-    async fn status(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<api::application::dto::git::GitWorkspaceStatus> {
-        let state = self.load_repository_state(workspace_id).await?;
-        let Some((initialized, branch)) = state else {
-            return Ok(api::application::dto::git::GitWorkspaceStatus {
-                repository_initialized: false,
-                current_branch: None,
-                uncommitted_changes: 0,
-                untracked_files: 0,
-            });
-        };
-        if !initialized {
-            return Ok(api::application::dto::git::GitWorkspaceStatus {
-                repository_initialized: false,
-                current_branch: Some(branch),
-                uncommitted_changes: 0,
-                untracked_files: 0,
-            });
-        }
-
-        let latest = self.latest_commit_meta(workspace_id).await?;
-        let previous_index: std::collections::HashMap<String, String> = latest
-            .as_ref()
-            .map(|c| c.file_hash_index.clone())
-            .unwrap_or_default();
-
-        let dirty = self.fetch_dirty(workspace_id).await?;
-        let mut added: u32 = 0;
-        let mut modified: u32 = 0;
-        let mut deleted: u32 = 0;
-
-        for d in dirty.iter() {
-            match d.op.as_str() {
-                "upsert" => {
-                    if let Some(prev_hash) = previous_index.get(&d.path) {
-                        match d.content_hash.as_ref() {
-                            Some(h) if h == prev_hash => {}
-                            _ => modified += 1,
-                        }
-                    } else {
-                        added += 1;
-                    }
-                }
-                "delete" => {
-                    deleted += 1;
-                }
-                _ => {}
-            }
-        }
-
-        Ok(api::application::dto::git::GitWorkspaceStatus {
-            repository_initialized: true,
-            current_branch: Some(branch),
-            uncommitted_changes: modified + deleted,
-            untracked_files: added,
-        })
-    }
-
-    async fn list_changes(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<api::application::dto::git::GitChangeItem>> {
-        if let Some((initialized, _)) = self.load_repository_state(workspace_id).await? {
-            if !initialized {
-                return Ok(Vec::new());
-            }
-        } else {
-            return Ok(Vec::new());
-        }
-
-        let latest = self.latest_commit_meta(workspace_id).await?;
-        let previous_index: std::collections::HashMap<String, String> = latest
-            .as_ref()
-            .map(|c| c.file_hash_index.clone())
-            .unwrap_or_default();
-        let dirty = self.fetch_dirty(workspace_id).await?;
-
-        let mut out = Vec::new();
-        for d in dirty {
-            let status = match d.op.as_str() {
-                "delete" => "deleted",
-                "upsert" => {
-                    if previous_index.contains_key(&d.path) {
-                        "modified"
-                    } else {
-                        "added"
-                    }
-                }
-                _ => "unknown",
-            };
-            out.push(api::application::dto::git::GitChangeItem {
-                path: d.path,
-                status: status.to_string(),
-            });
-        }
-        Ok(out)
-    }
-
-    async fn working_diff(
-        &self,
-        _workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<api::application::dto::diff::TextDiffResult>> {
-        bail!("working_diff not supported in refmd CLI");
-    }
-
-    async fn commit_diff(
-        &self,
-        _workspace_id: Uuid,
-        _from: &str,
-        _to: &str,
-    ) -> anyhow::Result<Vec<api::application::dto::diff::TextDiffResult>> {
-        bail!("commit_diff not supported in refmd CLI");
-    }
-
-    async fn history(
-        &self,
-        _workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<api::application::dto::git::GitCommitItem>> {
-        bail!("history not supported in refmd CLI");
-    }
-
-    async fn sync(
-        &self,
-        _workspace_id: Uuid,
-        _req: &api::application::dto::git::GitSyncRequestDto,
-        _cfg: Option<&api::application::ports::git_repository::UserGitCfg>,
-    ) -> anyhow::Result<api::application::dto::git::GitSyncResponseDto> {
-        bail!("sync not supported in refmd CLI");
-    }
-
-    async fn pull(
-        &self,
-        _workspace_id: Uuid,
-        _actor_id: Uuid,
-        _req: &api::application::dto::git::GitPullRequestDto,
-        _cfg: &api::application::ports::git_repository::UserGitCfg,
-    ) -> anyhow::Result<api::application::dto::git::GitPullResponseDto> {
-        bail!("pull not supported in refmd CLI");
-    }
-
-    async fn import_repository(
-        &self,
-        _workspace_id: Uuid,
-        _actor_id: Uuid,
-        _cfg: &api::application::ports::git_repository::UserGitCfg,
-    ) -> anyhow::Result<api::application::dto::git::GitImportResponseDto> {
-        bail!("import not supported in refmd CLI");
-    }
-
-    async fn head_commit(&self, workspace_id: Uuid) -> anyhow::Result<Option<Vec<u8>>> {
-        Ok(self
-            .latest_commit_meta(workspace_id)
-            .await?
-            .map(|m| m.commit_id))
-    }
-
-    async fn remote_head(
-        &self,
-        _workspace_id: Uuid,
-        _cfg: &api::application::ports::git_repository::UserGitCfg,
-    ) -> anyhow::Result<Option<Vec<u8>>> {
-        Ok(None)
-    }
-
-    async fn has_pending_changes(&self, workspace_id: Uuid) -> anyhow::Result<bool> {
-        let dirty_rows = self.fetch_dirty(workspace_id).await?;
-        Ok(!dirty_rows.is_empty())
-    }
-
-    async fn drift_since_commit(
-        &self,
-        workspace_id: Uuid,
-        base_commit: &[u8],
-    ) -> anyhow::Result<bool> {
-        // CLI helper: fallback to dirty check when full state comparison is not available.
-        if self.has_pending_changes(workspace_id).await? {
-            return Ok(true);
-        }
-        // If the base commit is not the latest, consider it stale.
-        let latest = self.latest_commit_meta(workspace_id).await?;
-        if let Some(meta) = latest {
-            if meta.commit_id.as_slice() != base_commit {
-                return Ok(true);
-            }
-        }
-        Ok(false)
-    }
-
-    async fn check_remote(
-        &self,
-        _workspace_id: Uuid,
-        _cfg: &api::application::ports::git_repository::UserGitCfg,
-    ) -> anyhow::Result<api::application::dto::git::GitRemoteCheckDto> {
-        Ok(api::application::dto::git::GitRemoteCheckDto {
-            ok: false,
-            message: "remote check not supported in CLI".to_string(),
-            reason: Some("unsupported".to_string()),
-        })
-    }
-}
-
-fn row_to_commit_meta(
-    row: sqlx::postgres::PgRow,
-) -> anyhow::Result<api::application::ports::git_storage::CommitMeta> {
-    let commit_id: Vec<u8> = row.get("commit_id");
-    let parent_commit_id: Option<Vec<u8>> = row.try_get("parent_commit_id").ok();
-    let message: Option<String> = row.try_get("message").ok();
-    let author_name: Option<String> = row.try_get("author_name").ok();
-    let author_email: Option<String> = row.try_get("author_email").ok();
-    let committed_at: DateTime<Utc> = row.get("committed_at");
-    let pack_key: String = row.get("pack_key");
-    let file_hash_index: Json<std::collections::HashMap<String, String>> =
-        row.get("file_hash_index");
-
-    Ok(api::application::ports::git_storage::CommitMeta {
-        commit_id,
-        parent_commit_id,
-        message,
-        author_name,
-        author_email,
-        committed_at,
-        pack_key,
-        file_hash_index: file_hash_index.0,
-    })
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    dotenvy::dotenv().ok();
-    let cli = Cli::parse();
-
-    let cfg = Config::from_env()?;
-    let database_url = cli.database_url.unwrap_or(cfg.database_url.clone());
-
-    let pool = db::connect_pool(&database_url)
-        .await
-        .context("failed to connect to database")?;
-
-    let user_repo = SqlxUserRepository::new(pool.clone());
-    let workspace_repo = SqlxWorkspaceRepository::new(pool.clone());
-    let workspace_service = Arc::new(WorkspaceService::new(Arc::new(workspace_repo)));
-    let ingest_queue = PgStorageIngestQueue::new(pool.clone());
-    let storage_jobs = PgStorageProjectionQueue::new(pool.clone());
-    let reconcile_jobs = PgStorageReconcileJobs::new(pool.clone());
-    let git_rebuild_jobs = PgGitRebuildJobQueue::new(pool.clone());
-    let session_repo = SqlxUserSessionRepository::new(pool.clone());
-    let document_repo = SqlxDocumentRepository::new(pool.clone());
-    let files_repo = SqlxFilesRepository::new(pool.clone());
-    let plugin_installations = SqlxPluginInstallationRepository::new(pool.clone());
-    let plugin_repo = SqlxPluginRepository::new(pool.clone());
-    let api_tokens = SqlxApiTokenRepository::new(pool.clone());
-    let shares_repo = SqlxSharesRepository::new(pool.clone());
-    let plugin_limits = {
-        let timeout = if cfg.plugin_timeout_secs == 0 {
-            None
-        } else {
-            Some(std::time::Duration::from_secs(cfg.plugin_timeout_secs))
-        };
-        let memory_pages_raw = cfg.plugin_memory_max_mb.saturating_mul(16);
-        let memory_max_pages = if memory_pages_raw == 0 {
-            None
-        } else {
-            Some(memory_pages_raw.min(u32::MAX as u64) as u32)
-        };
-        let fuel_limit = cfg
-            .plugin_fuel_limit
-            .and_then(|limit| if limit == 0 { None } else { Some(limit) });
-        PluginExecutionLimits::new(timeout, memory_max_pages, fuel_limit)
-    };
-    let plugin_assets: Arc<dyn PluginAssetStore> = match cfg.storage_backend {
-        api::bootstrap::config::StorageBackend::Filesystem => {
-            Arc::new(FilesystemPluginStore::new(&cfg.plugin_dir, plugin_limits)?)
- } - api::bootstrap::config::StorageBackend::S3 => { - let s3_cfg = S3PluginStoreConfig { - plugin_dir: cfg.plugin_dir.clone(), - bucket: cfg - .s3_bucket - .clone() - .context("S3_BUCKET must be configured when using S3 storage backend")?, - region: cfg.s3_region.clone(), - endpoint: cfg.s3_endpoint.clone(), - access_key: cfg.s3_access_key.clone(), - secret_key: cfg.s3_secret_key.clone(), - use_path_style: cfg.s3_use_path_style, - }; - Arc::new(S3BackedPluginStore::new(&s3_cfg, plugin_limits).await?) - } - }; - let git_repo = - api::infrastructure::db::repositories::git_repository_sqlx::SqlxGitRepository::new( - pool.clone(), - cfg.encryption_key.clone(), - ); - let git_storage_cfg = match cfg.storage_backend { - api::bootstrap::config::StorageBackend::Filesystem => GitStorageDriverConfig::Filesystem { - root: PathBuf::from(cfg.storage_root.clone()), - }, - api::bootstrap::config::StorageBackend::S3 => { - let s3_settings = api::infrastructure::git::storage::S3GitStorageConfig { - storage_root_prefix: cfg.storage_root.clone(), - bucket: cfg - .s3_bucket - .clone() - .context("S3_BUCKET must be configured when using S3 storage backend")?, - region: cfg.s3_region.clone(), - endpoint: cfg.s3_endpoint.clone(), - access_key: cfg.s3_access_key.clone(), - secret_key: cfg.s3_secret_key.clone(), - use_path_style: cfg.s3_use_path_style, - }; - GitStorageDriverConfig::S3(s3_settings) - } - }; - let git_storage = build_git_storage(git_storage_cfg).await?; - let git_workspace = Arc::new(CliGitWorkspace::new(pool.clone(), git_storage.clone())); - - let deps = Deps { - pool, - user_repo, - workspace_service, - ingest_queue, - storage_jobs, - reconcile_jobs, - git_rebuild_jobs, - session_repo, - document_repo, - files_repo, - plugin_installations, - plugin_repo, - api_tokens, - shares_repo, - plugin_assets, - git_repo, - git_workspace, - }; - - match cli.command { - Command::Users { command } => handle_users(&deps, command).await?, - Command::Jobs { command } => handle_jobs(&deps, command).await?, - Command::Workspaces { command } => handle_workspaces(&deps, command).await?, - Command::Git { command } => handle_git(&deps, command).await?, - Command::Plugins { command } => handle_plugins(&deps, command).await?, - Command::Tokens { command } => handle_tokens(&deps, command).await?, - Command::Shares { command } => handle_shares(&deps, command).await?, - } - - Ok(()) -} - -async fn handle_users(deps: &Deps, cmd: UserCommand) -> Result<()> { - match cmd { - UserCommand::List => list_users(&deps.pool).await, - UserCommand::Create { - email, - name, - password, - user_id, - } => { - create_user( - &deps.user_repo, - deps.workspace_service.as_ref(), - email, - name, - password, - user_id, - ) - .await - } - UserCommand::SetPassword { - user_id, - password, - revoke_sessions, - } => { - set_password( - &deps.pool, - &deps.session_repo, - user_id, - password, - revoke_sessions, - ) - .await - } - UserCommand::Delete { user_id } => delete_user(deps, user_id).await, - UserCommand::Sessions { user_id } => list_sessions(&deps.session_repo, user_id).await, - UserCommand::RevokeSessions { user_id } => { - deps.session_repo.revoke_all_for_user(user_id).await?; - println!("revoked sessions for user {user_id}"); - Ok(()) - } - } -} - -async fn handle_jobs(deps: &Deps, cmd: JobsCommand) -> Result<()> { - match cmd { - JobsCommand::Ingest { command } => match command { - IngestCommand::Stats => print_ingest_stats(&deps.ingest_queue).await, - IngestCommand::Enqueue { - workspace_id, - user_id, - repo_path, - backend, - 
kind, - content_hash, - actor_id, - } => { - enqueue_ingest( - &deps.ingest_queue, - workspace_id, - user_id, - actor_id, - repo_path, - backend, - kind, - content_hash, - ) - .await - } - }, - JobsCommand::Projection { command } => match command { - ProjectionCommand::Stats => print_projection_stats(&deps.pool).await, - }, - JobsCommand::Reconcile { command } => match command { - ReconcileCommand::Stats => print_reconcile_stats(&deps.pool).await, - ReconcileCommand::Enqueue { - workspace_id, - scope, - } => { - deps.reconcile_jobs - .enqueue(workspace_id, scope.trim()) - .await?; - println!( - "enqueued reconcile job workspace={workspace_id} scope={}", - scope.trim() - ); - Ok(()) - } - }, - JobsCommand::GitRebuild { command } => match command { - GitRebuildCommand::Stats => print_git_rebuild_stats(&deps.pool).await, - GitRebuildCommand::Enqueue { - workspace_id, - actor_id, - } => { - let permissions = PermissionSet::all().to_vec(); - deps.git_rebuild_jobs - .enqueue(workspace_id, actor_id, &permissions) - .await?; - println!( - "enqueued git rebuild workspace={} actor_id={:?}", - workspace_id, actor_id - ); - Ok(()) - } - }, - } -} - -async fn handle_workspaces(deps: &Deps, cmd: WorkspaceCommand) -> Result<()> { - match cmd { - WorkspaceCommand::List => list_workspaces(&deps.pool).await, - WorkspaceCommand::Members { workspace_id } => { - list_workspace_members(&deps.pool, workspace_id).await - } - WorkspaceCommand::Delete { workspace_id } => { - match deps - .workspace_service - .delete_workspace(workspace_id) - .await? - { - true => println!("deleted workspace {}", workspace_id), - false => println!("workspace {} not found", workspace_id), - } - Ok(()) - } - } -} - -async fn handle_tokens(deps: &Deps, cmd: TokenCommand) -> Result<()> { - match cmd { - TokenCommand::List { workspace_id } => list_tokens(&deps.api_tokens, workspace_id).await, - TokenCommand::Create { - workspace_id, - owner_id, - name, - } => create_token(&deps.api_tokens, workspace_id, owner_id, name.as_deref()).await, - TokenCommand::Revoke { - workspace_id, - token_id, - } => { - let revoked = deps.api_tokens.revoke(workspace_id, token_id).await?; - if revoked { - println!("revoked token {}", token_id); - } else { - println!("token {} not found or already revoked", token_id); - } - Ok(()) - } - } -} - -async fn handle_shares(deps: &Deps, cmd: ShareCommand) -> Result<()> { - match cmd { - ShareCommand::List { - workspace_id, - document_id, - } => list_shares(&deps.shares_repo, workspace_id, document_id).await, - ShareCommand::Revoke { - workspace_id, - token, - } => { - let removed = deps - .shares_repo - .delete_share(workspace_id, token.trim()) - .await?; - if removed { - println!("revoked share token {}", token.trim()); - } else { - println!("share token {} not found", token.trim()); - } - Ok(()) - } - } -} - -async fn handle_git(deps: &Deps, cmd: GitCommand) -> Result<()> { - match cmd { - GitCommand::Status { workspace_id } => { - let status = deps.git_workspace.status(workspace_id).await?; - println!( - "initialized={} branch={:?} uncommitted_changes={} untracked_files={}", - status.repository_initialized, - status.current_branch, - status.uncommitted_changes, - status.untracked_files - ); - Ok(()) - } - GitCommand::Changes { workspace_id } => { - let changes = deps.git_workspace.list_changes(workspace_id).await?; - println!("{} change(s)", changes.len()); - for c in changes { - println!("{} {}", c.status, c.path); - } - Ok(()) - } - GitCommand::Remove { workspace_id } => { - 
deps.git_workspace.remove_repository(workspace_id).await?;
-            println!("removed git workspace {}", workspace_id);
-            Ok(())
-        }
-    }
-}
-
-async fn handle_plugins(deps: &Deps, cmd: PluginCommand) -> Result<()> {
-    match cmd {
-        PluginCommand::ListGlobal => {
-            let manifests = deps.plugin_assets.list_latest_global_manifests().await?;
-            println!("{} global plugin(s)", manifests.len());
-            for (plugin_id, version, manifest) in manifests {
-                println!(
-                    "{}@{} manifest={}",
-                    plugin_id,
-                    version,
-                    serde_json::to_string(&manifest)?
-                );
-            }
-            Ok(())
-        }
-        PluginCommand::UserManifest {
-            user_id,
-            plugin_id,
-            version,
-        } => {
-            match deps
-                .plugin_assets
-                .load_user_manifest(&user_id, &plugin_id, &version)
-                .await?
-            {
-                Some(manifest) => {
-                    println!(
-                        "manifest for {} user {}:\n{}",
-                        plugin_id,
-                        user_id,
-                        serde_json::to_string_pretty(&manifest)?
-                    );
-                }
-                None => println!(
-                    "manifest not found for plugin={} user={} version={}",
-                    plugin_id, user_id, version
-                ),
-            }
-            Ok(())
-        }
-        PluginCommand::RemoveUserDir { user_id, plugin_id } => {
-            deps.plugin_assets
-                .remove_user_plugin_dir(&user_id, &plugin_id)
-                .await?;
-            println!(
-                "removed plugin data for user {} plugin {}",
-                user_id, plugin_id
-            );
-            Ok(())
-        }
-    }
-}
-
-async fn list_users(pool: &PgPool) -> Result<()> {
-    let rows = sqlx::query(
-        r#"SELECT id, email, name, default_workspace_id, created_at
-           FROM users
-           ORDER BY created_at"#,
-    )
-    .fetch_all(pool)
-    .await?;
-
-    println!("{} user(s)", rows.len());
-    for row in rows {
-        let id: Uuid = row.get("id");
-        let email: String = row.get("email");
-        let name: String = row.get("name");
-        let workspace_id: Uuid = row.get("default_workspace_id");
-        let created_at: DateTime<Utc> = row.get("created_at");
-        println!(
-            "{id} | {email} | {name} | default_ws={workspace_id} | created_at={}",
-            created_at.to_rfc3339()
-        );
-    }
-    Ok(())
-}
-
-async fn list_sessions(repo: &SqlxUserSessionRepository, user_id: Uuid) -> Result<()> {
-    let sessions = repo.list_for_user(user_id).await?;
-    println!("{} session(s) for user {}", sessions.len(), user_id);
-    for s in sessions {
-        println!(
-            "{} | workspace={} | remember={} | last_seen={} | created_at={} | revoked_at={}",
-            s.id,
-            s.workspace_id,
-            s.remember_me,
-            s.last_seen_at.to_rfc3339(),
-            s.created_at.to_rfc3339(),
-            s.revoked_at
-                .map(|t| t.to_rfc3339())
-                .unwrap_or_else(|| "-".to_string())
-        );
-    }
-    Ok(())
-}
-
-async fn create_user(
-    user_repo: &SqlxUserRepository,
-    workspace_service: &WorkspaceService,
-    email: String,
-    name: String,
-    password: String,
-    explicit_user_id: Option<Uuid>,
-) -> Result<()> {
-    let normalized_email = email.trim();
-    ensure!(!normalized_email.is_empty(), "email must not be empty");
-    ensure!(!password.trim().is_empty(), "password must not be empty");
-
-    let user_id = explicit_user_id.unwrap_or_else(Uuid::new_v4);
-    workspace_service
-        .create_personal_workspace_shell(user_id, name.trim())
-        .await?;
-
-    let register = Register { repo: user_repo };
-    let req = RegisterRequest {
-        id: user_id,
-        email: normalized_email.to_string(),
-        name: name.trim().to_string(),
-        password,
-        default_workspace_id: user_id,
-    };
-
-    let user = match register.execute(&req).await {
-        Ok(user) => user,
-        Err(err) => {
-            let _ = workspace_service.delete_workspace(user_id).await;
-            return Err(err.context("failed to create user"));
-        }
-    };
-
-    workspace_service
-        .ensure_owner_membership(user_id, user_id)
-        .await?;
-
-    println!(
-        "created user id={} email={} default_workspace={}",
-        user.id, user.email, user_id
-    );
-    Ok(())
-}
-
-async fn delete_user(deps: &Deps, user_id: Uuid) -> Result<()> {
-    let uc = DeleteAccount {
-        user_repo: &deps.user_repo,
-        document_repo: &deps.document_repo,
-        plugin_installations: &deps.plugin_installations,
-        plugin_repo: &deps.plugin_repo,
-        plugin_assets: deps.plugin_assets.clone(),
-        git_repo: &deps.git_repo,
-        git_workspace: deps.git_workspace.as_ref(),
-        storage_jobs: &deps.storage_jobs,
-        files_repo: &deps.files_repo,
-    };
-    uc.execute(user_id).await?;
-    let _ = deps.workspace_service.delete_workspace(user_id).await?;
-    println!("deleted user {}", user_id);
-    Ok(())
-}
-
-async fn set_password(
-    pool: &PgPool,
-    session_repo: &SqlxUserSessionRepository,
-    user_id: Uuid,
-    password: String,
-    revoke_sessions: bool,
-) -> Result<()> {
-    ensure!(!password.trim().is_empty(), "password must not be empty");
-
-    let salt = SaltString::generate(&mut OsRng);
-    let hash = Argon2::default()
-        .hash_password(password.as_bytes(), &salt)
-        .map_err(|e| anyhow!(e.to_string()))?
-        .to_string();
-
-    let res = sqlx::query("UPDATE users SET password_hash = $2, updated_at = now() WHERE id = $1")
-        .bind(user_id)
-        .bind(hash)
-        .execute(pool)
-        .await?;
-
-    if res.rows_affected() == 0 {
-        bail!("user not found");
-    }
-
-    if revoke_sessions {
-        session_repo.revoke_all_for_user(user_id).await?;
-        println!("password updated and sessions revoked for user {user_id}");
-    } else {
-        println!("password updated for user {user_id}");
-    }
-
-    Ok(())
-}
-
-async fn list_workspaces(pool: &PgPool) -> Result<()> {
-    let rows = sqlx::query(
-        r#"SELECT id, name, slug, is_personal, created_at
-           FROM workspaces
-           ORDER BY created_at"#,
-    )
-    .fetch_all(pool)
-    .await?;
-    println!("{} workspace(s)", rows.len());
-    for row in rows {
-        let id: Uuid = row.get("id");
-        let name: String = row.get("name");
-        let slug: String = row.get("slug");
-        let is_personal: bool = row.get("is_personal");
-        let created_at: DateTime<Utc> = row.get("created_at");
-        println!(
-            "{} | {} | slug={} | personal={} | created_at={}",
-            id,
-            name,
-            slug,
-            is_personal,
-            created_at.to_rfc3339()
-        );
-    }
-    Ok(())
-}
-
-async fn list_workspace_members(pool: &PgPool, workspace_id: Uuid) -> Result<()> {
-    let rows = sqlx::query(
-        r#"SELECT m.user_id, u.email, u.name, m.role_kind, m.system_role, m.custom_role_id, m.is_default, m.joined_at
-           FROM workspace_members m
-           JOIN users u ON u.id = m.user_id
-           WHERE m.workspace_id = $1
-           ORDER BY m.joined_at"#,
-    )
-    .bind(workspace_id)
-    .fetch_all(pool)
-    .await?;
-    println!("{} member(s) for workspace {}", rows.len(), workspace_id);
-    for row in rows {
-        let user_id: Uuid = row.get("user_id");
-        let email: String = row.get("email");
-        let name: String = row.get("name");
-        let role_kind: String = row.get("role_kind");
-        let system_role: Option<String> = row.try_get("system_role").ok();
-        let custom_role_id: Option<Uuid> = row.try_get("custom_role_id").ok();
-        let is_default: bool = row.get("is_default");
-        let joined_at: DateTime<Utc> = row.get("joined_at");
-        println!(
-            "{} | {} | {} | role_kind={} system_role={:?} custom_role_id={:?} default={} joined_at={}",
-            user_id,
-            email,
-            name,
-            role_kind,
-            system_role,
-            custom_role_id,
-            is_default,
-            joined_at.to_rfc3339()
-        );
-    }
-    Ok(())
-}
-
-async fn list_tokens(repo: &SqlxApiTokenRepository, workspace_id: Uuid) -> Result<()> {
-    let tokens = repo.list_active(workspace_id).await?;
-    println!("{} token(s) in workspace {}", tokens.len(), workspace_id);
-    for t in tokens {
-        println!(
-            "{} | name={} | owner={} | created_at={} | last_used={:?} | revoked={:?}",
-            t.id,
t.name, - t.owner_id, - t.created_at.to_rfc3339(), - t.last_used_at.map(|d| d.to_rfc3339()), - t.revoked_at.map(|d| d.to_rfc3339()) - ); - } - Ok(()) -} - -async fn create_token( - repo: &SqlxApiTokenRepository, - workspace_id: Uuid, - owner_id: Uuid, - name: Option<&str>, -) -> Result<()> { - let generated = generate_api_token()?; - let stored = repo - .create( - workspace_id, - owner_id, - name.unwrap_or("cli-token"), - &generated.token_hash, - &generated.token_digest, - ) - .await?; - println!("created token {} name={}", stored.id, stored.name); - println!("plaintext={}", generated.plaintext); - println!("digest={}", generated.token_digest); - Ok(()) -} - -async fn list_shares( - repo: &SqlxSharesRepository, - workspace_id: Uuid, - document_id: Uuid, -) -> Result<()> { - let shares = repo.list_document_shares(workspace_id, document_id).await?; - println!( - "{} share(s) for document {} in workspace {}", - shares.len(), - document_id, - workspace_id - ); - for s in shares { - println!( - "{} | token={} | perm={} | expires_at={:?} | parent_share_id={:?} | created_at={}", - s.id, - s.token, - s.permission, - s.expires_at.map(|d| d.to_rfc3339()), - s.parent_share_id, - s.created_at.to_rfc3339() - ); - } - Ok(()) -} - -async fn print_ingest_stats(queue: &PgStorageIngestQueue) -> Result<()> { - let stats = queue.stats().await?; - println!("storage_ingest.pending={}", stats.pending); - println!("storage_ingest.locked={}", stats.locked); - println!("storage_ingest.distinct_users={}", stats.distinct_users); - match stats.oldest_created_at { - Some(ts) => println!( - "storage_ingest.oldest_pending_age_secs={}", - (Utc::now() - ts).num_seconds() - ), - None => println!("storage_ingest.oldest_pending_age_secs=-"), - } - Ok(()) -} - -async fn enqueue_ingest( - queue: &PgStorageIngestQueue, - workspace_id: Uuid, - user_id: Uuid, - actor_id: Option, - repo_path: String, - backend: String, - kind: IngestKindArg, - content_hash: Option, -) -> Result<()> { - let permissions = PermissionSet::all().to_vec(); - queue - .enqueue_event( - workspace_id, - user_id, - actor_id.or(Some(user_id)), - repo_path.trim(), - backend.trim(), - kind.into(), - content_hash.as_deref(), - None, - &permissions, - ) - .await?; - - println!( - "enqueued ingest workspace={} user={} repo_path={} backend={} kind={:?}", - workspace_id, - user_id, - repo_path.trim(), - backend.trim(), - kind - ); - Ok(()) -} - -async fn print_projection_stats(pool: &PgPool) -> Result<()> { - let row = sqlx::query( - r#"SELECT - COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending, - COUNT(*) FILTER (WHERE locked_at IS NOT NULL) AS locked, - COUNT(*) FILTER (WHERE pending_retry) AS retrying, - COUNT(*) AS total, - MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_created_at - FROM storage_projection_jobs"#, - ) - .fetch_one(pool) - .await?; - - let pending: i64 = row.try_get("pending").unwrap_or(0); - let locked: i64 = row.try_get("locked").unwrap_or(0); - let retrying: i64 = row.try_get("retrying").unwrap_or(0); - let total: i64 = row.try_get("total").unwrap_or(0); - let oldest_created_at: Option> = row.try_get("oldest_created_at").ok(); - - println!("storage_projection.total={total}"); - println!("storage_projection.pending={pending}"); - println!("storage_projection.locked={locked}"); - println!("storage_projection.retrying={retrying}"); - match oldest_created_at { - Some(ts) => println!( - "storage_projection.oldest_pending_age_secs={}", - (Utc::now() - ts).num_seconds() - ), - None => 
println!("storage_projection.oldest_pending_age_secs=-"), - } - Ok(()) -} - -async fn print_reconcile_stats(pool: &PgPool) -> Result<()> { - let row = sqlx::query( - r#"SELECT - COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending, - COUNT(*) FILTER (WHERE locked_at IS NOT NULL) AS locked, - COUNT(*) FILTER (WHERE pending_retry) AS retrying, - COUNT(*) AS total, - MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_created_at - FROM storage_reconcile_jobs"#, - ) - .fetch_one(pool) - .await?; - - let pending: i64 = row.try_get("pending").unwrap_or(0); - let locked: i64 = row.try_get("locked").unwrap_or(0); - let retrying: i64 = row.try_get("retrying").unwrap_or(0); - let total: i64 = row.try_get("total").unwrap_or(0); - let oldest_created_at: Option> = row.try_get("oldest_created_at").ok(); - - println!("storage_reconcile.total={total}"); - println!("storage_reconcile.pending={pending}"); - println!("storage_reconcile.locked={locked}"); - println!("storage_reconcile.retrying={retrying}"); - match oldest_created_at { - Some(ts) => println!( - "storage_reconcile.oldest_pending_age_secs={}", - (Utc::now() - ts).num_seconds() - ), - None => println!("storage_reconcile.oldest_pending_age_secs=-"), - } - Ok(()) -} - -async fn print_git_rebuild_stats(pool: &PgPool) -> Result<()> { - let row = sqlx::query( - r#"SELECT - COUNT(*) FILTER (WHERE locked_at IS NULL) AS pending, - COUNT(*) FILTER (WHERE locked_at IS NOT NULL) AS locked, - COUNT(*) FILTER (WHERE pending_retry) AS retrying, - COUNT(*) AS total, - MIN(updated_at) FILTER (WHERE locked_at IS NOT NULL) AS oldest_locked_at, - MIN(created_at) FILTER (WHERE locked_at IS NULL) AS oldest_pending_created - FROM git_rebuild_jobs"#, - ) - .fetch_one(pool) - .await?; - - let pending: i64 = row.try_get("pending").unwrap_or(0); - let locked: i64 = row.try_get("locked").unwrap_or(0); - let retrying: i64 = row.try_get("retrying").unwrap_or(0); - let total: i64 = row.try_get("total").unwrap_or(0); - let oldest_locked_at: Option> = row.try_get("oldest_locked_at").ok(); - let oldest_pending: Option> = row.try_get("oldest_pending_created").ok(); - - println!("git_rebuild.total={total}"); - println!("git_rebuild.pending={pending}"); - println!("git_rebuild.locked={locked}"); - println!("git_rebuild.retrying={retrying}"); - match oldest_pending { - Some(ts) => println!( - "git_rebuild.oldest_pending_age_secs={}", - (Utc::now() - ts).num_seconds() - ), - None => println!("git_rebuild.oldest_pending_age_secs=-"), - } - match oldest_locked_at { - Some(ts) => println!( - "git_rebuild.oldest_locked_age_secs={}", - (Utc::now() - ts).num_seconds() - ), - None => println!("git_rebuild.oldest_locked_age_secs=-"), - } - Ok(()) -} diff --git a/api/src/bin/storage_ingest.rs b/api/src/bin/storage_ingest.rs deleted file mode 100644 index b9389ac5..00000000 --- a/api/src/bin/storage_ingest.rs +++ /dev/null @@ -1,114 +0,0 @@ -use anyhow::Context; -use clap::{Parser, Subcommand, ValueEnum}; -use uuid::Uuid; - -use api::application::ports::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue}; -use api::bootstrap::config::Config; -use api::domain::workspaces::permissions::PermissionSet; -use api::infrastructure::db; -use api::infrastructure::storage::PgStorageIngestQueue; - -#[derive(Parser)] -#[command(about = "Inspect and enqueue storage ingest events", version)] -struct Cli { - /// Override the database URL (defaults to DATABASE_URL env / config) - #[arg(long)] - database_url: Option, - - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand)] 
-enum Command {
-    /// Print queue depth and age metrics
-    Stats,
-    /// Enqueue a new ingest event (for CI/Bot flows)
-    Enqueue {
-        #[arg(long)]
-        user_id: Uuid,
-        #[arg(long)]
-        repo_path: String,
-        #[arg(long, default_value = "fs")]
-        backend: String,
-        #[arg(long, value_enum)]
-        kind: KindArg,
-        #[arg(long)]
-        content_hash: Option<String>,
-    },
-}
-
-#[derive(Clone, Copy, ValueEnum, Debug)]
-enum KindArg {
-    Upsert,
-    Delete,
-}
-
-impl From<KindArg> for StorageIngestKind {
-    fn from(value: KindArg) -> StorageIngestKind {
-        match value {
-            KindArg::Upsert => StorageIngestKind::Upsert,
-            KindArg::Delete => StorageIngestKind::Delete,
-        }
-    }
-}
-
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    dotenvy::dotenv().ok();
-    let cli = Cli::parse();
-
-    let database_url = match cli.database_url {
-        Some(url) => url,
-        None => Config::from_env()?.database_url,
-    };
-
-    let pool = db::connect_pool(&database_url)
-        .await
-        .context("failed to connect to database")?;
-    let queue = PgStorageIngestQueue::new(pool);
-
-    match cli.command {
-        Command::Stats => {
-            let stats = queue.stats().await?;
-            println!("pending: {}", stats.pending);
-            println!("locked: {}", stats.locked);
-            println!("distinct_users: {}", stats.distinct_users);
-            if let Some(oldest) = stats.oldest_created_at {
-                println!(
-                    "oldest_pending_age_secs: {}",
-                    (chrono::Utc::now() - oldest).num_seconds()
-                );
-            } else {
-                println!("oldest_pending_age_secs: -");
-            }
-        }
-        Command::Enqueue {
-            user_id,
-            repo_path,
-            backend,
-            kind,
-            content_hash,
-        } => {
-            let permissions = PermissionSet::all().to_vec();
-            queue
-                .enqueue_event(
-                    user_id,
-                    user_id,
-                    None,
-                    repo_path.trim(),
-                    backend.trim(),
-                    kind.into(),
-                    content_hash.as_deref(),
-                    None,
-                    &permissions,
-                )
-                .await?;
-            println!(
-                "enqueued ingest event user={} path={} backend={} kind={:?}",
-                user_id, repo_path, backend, kind
-            );
-        }
-    }
-    Ok(())
-}
diff --git a/api/src/bootstrap/mod.rs b/api/src/bootstrap/mod.rs
deleted file mode 100644
index ef68c369..00000000
--- a/api/src/bootstrap/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod config;
diff --git a/api/src/domain/documents/document.rs b/api/src/domain/documents/document.rs
deleted file mode 100644
index c446efd9..00000000
--- a/api/src/domain/documents/document.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use uuid::Uuid;
-
-#[derive(Debug, Clone)]
-pub struct Document {
-    pub id: Uuid,
-    pub owner_id: Uuid,
-    pub owner_user_id: Option<Uuid>,
-    pub workspace_id: Uuid,
-    pub title: String,
-    pub parent_id: Option<Uuid>,
-    pub doc_type: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-    pub created_by_plugin: Option<String>,
-    pub slug: String,
-    pub desired_path: String,
-    pub path: Option<String>,
-    pub created_by: Option<Uuid>,
-    pub archived_at: Option<chrono::DateTime<chrono::Utc>>,
-    pub archived_by: Option<Uuid>,
-    pub archived_parent_id: Option<Uuid>,
-}
-
-#[derive(Debug, Clone)]
-pub struct SearchHit {
-    pub id: Uuid,
-    pub title: String,
-    pub doc_type: String,
-    pub path: Option<String>,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-}
-
-#[derive(Debug, Clone)]
-pub struct BacklinkInfo {
-    pub document_id: Uuid,
-    pub title: String,
-    pub document_type: String,
-    pub file_path: Option<String>,
-    pub link_type: String,
-    pub link_text: Option<String>,
-    pub link_count: i64,
-}
-
-#[derive(Debug, Clone)]
-pub struct OutgoingLink {
-    pub document_id: Uuid,
-    pub title: String,
-    pub document_type: String,
-    pub file_path: Option<String>,
-    pub link_type: String,
-    pub link_text: Option<String>,
-    pub position_start: Option<i32>,
-    pub position_end: Option<i32>,
-}
diff --git a/api/src/domain/documents/mod.rs b/api/src/domain/documents/mod.rs
deleted file mode 100644
index b3316091..00000000
--- a/api/src/domain/documents/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod document;
diff --git a/api/src/domain/mod.rs b/api/src/domain/mod.rs
deleted file mode 100644
index f2c5f89d..00000000
--- a/api/src/domain/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub mod documents;
-pub mod workspaces;
diff --git a/api/src/infrastructure/db/repositories/access_repository_sqlx.rs b/api/src/infrastructure/db/repositories/access_repository_sqlx.rs
deleted file mode 100644
index d501b084..00000000
--- a/api/src/infrastructure/db/repositories/access_repository_sqlx.rs
+++ /dev/null
@@ -1,142 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::access_repository::{AccessRepository, DocumentUserAccess};
-use crate::domain::workspaces::permissions::{
-    PermissionSet, apply_custom_overrides, system_role_permissions,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxAccessRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxAccessRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl AccessRepository for SqlxAccessRepository {
-    async fn resolve_user_document_access(
-        &self,
-        doc_id: Uuid,
-        user_id: Uuid,
-    ) -> anyhow::Result<Option<DocumentUserAccess>> {
-        let rows = sqlx::query(
-            r#"SELECT d.workspace_id,
-                      d.archived_at,
-                      m.role_kind,
-                      m.system_role,
-                      m.custom_role_id,
-                      r.base_role AS custom_base_role,
-                      p.permission,
-                      p.allowed
-               FROM documents d
-               JOIN workspace_members m
-                 ON m.workspace_id = d.workspace_id
-                AND m.user_id = $2
-               LEFT JOIN workspace_roles r ON r.id = m.custom_role_id
-               LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id
-               WHERE d.id = $1"#,
-        )
-        .bind(doc_id)
-        .bind(user_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        if rows.is_empty() {
-            return Ok(None);
-        }
-
-        let first = &rows[0];
-        let workspace_id = first.get("workspace_id");
-        let archived = first
-            .try_get::<Option<chrono::DateTime<chrono::Utc>>, _>("archived_at")
-            .ok()
-            .flatten()
-            .is_some();
-
-        let role_kind: String = first.get("role_kind");
-        let system_role = first
-            .try_get::<Option<String>, _>("system_role")
-            .ok()
-            .flatten();
-        let custom_base_role = first
-            .try_get::<Option<String>, _>("custom_base_role")
-            .ok()
-            .flatten();
-
-        let mut overrides = Vec::new();
-        for row in rows {
-            if let (Some(permission), Some(allowed)) = (
-                row.try_get::<Option<String>, _>("permission")
-                    .ok()
-                    .flatten(),
-                row.try_get::<Option<bool>, _>("allowed").ok().flatten(),
-            ) {
-                overrides.push((permission, allowed));
-            }
-        }
-
-        let permissions = build_permission_set(
-            &role_kind,
-            system_role.as_deref(),
-            custom_base_role.as_deref(),
-            overrides,
-        );
-
-        Ok(Some(DocumentUserAccess {
-            workspace_id,
-            is_archived: archived,
-            permissions,
-        }))
-    }
-
-    async fn is_document_public(&self, doc_id: Uuid) -> anyhow::Result<bool> {
-        let count = sqlx::query_scalar::<_, i64>(
-            "SELECT COUNT(1) FROM public_documents WHERE document_id = $1",
-        )
-        .bind(doc_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(count > 0)
-    }
-
-    async fn is_document_archived(&self, doc_id: Uuid) -> anyhow::Result<bool> {
-        let archived = sqlx::query_scalar::<_, bool>(
-            "SELECT archived_at IS NOT NULL FROM documents WHERE id = $1",
-        )
-        .bind(doc_id)
-        .fetch_optional(&self.pool)
-        .await?
-        .unwrap_or(false);
-        Ok(archived)
-    }
-}
-
-fn build_permission_set(
-    role_kind: &str,
-    system_role: Option<&str>,
-    custom_base_role: Option<&str>,
-    overrides: Vec<(String, bool)>,
-) -> PermissionSet {
-    let set = match role_kind {
-        "system" => {
-            let role = system_role.unwrap_or("viewer");
-            system_role_permissions(role)
-        }
-        "custom" => {
-            let base = custom_base_role.unwrap_or("viewer");
-            system_role_permissions(base)
-        }
-        _ => system_role_permissions("viewer"),
-    };
-    if overrides.is_empty() {
-        set
-    } else {
-        apply_custom_overrides(set, overrides)
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/api_token_repository_sqlx.rs b/api/src/infrastructure/db/repositories/api_token_repository_sqlx.rs
deleted file mode 100644
index 66bb36ff..00000000
--- a/api/src/infrastructure/db/repositories/api_token_repository_sqlx.rs
+++ /dev/null
@@ -1,129 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::api_token_repository::{
-    ApiToken, ApiTokenRepository, ApiTokenSecret,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxApiTokenRepository {
-    pool: PgPool,
-}
-
-impl SqlxApiTokenRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl ApiTokenRepository for SqlxApiTokenRepository {
-    async fn create(
-        &self,
-        workspace_id: Uuid,
-        owner_id: Uuid,
-        name: &str,
-        token_hash: &str,
-        token_digest: &str,
-    ) -> anyhow::Result<ApiToken> {
-        let row = sqlx::query(
-            r#"INSERT INTO api_tokens (workspace_id, owner_id, name, token_hash, token_digest)
-               VALUES ($1, $2, $3, $4, $5)
-               RETURNING id, workspace_id, owner_id, name, created_at, last_used_at, revoked_at"#,
-        )
-        .bind(workspace_id)
-        .bind(owner_id)
-        .bind(name)
-        .bind(token_hash)
-        .bind(token_digest)
-        .fetch_one(&self.pool)
-        .await?;
-
-        Ok(ApiToken {
-            id: row.get("id"),
-            workspace_id: row.get("workspace_id"),
-            owner_id: row.get("owner_id"),
-            name: row.get("name"),
-            created_at: row.get("created_at"),
-            last_used_at: row.try_get("last_used_at").ok(),
-            revoked_at: row.try_get("revoked_at").ok(),
-        })
-    }
-
-    async fn list_active(&self, workspace_id: Uuid) -> anyhow::Result<Vec<ApiToken>> {
-        let rows = sqlx::query(
-            r#"SELECT id, workspace_id, owner_id, name, created_at, last_used_at, revoked_at
-               FROM api_tokens
-               WHERE workspace_id = $1
-               ORDER BY created_at DESC"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        Ok(rows
-            .into_iter()
-            .map(|row| ApiToken {
-                id: row.get("id"),
-                workspace_id: row.get("workspace_id"),
-                owner_id: row.get("owner_id"),
-                name: row.get("name"),
-                created_at: row.get("created_at"),
-                last_used_at: row.try_get("last_used_at").ok(),
-                revoked_at: row.try_get("revoked_at").ok(),
-            })
-            .collect())
-    }
-
-    async fn revoke(&self, workspace_id: Uuid, token_id: Uuid) -> anyhow::Result<bool> {
-        let row = sqlx::query(
-            r#"UPDATE api_tokens
-               SET revoked_at = now()
-               WHERE id = $1 AND workspace_id = $2 AND revoked_at IS NULL
-               RETURNING id"#,
-        )
-        .bind(token_id)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.is_some())
-    }
-
-    async fn find_by_digest(&self, digest: &str) -> anyhow::Result<Option<ApiTokenSecret>> {
-        let row = sqlx::query(
-            r#"SELECT id, workspace_id, owner_id, name, created_at, last_used_at, revoked_at, token_hash, token_digest
-               FROM api_tokens
-               WHERE token_digest = $1
-               LIMIT 1"#,
-        )
-        .bind(digest)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        Ok(row.map(|row| {
-            let token = ApiToken {
-                id: row.get("id"),
-                workspace_id: row.get("workspace_id"),
-                owner_id: row.get("owner_id"),
-                name: row.get("name"),
-                created_at: row.get("created_at"),
-                last_used_at: row.try_get("last_used_at").ok(),
-                revoked_at: row.try_get("revoked_at").ok(),
-            };
-            ApiTokenSecret {
-                token,
-                token_hash: row.get("token_hash"),
-                token_digest: row.get("token_digest"),
-            }
-        }))
-    }
-
-    async fn touch_last_used(&self, token_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("UPDATE api_tokens SET last_used_at = now() WHERE id = $1")
-            .bind(token_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/document_repository_sqlx.rs b/api/src/infrastructure/db/repositories/document_repository_sqlx.rs
deleted file mode 100644
index 7fe68d4c..00000000
--- a/api/src/infrastructure/db/repositories/document_repository_sqlx.rs
+++ /dev/null
@@ -1,1061 +0,0 @@
-use std::borrow::Cow;
-
-use anyhow::{anyhow, bail};
-use async_trait::async_trait;
-use sha2::{Digest, Sha256};
-use sqlx::{Postgres, Row, Transaction, postgres::PgRow};
-use uuid::Uuid;
-
-use crate::application::ports::document_repository::{
-    DocMeta, DocumentListState, DocumentPathConflictError, DocumentRepository, SubtreeDocument,
-};
-use crate::domain::documents::document::{
-    BacklinkInfo as DomBacklinkInfo, Document as DomainDocument, OutgoingLink as DomOutgoingLink,
-    SearchHit,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxDocumentRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxDocumentRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-
-    fn map_row_to_meta(row: &PgRow) -> DocMeta {
-        DocMeta {
-            workspace_id: row.get("workspace_id"),
-            doc_type: row.get("type"),
-            path: row.try_get("path").ok(),
-            slug: row.get("slug"),
-            desired_path: row.get("desired_path"),
-            title: row.get("title"),
-            archived_at: row.try_get("archived_at").ok(),
-        }
-    }
-
-    fn map_row_to_document(row: &PgRow) -> DomainDocument {
-        DomainDocument {
-            id: row.get("id"),
-            owner_id: row.get("owner_id"),
-            owner_user_id: row.try_get("owner_user_id").ok(),
-            workspace_id: row.get("workspace_id"),
-            title: row.get("title"),
-            parent_id: row.get("parent_id"),
-            doc_type: row.get("type"),
-            created_at: row.get("created_at"),
-            updated_at: row.get("updated_at"),
-            created_by_plugin: row.try_get("created_by_plugin").ok(),
-            slug: row.get("slug"),
-            desired_path: row.get("desired_path"),
-            path: row.try_get("path").ok(),
-            created_by: row.try_get("created_by").ok(),
-            archived_at: row.try_get("archived_at").ok(),
-            archived_by: row.try_get("archived_by").ok(),
-            archived_parent_id: row.try_get("archived_parent_id").ok(),
-        }
-    }
-
-    fn slugify(title: &str) -> String {
-        let trimmed = title.trim();
-        if trimmed.is_empty() {
-            return "untitled".to_string();
-        }
-
-        let mut slug = String::with_capacity(trimmed.len());
-        let mut last_was_space = false;
-        for ch in trimmed.chars() {
-            if ch.is_control() {
-                continue;
-            }
-            if ch.is_whitespace() {
-                if !last_was_space {
-                    slug.push(' ');
-                    last_was_space = true;
-                }
-                continue;
-            }
-            last_was_space = false;
-            let safe = match ch {
-                '/' | '\\' | ':' | '*' | '?' | '"' | '<' | '>' | '|' => '-',
-                _ => ch,
-            };
-            slug.push(safe);
-        }
-
-        let mut slug = slug
-            .trim_matches(|c: char| matches!(c, ' ' | '-'))
-            .to_string();
-        if slug.is_empty() {
-            slug.push_str("untitled");
-        }
-        if slug.len() > 100 {
-            slug.truncate(100);
-        }
-        slug
-    }
-
-    fn apply_slug_suffix(base: &str, attempt: usize) -> String {
-        if attempt == 0 {
-            base.to_string()
-        } else {
-            format!("{base}-{}", attempt + 1)
-        }
-    }
-
-    async fn build_desired_path(
-        &self,
-        parent_id: Option<Uuid>,
-        slug: &str,
-        doc_type: &str,
-    ) -> anyhow::Result<String> {
-        let prefix = if let Some(pid) = parent_id {
-            let path = sqlx::query_scalar::<_, Option<String>>(
-                "SELECT desired_path FROM documents WHERE id = $1",
-            )
-            .bind(pid)
-            .fetch_optional(&self.pool)
-            .await?
-            .flatten()
-            .ok_or_else(|| anyhow!("parent_document_not_found"))?;
-            if path.is_empty() {
-                String::new()
-            } else {
-                format!("{path}/")
-            }
-        } else {
-            String::new()
-        };
-
-        let desired = if doc_type == "folder" {
-            format!("{prefix}{slug}")
-        } else {
-            format!("{prefix}{slug}.md")
-        };
-        Ok(desired.trim_start_matches('/').to_string())
-    }
-
-    fn hash_path(desired_path: &str) -> Vec<u8> {
-        Sha256::digest(desired_path.as_bytes()).to_vec()
-    }
-
-    fn owner_relative_path(owner_id: Uuid, desired_path: &str) -> String {
-        format!("{owner_id}/{}", desired_path.trim_start_matches('/'))
-    }
-
-    fn parent_desired_path(desired_path: &str) -> Option<String> {
-        let mut parts = desired_path.rsplitn(2, '/');
-        parts.next()?; // skip current file/folder
-        parts.next().map(|parent| parent.to_string())
-    }
-
-    fn slug_from_desired_path(desired_path: &str) -> anyhow::Result<String> {
-        let segment = desired_path
-            .rsplit('/')
-            .next()
-            .ok_or_else(|| anyhow!("invalid_desired_path"))?;
-        let trimmed = segment.trim();
-        if trimmed.is_empty() {
-            bail!("invalid_desired_path_segment");
-        }
-        let slug = trimmed
-            .strip_suffix(".md")
-            .unwrap_or(trimmed)
-            .trim_matches('/');
-        if slug.is_empty() {
-            bail!("invalid_slug_from_path");
-        }
-        Ok(slug.to_string())
-    }
-
-    async fn resolve_parent_folder_id(
-        &self,
-        workspace_id: Uuid,
-        desired_parent_path: Option<&str>,
-    ) -> anyhow::Result<Option<Uuid>> {
-        let Some(path) = desired_parent_path.filter(|p| !p.is_empty()) else {
-            return Ok(None);
-        };
-        let row = sqlx::query(
-            r#"SELECT id, archived_at FROM documents
-               WHERE workspace_id = $1 AND desired_path = $2 AND type = 'folder'
-               LIMIT 1"#,
-        )
-        .bind(workspace_id)
-        .bind(path)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        match row {
-            Some(row) => {
-                let archived_at: Option<chrono::DateTime<chrono::Utc>> =
-                    row.try_get("archived_at").ok();
-                if archived_at.is_some() {
-                    Err(anyhow!("parent_folder_archived"))
-                } else {
-                    Ok(Some(row.get("id")))
-                }
-            }
-            None => Err(anyhow!("parent_folder_not_found")),
-        }
-    }
-
-    async fn update_descendant_paths_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        root_id: Uuid,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"
-            WITH RECURSIVE tree AS (
-                SELECT id, desired_path, type
-                FROM documents
-                WHERE id = $1
-                UNION ALL
-                SELECT d.id,
-                       CASE
-                           WHEN d.type = 'folder' THEN tree.desired_path || '/' || d.slug
-                           ELSE tree.desired_path || '/' || d.slug || '.md'
-                       END AS desired_path,
-                       d.type
-                FROM documents d
-                JOIN tree ON d.parent_id = tree.id
-            )
-            UPDATE documents AS doc
-            SET desired_path = tree.desired_path,
-                path_digest = digest(tree.desired_path, 'sha256'),
-                updated_at = now()
-            FROM tree
-            WHERE doc.id = tree.id
-              AND doc.id <> $1
-            "#,
-        )
-        .bind(root_id)
-        .execute(tx.as_mut())
-        .await?;
-        Ok(())
-    }
-
-    fn is_unique_violation(err: &sqlx::Error) -> bool {
-        match err {
-            sqlx::Error::Database(db_err) => {
-                matches!(db_err.code(), Some(code) if code == Cow::Borrowed("23505"))
-            }
-            _ => false,
-        }
-    }
-
-    fn is_anyhow_unique_violation(err: &anyhow::Error) -> bool {
-        err.downcast_ref::<sqlx::Error>()
-            .is_some_and(|sqlx_err| Self::is_unique_violation(sqlx_err))
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::SqlxDocumentRepository;
-
-    #[test]
-    fn slug_preserves_unicode_and_case() {
-        assert_eq!(SqlxDocumentRepository::slugify("Main"), "Main");
-        assert_eq!(SqlxDocumentRepository::slugify("Résumé2025"), "Résumé2025");
-    }
-
-    #[test]
-    fn slug_sanitizes_forbidden_chars() {
-        assert_eq!(SqlxDocumentRepository::slugify(" Foo / Bar "), "Foo - Bar");
-        assert_eq!(SqlxDocumentRepository::slugify("////"), "untitled");
-    }
-}
-
-#[async_trait]
-impl DocumentRepository for SqlxDocumentRepository {
-    async fn list_for_user(
-        &self,
-        workspace_id: Uuid,
-        query: Option<String>,
-        tag: Option<String>,
-        state: DocumentListState,
-    ) -> anyhow::Result<Vec<DomainDocument>> {
-        let archived_condition = match state {
-            DocumentListState::Active => "d.archived_at IS NULL",
-            DocumentListState::Archived => "d.archived_at IS NOT NULL",
-            DocumentListState::All => "TRUE",
-        };
-
-        let rows = if let Some(t) = tag.as_ref().filter(|s| !s.trim().is_empty()) {
-            let sql = format!(
-                r#"SELECT d.*
-                   FROM document_tags dt
-                   JOIN tags t ON t.id = dt.tag_id
-                   JOIN documents d ON d.id = dt.document_id
-                   WHERE d.workspace_id = $1 AND {archived_condition} AND t.name ILIKE $2
-                   ORDER BY d.updated_at DESC LIMIT 100"#,
-                archived_condition = archived_condition,
-            );
-            sqlx::query(&sql)
-                .bind(workspace_id)
-                .bind(t)
-                .fetch_all(&self.pool)
-                .await?
-        } else if let Some(ref qq) = query.as_ref().filter(|s| !s.trim().is_empty()) {
-            let like = format!("%{}%", qq);
-            let sql = format!(
-                r#"SELECT d.*
-                   FROM documents d
-                   WHERE d.workspace_id = $1 AND {archived_condition} AND d.title ILIKE $2
-                   ORDER BY d.updated_at DESC LIMIT 100"#,
-                archived_condition = archived_condition,
-            );
-            sqlx::query(&sql)
-                .bind(workspace_id)
-                .bind(like)
-                .fetch_all(&self.pool)
-                .await?
-        } else {
-            let sql = format!(
-                r#"SELECT d.*
-                   FROM documents d
-                   WHERE d.workspace_id = $1 AND {archived_condition}
-                   ORDER BY d.updated_at DESC LIMIT 100"#,
-                archived_condition = archived_condition,
-            );
-            sqlx::query(&sql)
-                .bind(workspace_id)
-                .fetch_all(&self.pool)
-                .await?
-        };
-
-        Ok(rows
-            .into_iter()
-            .map(|r| Self::map_row_to_document(&r))
-            .collect())
-    }
-
-    async fn list_ids_for_user(&self, workspace_id: Uuid) -> anyhow::Result<Vec<Uuid>> {
-        let rows = sqlx::query("SELECT id FROM documents WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .fetch_all(&self.pool)
-            .await?;
-        Ok(rows.into_iter().map(|r| r.get("id")).collect())
-    }
-
-    async fn list_paths_for_user(&self, workspace_id: Uuid) -> anyhow::Result<Vec<String>> {
-        let rows = sqlx::query(
-            r#"
-            SELECT path
-            FROM documents
-            WHERE workspace_id = $1
-              AND path IS NOT NULL
-              AND type <> 'folder'
-            "#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .filter_map(|r| r.try_get::<String, _>("path").ok())
-            .collect())
-    }
-
-    async fn list_workspace_documents(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<DomainDocument>> {
-        let rows = sqlx::query("SELECT * FROM documents WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .fetch_all(&self.pool)
-            .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| Self::map_row_to_document(&r))
-            .collect())
-    }
-
-    async fn get_by_id(&self, id: Uuid) -> anyhow::Result<Option<DomainDocument>> {
-        let row = sqlx::query(r#"SELECT * FROM documents WHERE id = $1"#)
-            .bind(id)
-            .fetch_optional(&self.pool)
-            .await?;
-        Ok(row.map(|r| Self::map_row_to_document(&r)))
-    }
-
-    async fn search_for_user(
-        &self,
-        workspace_id: Uuid,
-        query: Option<String>,
-        limit: i64,
-    ) -> anyhow::Result<Vec<SearchHit>> {
-        let q = query.unwrap_or_default();
-        let like = format!("%{}%", q);
-        let rows = if q.trim().is_empty() {
-            sqlx::query(
-                r#"SELECT id, title, type, path, updated_at, archived_at
-                   FROM documents WHERE workspace_id = $1
-                     AND archived_at IS NULL
-                   ORDER BY updated_at DESC
-                   LIMIT $2"#,
-            )
-            .bind(workspace_id)
-            .bind(limit)
-            .fetch_all(&self.pool)
-            .await?
-        } else {
-            sqlx::query(
-                r#"SELECT id, title, type, path, updated_at, archived_at FROM documents
-                   WHERE workspace_id = $1 AND archived_at IS NULL
-                     AND (LOWER(title) LIKE LOWER($2) OR title ILIKE $2)
-                   ORDER BY CASE WHEN LOWER(title) = LOWER($3) THEN 0 ELSE 1 END, LENGTH(title), updated_at DESC
-                   LIMIT $4"#
-            )
-            .bind(workspace_id)
-            .bind(like)
-            .bind(&q)
-            .bind(limit)
-            .fetch_all(&self.pool)
-            .await?
-        };
-        let out = rows
-            .into_iter()
-            .map(|r| SearchHit {
-                id: r.get("id"),
-                title: r.get("title"),
-                doc_type: r.get::<String, _>("type"),
-                path: r.try_get("path").ok(),
-                updated_at: r.get("updated_at"),
-            })
-            .collect();
-        Ok(out)
-    }
-
-    async fn create_for_user(
-        &self,
-        workspace_id: Uuid,
-        created_by: Uuid,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> anyhow::Result<DomainDocument> {
-        let mut tx = self.pool.begin().await?;
-        let doc = self
-            .create_for_user_tx(
-                &mut tx,
-                workspace_id,
-                created_by,
-                title,
-                parent_id,
-                doc_type,
-                created_by_plugin,
-            )
-            .await?;
-        tx.commit().await?;
-        Ok(doc)
-    }
-
-    async fn create_for_user_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        created_by: Uuid,
-        title: &str,
-        parent_id: Option<Uuid>,
-        doc_type: &str,
-        created_by_plugin: Option<&str>,
-    ) -> anyhow::Result<DomainDocument> {
-        sqlx::query("SAVEPOINT document_create")
-            .execute(tx.as_mut())
-            .await?;
-        let base_slug = Self::slugify(title);
-        let mut attempt = 0usize;
-        loop {
-            let slug = Self::apply_slug_suffix(&base_slug, attempt);
-            let desired_path = self.build_desired_path(parent_id, &slug, doc_type).await?;
-            let repo_path = Self::owner_relative_path(workspace_id, &desired_path);
-            let path_digest = Self::hash_path(&desired_path);
-            let row = sqlx::query(
-                r#"INSERT INTO documents (title, owner_id, owner_user_id, workspace_id, created_by, created_by_plugin, parent_id, type, slug, desired_path, path, path_digest)
-                   VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
-                   RETURNING *"#,
-            )
-            .bind(title)
-            .bind(workspace_id)
-            .bind(created_by)
-            .bind(workspace_id)
-            .bind(created_by)
-            .bind(created_by_plugin)
-            .bind(parent_id)
-            .bind(doc_type)
-            .bind(&slug)
-            .bind(&desired_path)
-            .bind(&repo_path)
-            .bind(&path_digest)
-            .fetch_one(tx.as_mut())
-            .await;
-            match row {
-                Ok(row) => {
-                    sqlx::query("RELEASE SAVEPOINT document_create")
-                        .execute(tx.as_mut())
-                        .await
-                        .ok();
-                    return Ok(Self::map_row_to_document(&row));
-                }
-                Err(err) if Self::is_unique_violation(&err) => {
-                    sqlx::query("ROLLBACK TO SAVEPOINT document_create")
-                        .execute(tx.as_mut())
-                        .await?;
-                    attempt += 1;
-                    continue;
-                }
-                Err(err) => {
-                    sqlx::query("ROLLBACK TO SAVEPOINT document_create")
-                        .execute(tx.as_mut())
-                        .await
-                        .ok();
-                    if Self::is_unique_violation(&err) {
-                        return Err(DocumentPathConflictError.into());
-                    }
-                    return Err(err.into());
-                }
-            }
-        }
-    }
-
-    async fn update_title_and_parent_for_user(
-        &self,
-        id: Uuid,
-        workspace_id: Uuid,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let mut tx = self.pool.begin().await?;
-        let doc = self
-            .update_title_and_parent_for_user_tx(&mut tx, id, workspace_id, title, parent_id)
-            .await?;
-        tx.commit().await?;
-        Ok(doc)
-    }
-
-    async fn update_title_and_parent_for_user_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        id: Uuid,
-        workspace_id: Uuid,
-        title: Option<String>,
-        parent_id: Option<Option<Uuid>>,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let current = sqlx::query(
-            r#"SELECT title, parent_id, type, slug
-               FROM documents
-               WHERE id = $1 AND workspace_id = $2"#,
-        )
-        .bind(id)
-        .bind(workspace_id)
-        .fetch_optional(tx.as_mut())
-        .await?;
-        let Some(current) = current else {
-            return Ok(None);
-        };
-
-        let next_title = title.clone().unwrap_or_else(|| current.get("title"));
-        let next_parent: Option<Uuid> = match parent_id {
-            None => current.get("parent_id"),
-            Some(new_parent) => new_parent,
-        };
-        let doc_type: String = current.get("type");
-        let base_slug = if title.is_some() {
-            Self::slugify(&next_title)
-        } else {
-            current.get("slug")
-        };
-
-        sqlx::query("SAVEPOINT document_update")
-            .execute(tx.as_mut())
-            .await?;
-        let mut attempt = 0usize;
-        loop {
-            let slug = Self::apply_slug_suffix(&base_slug, attempt);
-            let desired_path = self
-                .build_desired_path(next_parent, &slug, &doc_type)
-                .await?;
-            let path_digest = Self::hash_path(&desired_path);
-            let row = sqlx::query(
-                r#"UPDATE documents SET
-                       title = $1,
-                       parent_id = $2,
-                       slug = $3,
-                       desired_path = $4,
-                       path_digest = $5,
-                       updated_at = now()
-                   WHERE id = $6 AND workspace_id = $7
-                   RETURNING *"#,
-            )
-            .bind(&next_title)
-            .bind(next_parent)
-            .bind(&slug)
-            .bind(&desired_path)
-            .bind(&path_digest)
-            .bind(id)
-            .bind(workspace_id)
-            .fetch_optional(tx.as_mut())
-            .await;
-            match row {
-                Ok(Some(row)) => {
-                    let doc = Self::map_row_to_document(&row);
-                    if doc.doc_type == "folder" {
-                        sqlx::query("SAVEPOINT document_update_descendants")
-                            .execute(tx.as_mut())
-                            .await?;
-                        let result =
-                            self.update_descendant_paths_tx(tx, doc.id)
-                                .await
-                                .map_err(|err| {
-                                    if Self::is_anyhow_unique_violation(&err) {
-                                        anyhow::Error::new(DocumentPathConflictError)
-                                    } else {
-                                        err
-                                    }
-                                });
-                        match result {
-                            Ok(()) => {
-                                sqlx::query("RELEASE SAVEPOINT document_update_descendants")
-                                    .execute(tx.as_mut())
-                                    .await
-                                    .ok();
-                            }
-                            Err(err) => {
-                                sqlx::query("ROLLBACK TO SAVEPOINT document_update_descendants")
-                                    .execute(tx.as_mut())
-                                    .await
-                                    .ok();
-                                sqlx::query("ROLLBACK TO SAVEPOINT document_update")
-                                    .execute(tx.as_mut())
-                                    .await
-                                    .ok();
-                                return Err(err);
-                            }
-                        }
-                    }
-                    sqlx::query("RELEASE SAVEPOINT document_update")
-                        .execute(tx.as_mut())
-                        .await
-                        .ok();
-                    return Ok(Some(doc));
-                }
-                Ok(None) => {
-                    sqlx::query("RELEASE SAVEPOINT document_update")
-                        .execute(tx.as_mut())
-                        .await
-                        .ok();
-                    return Ok(None);
-                }
-                Err(err) if Self::is_unique_violation(&err) => {
-                    sqlx::query("ROLLBACK TO SAVEPOINT document_update")
-                        .execute(tx.as_mut())
-                        .await?;
-                    attempt += 1;
-                    continue;
-                }
-                Err(err) => {
-                    sqlx::query("ROLLBACK TO SAVEPOINT document_update")
-                        .execute(tx.as_mut())
-                        .await
-                        .ok();
-                    return Err(err.into());
-                }
-            }
-        }
-    }
-
-    async fn delete_owned(&self, id: Uuid, workspace_id: Uuid) -> anyhow::Result<Option<String>> {
-        let mut tx = self.pool.begin().await?;
-        let res = self.delete_owned_tx(&mut tx, id, workspace_id).await?;
-        tx.commit().await?;
-        Ok(res)
-    }
-
-    async fn delete_owned_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<String>> {
-        // fetch type
-        let row = sqlx::query(r#"SELECT type FROM documents WHERE id = $1 AND workspace_id = $2"#)
-            .bind(id)
-            .bind(workspace_id)
-            .fetch_optional(tx.as_mut())
-            .await?;
-        let dtype: String = match row {
-            Some(r) => r.get("type"),
-            None => return Ok(None),
-        };
-        let res = sqlx::query(r#"DELETE FROM documents WHERE id = $1 AND workspace_id = $2"#)
-            .bind(id)
-            .bind(workspace_id)
-            .execute(tx.as_mut())
-            .await?;
-        if res.rows_affected() > 0 {
-            Ok(Some(dtype))
-        } else {
-            Ok(None)
-        }
-    }
-
-    async fn backlinks_for(
-        &self,
-        workspace_id: Uuid,
-        target_id: Uuid,
-    ) -> anyhow::Result<Vec<DomBacklinkInfo>> {
-        let rows = sqlx::query(
-            r#"SELECT d.id as document_id, d.title, d.type as document_type, d.path as file_path,
-                      dl.link_type, dl.link_text, COUNT(*)::BIGINT as link_count
-               FROM document_links dl
-               JOIN documents d ON d.id = dl.source_document_id
-               WHERE dl.target_document_id = $1 AND d.workspace_id = $2
-               GROUP BY d.id, d.title, d.type, d.path, dl.link_type, dl.link_text
-               ORDER BY link_count DESC, d.title"#,
-        )
-        .bind(target_id)
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        let out = rows
-            .into_iter()
-            .map(|r| DomBacklinkInfo {
-                document_id: r.get("document_id"),
-                title: r.get("title"),
-                document_type: r.get("document_type"),
-                file_path: r.try_get("file_path").ok(),
-                link_type: r.get("link_type"),
-                link_text: r.try_get("link_text").ok(),
-                link_count: r.try_get("link_count").unwrap_or(1_i64),
-            })
-            .collect();
-        Ok(out)
-    }
-
-    async fn outgoing_links_for(
-        &self,
-        workspace_id: Uuid,
-        source_id: Uuid,
-    ) -> anyhow::Result<Vec<DomOutgoingLink>> {
-        let rows = sqlx::query(
-            r#"SELECT d.id as document_id, d.title, d.type as document_type, d.path as file_path,
-                      dl.link_type, dl.link_text, dl.position_start, dl.position_end
-               FROM document_links dl
-               JOIN documents d ON d.id = dl.target_document_id
-               WHERE dl.source_document_id = $1 AND d.workspace_id = $2
-               ORDER BY dl.position_start"#,
-        )
-        .bind(source_id)
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        let out = rows
-            .into_iter()
-            .map(|r| DomOutgoingLink {
-                document_id: r.get("document_id"),
-                title: r.get("title"),
-                document_type: r.get("document_type"),
-                file_path: r.try_get("file_path").ok(),
-                link_type: r.get("link_type"),
-                link_text: r.try_get("link_text").ok(),
-                position_start: r.try_get("position_start").ok(),
-                position_end: r.try_get("position_end").ok(),
-            })
-            .collect();
-        Ok(out)
-    }
-
-    async fn get_meta_for_owner(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DocMeta>> {
-        let row = sqlx::query(
-            "SELECT workspace_id, type, path, slug, desired_path, title, archived_at FROM documents WHERE id = $1 AND workspace_id = $2",
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.as_ref().map(SqlxDocumentRepository::map_row_to_meta))
-    }
-
-    async fn get_meta_for_owner_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DocMeta>> {
-        let row = sqlx::query(
-            "SELECT workspace_id, type, path, slug, desired_path, title, archived_at FROM documents WHERE id = $1 AND workspace_id = $2 FOR UPDATE",
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_optional(tx.as_mut())
-        .await?;
-        Ok(row.as_ref().map(SqlxDocumentRepository::map_row_to_meta))
-    }
-
-    async fn archive_subtree(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-        archived_by: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let mut tx = self.pool.begin().await?;
-        let doc = self
-            .archive_subtree_tx(&mut tx, doc_id, workspace_id, archived_by)
-            .await?;
-        tx.commit().await?;
-        Ok(doc)
-    }
-
-    async fn archive_subtree_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-        archived_by: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let updated = sqlx::query_scalar::<_, Uuid>(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id FROM documents WHERE id = $1 AND workspace_id = $2
-                UNION ALL
-                SELECT d.id
-                FROM documents d
-                JOIN subtree sb ON d.parent_id = sb.id
-                WHERE d.workspace_id = $2
-            ),
-            removed_shares AS (
-                DELETE FROM shares s
-                USING subtree sb
-                WHERE s.document_id = sb.id
-                RETURNING 1
-            ),
-            updated AS (
-                UPDATE documents AS d
-                SET archived_at = now(),
-                    archived_by = $3,
-                    archived_parent_id = d.parent_id,
-                    parent_id = NULL,
-                    updated_at = now()
-                FROM subtree sb
-                WHERE d.id = sb.id AND d.archived_at IS NULL
-                RETURNING d.id
-            )
-            SELECT id FROM updated WHERE id = $1 LIMIT 1
-            "#,
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .bind(archived_by)
-        .fetch_optional(tx.as_mut())
-        .await?;
-
-        let root = if let Some(root_id) = updated {
-            sqlx::query(r#"SELECT * FROM documents WHERE id = $1"#)
-                .bind(root_id)
-                .fetch_optional(tx.as_mut())
-                .await?
-                .map(|r| Self::map_row_to_document(&r))
-        } else {
-            None
-        };
-
-        Ok(root)
-    }
-
-    async fn unarchive_subtree(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let mut tx = self.pool.begin().await?;
-        let doc = self
-            .unarchive_subtree_tx(&mut tx, doc_id, workspace_id)
-            .await?;
-        tx.commit().await?;
-        Ok(doc)
-    }
-
-    async fn unarchive_subtree_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let updated = sqlx::query_scalar::<_, Uuid>(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id FROM documents WHERE id = $1 AND workspace_id = $2
-                UNION ALL
-                SELECT d.id
-                FROM documents d
-                JOIN subtree sb ON d.archived_parent_id = sb.id
-                WHERE d.workspace_id = $2
-            ),
-            updated AS (
-                UPDATE documents AS d
-                SET parent_id = archived_parent_id,
-                    archived_parent_id = NULL,
-                    archived_at = NULL,
-                    archived_by = NULL,
-                    updated_at = now()
-                FROM subtree sb
-                WHERE d.id = sb.id AND d.archived_at IS NOT NULL
-                RETURNING d.id
-            )
-            SELECT id FROM updated WHERE id = $1 LIMIT 1
-            "#,
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_optional(tx.as_mut())
-        .await?;
-
-        let root = if let Some(root_id) = updated {
-            sqlx::query(r#"SELECT * FROM documents WHERE id = $1"#)
-                .bind(root_id)
-                .fetch_optional(tx.as_mut())
-                .await?
-                .map(|r| Self::map_row_to_document(&r))
-        } else {
-            None
-        };
-
-        Ok(root)
-    }
-
-    async fn list_owned_subtree_documents(
-        &self,
-        workspace_id: Uuid,
-        root_id: Uuid,
-    ) -> anyhow::Result<Vec<SubtreeDocument>> {
-        let rows = sqlx::query(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id, type FROM documents WHERE id = $1 AND workspace_id = $2
-                UNION ALL
-                SELECT d.id, d.type
-                FROM documents d
-                JOIN subtree sb ON COALESCE(d.parent_id, d.archived_parent_id) = sb.id
-                WHERE d.workspace_id = $2
-            )
-            SELECT id, type FROM subtree
-            "#,
-        )
-        .bind(root_id)
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| SubtreeDocument {
-                id: r.get("id"),
-                doc_type: r.get("type"),
-            })
-            .collect())
-    }
-
-    async fn list_owned_subtree_documents_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        root_id: Uuid,
-    ) -> anyhow::Result<Vec<SubtreeDocument>> {
-        let rows = sqlx::query(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id, type FROM documents WHERE id = $1 AND workspace_id = $2
-                UNION ALL
-                SELECT d.id, d.type
-                FROM documents d
-                JOIN subtree sb ON COALESCE(d.parent_id, d.archived_parent_id) = sb.id
-                WHERE d.workspace_id = $2
-            )
-            SELECT id, type FROM subtree FOR UPDATE
-            "#,
-        )
-        .bind(root_id)
-        .bind(workspace_id)
-        .fetch_all(tx.as_mut())
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| SubtreeDocument {
-                id: r.get("id"),
-                doc_type: r.get("type"),
-            })
-            .collect())
-    }
-
-    async fn get_by_owner_and_path(
-        &self,
-        workspace_id: Uuid,
-        relative_path: &str,
-    ) -> anyhow::Result<Option<DomainDocument>> {
-        let row = sqlx::query(
-            r#"SELECT *
-               FROM documents
-               WHERE workspace_id = $1 AND path = $2
-               LIMIT 1"#,
-        )
-        .bind(workspace_id)
-        .bind(relative_path)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| Self::map_row_to_document(&r)))
-    }
-
-    async fn update_repo_path(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-        relative_path: &str,
-    ) -> anyhow::Result<()> {
-        let trimmed = relative_path.trim_start_matches('/');
-        let owner_prefix = workspace_id.to_string();
-        let desired_path = if let Some(rest) = trimmed.strip_prefix(&owner_prefix) {
-            rest.trim_start_matches('/').to_string()
-        } else {
-            trimmed.to_string()
-        };
-        if desired_path.is_empty() {
-            return Err(anyhow!("invalid_relative_path"));
-        }
-        let slug = Self::slug_from_desired_path(&desired_path)?;
-        let parent_path = Self::parent_desired_path(&desired_path);
-        let parent_id = self
-            .resolve_parent_folder_id(workspace_id, parent_path.as_deref())
-            .await?;
-        let normalized_path = Self::owner_relative_path(workspace_id, &desired_path);
-        let path_digest = Self::hash_path(&desired_path);
-        sqlx::query(
-            r#"UPDATE documents SET
-                   path = $3,
-                   desired_path = $4,
-                   path_digest = $5,
-                   slug = $6,
-                   parent_id = $7,
-                   updated_at = now()
-               WHERE id = $1 AND workspace_id = $2"#,
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .bind(&normalized_path)
-        .bind(&desired_path)
-        .bind(&path_digest)
-        .bind(&slug)
-        .bind(parent_id)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/files_repository_sqlx.rs b/api/src/infrastructure/db/repositories/files_repository_sqlx.rs
deleted file mode 100644
index dbf79a35..00000000
--- a/api/src/infrastructure/db/repositories/files_repository_sqlx.rs
+++ /dev/null
@@ -1,225 +0,0 @@
-use async_trait::async_trait;
-use sqlx::{Postgres, Row, Transaction};
-use uuid::Uuid;
-
-use crate::application::ports::files_repository::{FileRecord, FilesRepository};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxFilesRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxFilesRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl FilesRepository for SqlxFilesRepository {
-    async fn is_workspace_document(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<bool> {
-        let n = sqlx::query_scalar::<_, i64>(
-            "SELECT COUNT(1) FROM documents WHERE id = $1 AND workspace_id = $2",
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(n > 0)
-    }
-
-    async fn insert_file(
-        &self,
-        doc_id: Uuid,
-        filename: &str,
-        content_type: Option<&str>,
-        size: i64,
-        storage_path: &str,
-        content_hash: &str,
-    ) -> anyhow::Result<Uuid> {
-        let row = sqlx::query(
-            r#"INSERT INTO files (document_id, filename, content_type, size, storage_path, content_hash)
-               VALUES ($1, $2, $3, $4, $5, $6)
-               RETURNING id"#,
-        )
-        .bind(doc_id)
-        .bind(filename)
-        .bind(content_type)
-        .bind(size)
-        .bind(storage_path)
-        .bind(content_hash)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(row.get("id"))
-    }
-
-    async fn get_file_meta(
-        &self,
-        file_id: Uuid,
-    ) -> anyhow::Result<Option<(String, Option<String>, Uuid)>> {
-        let row = sqlx::query(
-            r#"SELECT f.storage_path, f.content_type, d.workspace_id
-               FROM files f JOIN documents d ON f.document_id = d.id
-               WHERE f.id = $1"#,
-        )
-        .bind(file_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| {
-            (
-                r.get("storage_path"),
-                r.try_get("content_type").ok(),
-                r.get("workspace_id"),
-            )
-        }))
-    }
-
-    async fn get_file_path_by_doc_and_name(
-        &self,
-        doc_id: Uuid,
-        filename: &str,
-    ) -> anyhow::Result<Option<(String, Option<String>)>> {
-        let row = sqlx::query(
-            r#"SELECT storage_path, content_type FROM files WHERE document_id = $1 AND filename = $2"#,
-        )
-        .bind(doc_id)
-        .bind(filename)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| (r.get("storage_path"), r.try_get("content_type").ok())))
-    }
-
-    async fn list_storage_paths_for_document(&self, doc_id: Uuid) -> anyhow::Result<Vec<String>> {
-        let rows = sqlx::query("SELECT storage_path FROM files WHERE document_id = $1")
-            .bind(doc_id)
-            .fetch_all(&self.pool)
-            .await?;
Ok(rows - .into_iter() - .filter_map(|r| r.try_get::("storage_path").ok()) - .collect()) - } - - async fn list_storage_paths_for_document_tx( - &self, - tx: &mut Transaction<'_, Postgres>, - doc_id: Uuid, - ) -> anyhow::Result> { - let rows = sqlx::query("SELECT storage_path FROM files WHERE document_id = $1 FOR UPDATE") - .bind(doc_id) - .fetch_all(tx.as_mut()) - .await?; - Ok(rows - .into_iter() - .filter_map(|r| r.try_get::("storage_path").ok()) - .collect()) - } - - async fn list_files_for_document(&self, doc_id: Uuid) -> anyhow::Result> { - let rows = sqlx::query( - r#"SELECT id, filename, content_type, size, storage_path, content_hash - FROM files - WHERE document_id = $1"#, - ) - .bind(doc_id) - .fetch_all(&self.pool) - .await?; - Ok(rows - .into_iter() - .map(|r| FileRecord { - id: r.get("id"), - filename: r.get("filename"), - content_type: r.try_get("content_type").ok(), - size: r.get("size"), - storage_path: r.get("storage_path"), - content_hash: r.get("content_hash"), - }) - .collect()) - } - - async fn list_storage_paths_for_workspace( - &self, - workspace_id: Uuid, - ) -> anyhow::Result> { - let rows = sqlx::query( - r#" - SELECT f.storage_path - FROM files f - JOIN documents d ON d.id = f.document_id - WHERE d.workspace_id = $1 - "#, - ) - .bind(workspace_id) - .fetch_all(&self.pool) - .await?; - Ok(rows - .into_iter() - .filter_map(|r| r.try_get::("storage_path").ok()) - .collect()) - } - - async fn find_by_storage_path( - &self, - storage_path: &str, - ) -> anyhow::Result> { - let row = sqlx::query( - r#"SELECT f.id as file_id, f.document_id, d.workspace_id - FROM files f - JOIN documents d ON d.id = f.document_id - WHERE f.storage_path = $1 - LIMIT 1"#, - ) - .bind(storage_path) - .fetch_optional(&self.pool) - .await?; - Ok(row.map(|r| { - ( - r.get("file_id"), - r.get("document_id"), - r.get("workspace_id"), - ) - })) - } - - async fn update_storage_path(&self, file_id: Uuid, storage_path: &str) -> anyhow::Result<()> { - sqlx::query( - r#"UPDATE files SET storage_path = $2, updated_at = now() - WHERE id = $1"#, - ) - .bind(file_id) - .bind(storage_path) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn update_hash_and_size( - &self, - file_id: Uuid, - size: i64, - content_hash: &str, - ) -> anyhow::Result<()> { - sqlx::query( - r#"UPDATE files SET size = $2, content_hash = $3, updated_at = now() - WHERE id = $1"#, - ) - .bind(file_id) - .bind(size) - .bind(content_hash) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn delete_by_id(&self, file_id: Uuid) -> anyhow::Result<()> { - sqlx::query("DELETE FROM files WHERE id = $1") - .bind(file_id) - .execute(&self.pool) - .await?; - Ok(()) - } -} diff --git a/api/src/infrastructure/db/repositories/git_pull_session_repository_sqlx.rs b/api/src/infrastructure/db/repositories/git_pull_session_repository_sqlx.rs deleted file mode 100644 index 3aa4339e..00000000 --- a/api/src/infrastructure/db/repositories/git_pull_session_repository_sqlx.rs +++ /dev/null @@ -1,89 +0,0 @@ -use async_trait::async_trait; -use sqlx::types::Json; -use sqlx::{PgPool, Row}; -use uuid::Uuid; - -use crate::application::dto::git::{ - GitPullConflictItemDto, GitPullResolutionDto, GitPullSessionDto, -}; -use crate::application::ports::git_pull_session_repository::GitPullSessionRepository; - -pub struct GitPullSessionRepositorySqlx { - pool: PgPool, -} - -impl GitPullSessionRepositorySqlx { - pub fn new(pool: PgPool) -> Self { - Self { pool } - } -} - -#[async_trait] -impl GitPullSessionRepository for GitPullSessionRepositorySqlx { - async 
fn upsert(&self, session: GitPullSessionDto) -> anyhow::Result<()> {
-        let GitPullSessionDto {
-            id,
-            workspace_id,
-            status,
-            conflicts,
-            resolutions,
-            message,
-            base_commit,
-            remote_commit,
-        } = session;
-        sqlx::query(
-            r#"INSERT INTO git_pull_sessions (id, workspace_id, status, conflicts, resolutions, created_at, updated_at, message, base_commit, remote_commit)
-               VALUES ($1, $2, $3, $4, $5, now(), now(), $6, $7, $8)
-               ON CONFLICT (id) DO UPDATE SET
-                 status = EXCLUDED.status,
-                 conflicts = EXCLUDED.conflicts,
-                 resolutions = EXCLUDED.resolutions,
-                 message = EXCLUDED.message,
-                 base_commit = EXCLUDED.base_commit,
-                 remote_commit = EXCLUDED.remote_commit,
-                 updated_at = now()"#,
-        )
-        .bind(id)
-        .bind(workspace_id)
-        .bind(status)
-        .bind(Json(conflicts))
-        .bind(Json(resolutions))
-        .bind(message.clone())
-        .bind(base_commit.clone())
-        .bind(remote_commit.clone())
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn get(&self, workspace_id: Uuid, id: Uuid) -> anyhow::Result<Option<GitPullSessionDto>> {
-        let row = sqlx::query(
-            r#"SELECT id, workspace_id, status, conflicts, resolutions, message, base_commit, remote_commit FROM git_pull_sessions
-               WHERE id = $1 AND workspace_id = $2"#,
-        )
-        .bind(id)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        let Some(row) = row else {
-            return Ok(None);
-        };
-        let conflicts: Vec = row
-            .get::>, _>("conflicts")
-            .0;
-        let resolutions: Vec = row
-            .get::>, _>("resolutions")
-            .0;
-        Ok(Some(GitPullSessionDto {
-            id,
-            workspace_id,
-            status: row.get::<String, _>("status"),
-            conflicts,
-            resolutions,
-            message: row.try_get::<Option<String>, _>("message").unwrap_or(None),
-            base_commit: row.get::<Option<String>, _>("base_commit"),
-            remote_commit: row.get::<Option<String>, _>("remote_commit"),
-        }))
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/git_repository_sqlx.rs b/api/src/infrastructure/db/repositories/git_repository_sqlx.rs
deleted file mode 100644
index f149b119..00000000
--- a/api/src/infrastructure/db/repositories/git_repository_sqlx.rs
+++ /dev/null
@@ -1,334 +0,0 @@
-use async_trait::async_trait;
-use sqlx::{Row, error::DatabaseError};
-use std::sync::atomic::{AtomicBool, Ordering};
-use tokio::sync::Mutex;
-use tracing::warn;
-use uuid::Uuid;
-
-use crate::application::ports::git_repository::{GitRepository, UserGitCfg};
-use crate::infrastructure::crypto;
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxGitRepository {
-    pub pool: PgPool,
-    encryption_key: String,
-    workspace_constraint_checked: AtomicBool,
-    workspace_constraint_check_lock: Mutex<()>,
-}
-
-impl SqlxGitRepository {
-    pub fn new(pool: PgPool, encryption_key: impl Into<String>) -> Self {
-        Self {
-            pool,
-            encryption_key: encryption_key.into(),
-            workspace_constraint_checked: AtomicBool::new(false),
-            workspace_constraint_check_lock: Mutex::new(()),
-        }
-    }
-
-    async fn ensure_workspace_unique_constraint_ready(&self) -> anyhow::Result<()> {
-        if self.workspace_constraint_checked.load(Ordering::Relaxed) {
-            return Ok(());
-        }
-
-        let _guard = self.workspace_constraint_check_lock.lock().await;
-        if self.workspace_constraint_checked.load(Ordering::Relaxed) {
-            return Ok(());
-        }
-
-        let constraint_exists: bool = sqlx::query_scalar(
-            "SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'git_configs_workspace_unique')",
-        )
-        .fetch_one(&self.pool)
-        .await?;
-
-        if !constraint_exists {
-            self.repair_workspace_unique_constraint().await?;
-        }
-
-        self.workspace_constraint_checked
-            .store(true, Ordering::Relaxed);
-        Ok(())
-    }
-
-    async fn repair_workspace_unique_constraint(&self) -> anyhow::Result<()> {
-        let mut tx = self.pool.begin().await?;
-        sqlx::query("LOCK TABLE git_configs IN EXCLUSIVE MODE")
-            .execute(&mut *tx)
-            .await?;
-
-        let constraint_exists: bool = sqlx::query_scalar(
-            "SELECT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'git_configs_workspace_unique')",
-        )
-        .fetch_one(&mut *tx)
-        .await?;
-
-        if constraint_exists {
-            tx.commit().await?;
-            return Ok(());
-        }
-
-        let dedup = sqlx::query(
-            r#"WITH ranked AS (
-                   SELECT
-                       id,
-                       ROW_NUMBER() OVER (
-                           PARTITION BY workspace_id
-                           ORDER BY updated_at DESC, created_at DESC, id DESC
-                       ) AS rn
-                   FROM git_configs
-               )
-               DELETE FROM git_configs gc
-               USING ranked r
-               WHERE gc.id = r.id
-                 AND r.rn > 1;"#,
-        )
-        .execute(&mut *tx)
-        .await?;
-        if dedup.rows_affected() > 0 {
-            warn!(
-                rows = dedup.rows_affected(),
-                "git_configs_workspace_unique_repair_deduped"
-            );
-        }
-
-        if let Err(err) = sqlx::query(
-            "ALTER TABLE git_configs ADD CONSTRAINT git_configs_workspace_unique UNIQUE (workspace_id)",
-        )
-        .execute(&mut *tx)
-        .await
-        {
-            match err {
-                sqlx::Error::Database(db_err) => {
-                    let is_duplicate = db_err.code().map(|c| c == "42710").unwrap_or(false);
-                    if !is_duplicate {
-                        return Err(sqlx::Error::Database(db_err).into());
-                    }
-                }
-                other => return Err(other.into()),
-            }
-        }
-
-        tx.commit().await?;
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl GitRepository for SqlxGitRepository {
-    async fn get_config(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            String,
-            String,
-            bool,
-            chrono::DateTime<chrono::Utc>,
-            chrono::DateTime<chrono::Utc>,
-        )>,
-    > {
-        let row = sqlx::query("SELECT id, repository_url, branch_name, auth_type, auto_sync, created_at, updated_at FROM git_configs WHERE workspace_id = $1 LIMIT 1")
-            .bind(workspace_id)
-            .fetch_optional(&self.pool)
-            .await?;
-        Ok(row.map(|r| {
-            (
-                r.get("id"),
-                r.get("repository_url"),
-                r.get("branch_name"),
-                r.get("auth_type"),
-                r.get("auto_sync"),
-                r.get("created_at"),
-                r.get("updated_at"),
-            )
-        }))
-    }
-
-    async fn upsert_config(
-        &self,
-        workspace_id: Uuid,
-        repository_url: &str,
-        branch_name: Option<&str>,
-        auth_type: &str,
-        auth_data: &serde_json::Value,
-        auto_sync: Option<bool>,
-    ) -> anyhow::Result<(
-        Uuid,
-        String,
-        String,
-        String,
-        bool,
-        chrono::DateTime<chrono::Utc>,
-        chrono::DateTime<chrono::Utc>,
-    )> {
-        self.ensure_workspace_unique_constraint_ready().await?;
-        let enc_auth = crypto::encrypt_auth_data(&self.encryption_key, auth_data);
-        let mut repaired_constraint = false;
-        loop {
-            let query = sqlx::query(
-                r#"INSERT INTO git_configs (workspace_id, repository_url, branch_name, auth_type, auth_data, auto_sync)
-                   VALUES ($1, $2, COALESCE($3, 'main'), $4, $5, COALESCE($6, true))
-                   ON CONFLICT ON CONSTRAINT git_configs_workspace_unique DO UPDATE SET
-                     repository_url = EXCLUDED.repository_url,
-                     branch_name = EXCLUDED.branch_name,
-                     auth_type = EXCLUDED.auth_type,
-                     auth_data = EXCLUDED.auth_data,
-                     auto_sync = EXCLUDED.auto_sync,
-                     updated_at = now()
-                   RETURNING id, repository_url, branch_name, auth_type, auto_sync, created_at, updated_at"#
-            )
-            .bind(workspace_id)
-            .bind(repository_url)
-            .bind(branch_name)
-            .bind(auth_type)
-            .bind(&enc_auth)
-            .bind(auto_sync);
-
-            match query.fetch_one(&self.pool).await {
-                Ok(row) => {
-                    break Ok((
-                        row.get("id"),
-                        row.get("repository_url"),
-                        row.get("branch_name"),
-                        row.get("auth_type"),
-                        row.get("auto_sync"),
-                        row.get("created_at"),
-                        row.get("updated_at"),
-                    ));
-                }
-                Err(sqlx::Error::Database(db_err)) => {
-                    if !repaired_constraint && is_missing_workspace_unique_error(db_err.as_ref()) {
-                        warn!(
-                            workspace_id = %workspace_id,
-                            "git_configs_workspace_unique_missing_repair"
-                        );
-                        self.repair_workspace_unique_constraint().await?;
-                        repaired_constraint = true;
-                        continue;
-                    }
-                    break Err(sqlx::Error::Database(db_err).into());
-                }
-                Err(err) => break Err(err.into()),
-            }
-        }
-    }
-
-    async fn delete_config(&self, workspace_id: Uuid) -> anyhow::Result<bool> {
-        let res = sqlx::query("DELETE FROM git_configs WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(res.rows_affected() > 0)
-    }
-
-    async fn load_user_git_cfg(&self, workspace_id: Uuid) -> anyhow::Result<Option<UserGitCfg>> {
-        let row = sqlx::query("SELECT repository_url, branch_name, auth_type, auth_data, auto_sync FROM git_configs WHERE workspace_id = $1 LIMIT 1")
-            .bind(workspace_id)
-            .fetch_optional(&self.pool)
-            .await?;
-        Ok(row.map(|r| {
-            let repository_url: String = r.get("repository_url");
-            let branch_name: String = r.get("branch_name");
-            let auth_type: Option<String> = r.try_get("auth_type").ok();
-            let raw_auth: Option<serde_json::Value> = r.try_get("auth_data").ok();
-            let auth_data = raw_auth.map(|v| crypto::decrypt_auth_data(&self.encryption_key, &v));
-            let auto_sync: bool = r.try_get("auto_sync").unwrap_or(true);
-            UserGitCfg {
-                repository_url,
-                branch_name,
-                auth_type,
-                auth_data,
-                auto_sync,
-            }
-        }))
-    }
-
-    async fn get_last_sync_log(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<
-        Option<(
-            Option<chrono::DateTime<chrono::Utc>>,
-            Option<String>,
-            Option<String>,
-            Option<String>,
-        )>,
-    > {
-        let row = sqlx::query("SELECT status, message, commit_hash, created_at FROM git_sync_logs WHERE workspace_id = $1 ORDER BY created_at DESC LIMIT 1")
-            .bind(workspace_id)
-            .fetch_optional(&self.pool)
-            .await?;
-        Ok(row.map(|r| {
-            (
-                r.try_get("created_at").ok(),
-                r.try_get("status").ok(),
-                r.try_get("message").ok(),
-                r.try_get("commit_hash").ok(),
-            )
-        }))
-    }
-
-    async fn log_sync_operation(
-        &self,
-        workspace_id: Uuid,
-        operation: &str,
-        status: &str,
-        message: Option<&str>,
-        commit_hash: Option<&str>,
-    ) -> anyhow::Result<()> {
-        let _ = sqlx::query("INSERT INTO git_sync_logs (workspace_id, operation, status, message, commit_hash) VALUES ($1, $2, $3, $4, $5)")
-            .bind(workspace_id)
-            .bind(operation)
-            .bind(status)
-            .bind(message)
-            .bind(commit_hash)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn delete_sync_logs(&self, workspace_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("DELETE FROM git_sync_logs WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn delete_repository_state(&self, workspace_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("DELETE FROM git_repository_state WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn list_auto_sync_workspaces(&self) -> anyhow::Result<Vec<Uuid>> {
-        let rows = sqlx::query(
-            "SELECT workspace_id FROM git_configs WHERE auto_sync IS DISTINCT FROM false",
-        )
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .filter_map(|r| r.try_get("workspace_id").ok())
-            .collect())
-    }
-}
-
-fn is_missing_workspace_unique_error(err: &dyn DatabaseError) -> bool {
-    let code_matches = err
-        .code()
-        .map(|c| c == "42P10" || c == "42704")
-        .unwrap_or(false);
-    code_matches
-        || err.message().contains(
-            "there is no unique or exclusion constraint matching the ON CONFLICT specification",
-        )
-        || err.message().contains(
-            "constraint \"git_configs_workspace_unique\" for table \"git_configs\" does not exist",
-        )
-}
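The ensure_workspace_unique_constraint_ready method deleted above is a double-checked guard: an AtomicBool fast path, then an async Mutex so at most one task runs the pg_constraint probe, then a re-check under the lock. A minimal, self-contained sketch of that pattern; it assumes only the tokio crate, and probe() is a hypothetical stand-in for the constraint check, not code from this repository.

// Sketch of the double-checked init guard used above (assumption:
// tokio with the "sync", "rt" and "macros" features enabled).
use std::sync::atomic::{AtomicBool, Ordering};
use tokio::sync::Mutex;

struct Guarded {
    checked: AtomicBool,
    lock: Mutex<()>,
}

impl Guarded {
    fn new() -> Self {
        Self { checked: AtomicBool::new(false), lock: Mutex::new(()) }
    }

    async fn ensure_ready(&self) {
        // Fast path: already initialized, no lock taken.
        if self.checked.load(Ordering::Relaxed) {
            return;
        }
        // Slow path: serialize racers, then re-check before doing the work.
        let _guard = self.lock.lock().await;
        if self.checked.load(Ordering::Relaxed) {
            return;
        }
        probe().await;
        self.checked.store(true, Ordering::Relaxed);
    }
}

async fn probe() {
    // Hypothetical stand-in for the one-time probe/repair work.
    println!("probed once");
}

#[tokio::main]
async fn main() {
    let g = Guarded::new();
    g.ensure_ready().await;
    g.ensure_ready().await; // second call is a no-op
}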
diff --git a/api/src/infrastructure/db/repositories/linkgraph_repository_sqlx.rs b/api/src/infrastructure/db/repositories/linkgraph_repository_sqlx.rs
deleted file mode 100644
index 347593f3..00000000
--- a/api/src/infrastructure/db/repositories/linkgraph_repository_sqlx.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::linkgraph_repository::LinkGraphRepository;
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxLinkGraphRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxLinkGraphRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl LinkGraphRepository for SqlxLinkGraphRepository {
-    async fn clear_links_for_source(&self, source_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("DELETE FROM document_links WHERE source_document_id = $1")
-            .bind(source_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn exists_doc_for_owner(&self, doc_id: Uuid, owner_id: Uuid) -> anyhow::Result<bool> {
-        let n = sqlx::query_scalar::<_, i64>(
-            "SELECT COUNT(1) FROM documents WHERE id = $1 AND owner_id = $2",
-        )
-        .bind(doc_id)
-        .bind(owner_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(n > 0)
-    }
-
-    async fn find_doc_id_by_owner_and_title(
-        &self,
-        owner_id: Uuid,
-        title: &str,
-    ) -> anyhow::Result<Option<Uuid>> {
-        let row = sqlx::query(
-            r#"SELECT id FROM documents
-               WHERE owner_id = $1 AND LOWER(title) = LOWER($2)
-               ORDER BY updated_at DESC LIMIT 1"#,
-        )
-        .bind(owner_id)
-        .bind(title)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| r.get::<Uuid, _>("id")))
-    }
-
-    async fn upsert_link(
-        &self,
-        source_id: Uuid,
-        target_id: Uuid,
-        link_type: &str,
-        link_text: Option<String>,
-        position_start: i32,
-        position_end: i32,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"INSERT INTO document_links (
-                   source_document_id, target_document_id, link_type,
-                   link_text, position_start, position_end, created_at, updated_at
-               ) VALUES ($1, $2, $3, $4, $5, $6, now(), now())
-               ON CONFLICT (source_document_id, target_document_id, position_start)
-               DO UPDATE SET link_type = EXCLUDED.link_type,
-                             link_text = EXCLUDED.link_text,
-                             position_end = EXCLUDED.position_end,
-                             updated_at = now()
-            "#,
-        )
-        .bind(source_id)
-        .bind(target_id)
-        .bind(link_type)
-        .bind(link_text)
-        .bind(position_start)
-        .bind(position_end)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/mod.rs b/api/src/infrastructure/db/repositories/mod.rs
deleted file mode 100644
index 2841954e..00000000
--- a/api/src/infrastructure/db/repositories/mod.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-pub mod access_repository_sqlx;
-pub mod api_token_repository_sqlx;
-pub mod document_repository_sqlx;
-pub mod document_snapshot_archive_repository_sqlx;
-pub mod files_repository_sqlx;
-pub mod git_pull_session_repository_sqlx;
-pub mod git_repository_sqlx;
-pub mod linkgraph_repository_sqlx;
-pub mod plugin_installation_repository_sqlx;
-pub mod plugin_repository_sqlx;
-pub mod public_repository_sqlx;
-pub mod shares_repository_sqlx;
-pub mod tag_repository_sqlx;
-pub mod tagging_repository_sqlx;
-pub mod user_repository_sqlx;
-pub mod user_session_repository_sqlx;
-pub mod user_shortcut_repository_sqlx;
-pub mod workspace_repository_sqlx;
diff --git a/api/src/infrastructure/db/repositories/plugin_installation_repository_sqlx.rs b/api/src/infrastructure/db/repositories/plugin_installation_repository_sqlx.rs
deleted file mode 100644
index b1d48c57..00000000
--- a/api/src/infrastructure/db/repositories/plugin_installation_repository_sqlx.rs
+++ /dev/null
@@ -1,127 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::plugin_installation_repository::{
-    PluginInstallation, PluginInstallationRepository,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxPluginInstallationRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxPluginInstallationRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl PluginInstallationRepository for SqlxPluginInstallationRepository {
-    async fn upsert(
-        &self,
-        workspace_id: Uuid,
-        plugin_id: &str,
-        version: &str,
-        scope: &str,
-        origin_url: Option<&str>,
-        status: &str,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"INSERT INTO plugin_installations
-                   (workspace_id, plugin_id, version, scope, origin_url, status)
-               VALUES ($1, $2, $3, $4, $5, $6)
-               ON CONFLICT (workspace_id, plugin_id)
-               DO UPDATE SET
-                   version = EXCLUDED.version,
-                   scope = EXCLUDED.scope,
-                   origin_url = EXCLUDED.origin_url,
-                   status = EXCLUDED.status,
-                   updated_at = now()"#,
-        )
-        .bind(workspace_id)
-        .bind(plugin_id)
-        .bind(version)
-        .bind(scope)
-        .bind(origin_url)
-        .bind(status)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn list_for_workspace(
-        &self,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Vec<PluginInstallation>> {
-        let rows = sqlx::query(
-            r#"SELECT workspace_id, plugin_id, version, scope, origin_url, status, installed_at, updated_at
-               FROM plugin_installations
-               WHERE workspace_id = $1"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        let mut out = Vec::with_capacity(rows.len());
-        for row in rows {
-            out.push(PluginInstallation {
-                workspace_id: row.get("workspace_id"),
-                plugin_id: row.get("plugin_id"),
-                version: row.get("version"),
-                scope: row.get("scope"),
-                origin_url: row.try_get("origin_url").ok(),
-                status: row.get("status"),
-                installed_at: row.get("installed_at"),
-                updated_at: row.get("updated_at"),
-            });
-        }
-
-        Ok(out)
-    }
-
-    async fn list_all(&self) -> anyhow::Result<Vec<PluginInstallation>> {
-        let rows = sqlx::query(
-            r#"SELECT workspace_id, plugin_id, version, scope, origin_url, status, installed_at, updated_at
-               FROM plugin_installations"#,
-        )
-        .fetch_all(&self.pool)
-        .await?;
-
-        let mut out = Vec::with_capacity(rows.len());
-        for row in rows {
-            out.push(PluginInstallation {
-                workspace_id: row.get("workspace_id"),
-                plugin_id: row.get("plugin_id"),
-                version: row.get("version"),
-                scope: row.get("scope"),
-                origin_url: row.try_get("origin_url").ok(),
-                status: row.get("status"),
-                installed_at: row.get("installed_at"),
-                updated_at: row.get("updated_at"),
-            });
-        }
-
-        Ok(out)
-    }
-
-    async fn remove(&self, workspace_id: Uuid, plugin_id: &str) -> anyhow::Result<bool> {
-        let res = sqlx::query(
-            "DELETE FROM plugin_installations WHERE workspace_id = $1 AND plugin_id = $2",
-        )
-        .bind(workspace_id)
-        .bind(plugin_id)
-        .execute(&self.pool)
-        .await?;
-        Ok(res.rows_affected() > 0)
-    }
-
-    async fn remove_all_for_workspace(&self, workspace_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("DELETE FROM plugin_installations WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/plugin_repository_sqlx.rs b/api/src/infrastructure/db/repositories/plugin_repository_sqlx.rs
deleted file mode 100644
index 6e793c51..00000000
--- a/api/src/infrastructure/db/repositories/plugin_repository_sqlx.rs
+++ /dev/null
@@ -1,214 +0,0 @@
-use async_trait::async_trait;
-use serde_json::Value as JsonValue;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::plugin_repository::{PluginRecord, PluginRepository};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxPluginRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxPluginRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl PluginRepository for SqlxPluginRepository {
-    async fn kv_get(
-        &self,
-        plugin: &str,
-        scope: &str,
-        scope_id: Option<Uuid>,
-        key: &str,
-    ) -> anyhow::Result<Option<JsonValue>> {
-        let row = sqlx::query(
-            r#"SELECT value FROM plugin_kv WHERE plugin = $1 AND scope = $2 AND scope_id IS NOT DISTINCT FROM $3 AND key = $4"#,
-        )
-        .bind(plugin)
-        .bind(scope)
-        .bind(scope_id)
-        .bind(key)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.and_then(|r| r.try_get::<JsonValue, _>("value").ok()))
-    }
-
-    async fn kv_set(
-        &self,
-        plugin: &str,
-        scope: &str,
-        scope_id: Option<Uuid>,
-        key: &str,
-        value: &JsonValue,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"INSERT INTO plugin_kv (plugin, scope, scope_id, key, value)
-               VALUES ($1, $2, $3, $4, $5)
-               ON CONFLICT (plugin, scope, scope_id, key)
-               DO UPDATE SET value = EXCLUDED.value, updated_at = now()"#,
-        )
-        .bind(plugin)
-        .bind(scope)
-        .bind(scope_id)
-        .bind(key)
-        .bind(value)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn insert_record(
-        &self,
-        plugin: &str,
-        scope: &str,
-        scope_id: Uuid,
-        kind: &str,
-        data: &JsonValue,
-    ) -> anyhow::Result<PluginRecord> {
-        let row = sqlx::query(
-            r#"INSERT INTO plugin_records (plugin, scope, scope_id, kind, data)
-               VALUES ($1, $2, $3, $4, $5)
-               RETURNING id, plugin, scope, scope_id, kind, data, created_at, updated_at"#,
-        )
-        .bind(plugin)
-        .bind(scope)
-        .bind(scope_id)
-        .bind(kind)
-        .bind(data)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(PluginRecord {
-            id: row.get("id"),
-            plugin: row.get("plugin"),
-            scope: row.get("scope"),
-            scope_id: row.get("scope_id"),
-            kind: row.get("kind"),
-            data: row.get("data"),
-            created_at: row.get("created_at"),
-            updated_at: row.get("updated_at"),
-        })
-    }
-
-    async fn update_record_data(
-        &self,
-        record_id: Uuid,
-        patch: &JsonValue,
-    ) -> anyhow::Result<Option<PluginRecord>> {
-        let row = sqlx::query(
-            r#"UPDATE plugin_records SET data = data || $2::jsonb, updated_at = now()
-               WHERE id = $1
-               RETURNING id, plugin, scope, scope_id, kind, data, created_at, updated_at"#,
-        )
-        .bind(record_id)
-        .bind(patch)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| PluginRecord {
-            id: r.get("id"),
-            plugin: r.get("plugin"),
-            scope: r.get("scope"),
-            scope_id: r.get("scope_id"),
-            kind: r.get("kind"),
-            data: r.get("data"),
-            created_at: r.get("created_at"),
-            updated_at: r.get("updated_at"),
-        }))
-    }
-
-    async fn delete_record(&self, record_id: Uuid) -> anyhow::Result<bool> {
-        let res = sqlx::query("DELETE FROM plugin_records WHERE id = $1")
-            .bind(record_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(res.rows_affected() > 0)
-    }
-
-    async fn get_record(&self, record_id: Uuid) -> anyhow::Result<Option<PluginRecord>> {
-        let row = sqlx::query(
-            r#"SELECT id, plugin, scope, scope_id, kind, data, created_at, updated_at
-               FROM plugin_records WHERE id = $1"#,
-        )
-        .bind(record_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| PluginRecord {
-            id: r.get("id"),
-            plugin: r.get("plugin"),
-            scope: r.get("scope"),
-            scope_id: r.get("scope_id"),
-            kind: r.get("kind"),
-            data: r.get("data"),
-            created_at: r.get("created_at"),
-            updated_at: r.get("updated_at"),
-        }))
-    }
-
-    async fn list_records(
-        &self,
-        plugin: &str,
-        scope: &str,
-        scope_id: Uuid,
-        kind: &str,
-        limit: i64,
-        offset: i64,
-    ) -> anyhow::Result<Vec<PluginRecord>> {
-        let rows = sqlx::query(
-            r#"SELECT id, plugin, scope, scope_id, kind, data, created_at, updated_at
-               FROM plugin_records
-               WHERE plugin = $1 AND scope = $2 AND scope_id = $3 AND kind = $4
-               ORDER BY COALESCE((data->>'pinned')::boolean,false) DESC, created_at DESC
-               LIMIT $5 OFFSET $6"#,
-        )
-        .bind(plugin)
-        .bind(scope)
-        .bind(scope_id)
-        .bind(kind)
-        .bind(limit)
-        .bind(offset)
-        .fetch_all(&self.pool)
-        .await?;
-
-        let mut out = Vec::with_capacity(rows.len());
-        for r in rows {
-            out.push(PluginRecord {
-                id: r.get("id"),
-                plugin: r.get("plugin"),
-                scope: r.get("scope"),
-                scope_id: r.get("scope_id"),
-                kind: r.get("kind"),
-                data: r.get("data"),
-                created_at: r.get("created_at"),
-                updated_at: r.get("updated_at"),
-            });
-        }
-        Ok(out)
-    }
-
-    async fn delete_scoped_kv(&self, scope: &str, scope_ids: &[Uuid]) -> anyhow::Result<()> {
-        if scope_ids.is_empty() {
-            return Ok(());
-        }
-        sqlx::query("DELETE FROM plugin_kv WHERE scope = $1 AND scope_id = ANY($2)")
-            .bind(scope)
-            .bind(scope_ids)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn delete_scoped_records(&self, scope: &str, scope_ids: &[Uuid]) -> anyhow::Result<()> {
-        if scope_ids.is_empty() {
-            return Ok(());
-        }
-        sqlx::query("DELETE FROM plugin_records WHERE scope = $1 AND scope_id = ANY($2)")
-            .bind(scope)
-            .bind(scope_ids)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-}
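The update_record_data method deleted above patches records with Postgres's jsonb concatenation, data = data || $2::jsonb, which is a shallow top-level merge: each key in the patch replaces the stored value wholesale, nested objects included. A runnable serde_json sketch of the same semantics; shallow_merge is an illustrative helper, not part of this codebase.

// Mirrors Postgres `data || patch::jsonb`: every top-level key in
// `patch` overwrites the corresponding key in `data` (no deep merge).
use serde_json::{Value, json};

fn shallow_merge(data: &mut Value, patch: &Value) {
    if let (Value::Object(base), Value::Object(overlay)) = (data, patch) {
        for (k, v) in overlay {
            base.insert(k.clone(), v.clone());
        }
    }
}

fn main() {
    let mut data = json!({"pinned": false, "meta": {"a": 1, "b": 2}});
    let patch = json!({"pinned": true, "meta": {"a": 9}});
    shallow_merge(&mut data, &patch);
    // "meta" is replaced wholesale ("b" is gone), exactly as the jsonb
    // || operator behaves on objects.
    assert_eq!(data, json!({"pinned": true, "meta": {"a": 9}}));
    println!("{data}");
}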
diff --git a/api/src/infrastructure/db/repositories/public_repository_sqlx.rs b/api/src/infrastructure/db/repositories/public_repository_sqlx.rs
deleted file mode 100644
index dd7998a1..00000000
--- a/api/src/infrastructure/db/repositories/public_repository_sqlx.rs
+++ /dev/null
@@ -1,205 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::public_repository::PublicRepository;
-use crate::domain::documents::document::Document;
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxPublicRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxPublicRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl PublicRepository for SqlxPublicRepository {
-    async fn ensure_workspace_title_and_slug(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<Option<(String, String)>> {
-        let row = sqlx::query(
-            "SELECT d.title, w.slug as workspace_slug FROM documents d JOIN workspaces w ON d.workspace_id = w.id WHERE d.id = $1 AND d.workspace_id = $2",
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| (r.get("title"), r.get("workspace_slug"))))
-    }
-
-    async fn upsert_public_document(&self, doc_id: Uuid, slug: &str) -> anyhow::Result<()> {
-        let _ = sqlx::query("INSERT INTO public_documents (document_id, slug, published_at) VALUES ($1, $2, now()) ON CONFLICT (document_id) DO UPDATE SET slug = EXCLUDED.slug, published_at = now()")
-            .bind(doc_id)
-            .bind(slug)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn slug_exists(&self, slug: &str) -> anyhow::Result<bool> {
-        let n =
-            sqlx::query_scalar::<_, i64>("SELECT COUNT(1) FROM public_documents WHERE slug = $1")
-                .bind(slug)
-                .fetch_one(&self.pool)
-                .await?;
-        Ok(n > 0)
-    }
-
-    async fn is_workspace_document(
-        &self,
-        doc_id: Uuid,
-        workspace_id: Uuid,
-    ) -> anyhow::Result<bool> {
-        let n = sqlx::query_scalar::<_, i64>(
-            "SELECT COUNT(1) FROM documents WHERE id = $1 AND workspace_id = $2",
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(n > 0)
-    }
-
-    async fn delete_public_document(&self, doc_id: Uuid) -> anyhow::Result<bool> {
-        let res = sqlx::query("DELETE FROM public_documents WHERE document_id = $1")
-            .bind(doc_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(res.rows_affected() > 0)
-    }
-
-    async fn get_publish_status(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<(String, String)>> {
-        let row = sqlx::query(
-            r#"SELECT p.slug, w.slug as workspace_slug
-               FROM public_documents p
-               JOIN documents d ON p.document_id = d.id
-               JOIN workspaces w ON d.workspace_id = w.id
-               WHERE p.document_id = $1 AND d.workspace_id = $2"#,
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| (r.get("slug"), r.get("workspace_slug"))))
-    }
-
-    async fn list_workspace_public_documents(
-        &self,
-        workspace_slug: &str,
-    ) -> anyhow::Result<
-        Vec<(
-            Uuid,
-            String,
-            chrono::DateTime<chrono::Utc>,
-            chrono::DateTime<chrono::Utc>,
-        )>,
-    > {
-        let rows = sqlx::query(
-            r#"SELECT d.id, d.title, d.updated_at, p.published_at
-               FROM public_documents p
-               JOIN documents d ON p.document_id = d.id
-               JOIN workspaces w ON d.workspace_id = w.id
-               WHERE w.slug = $1
-                  OR (w.is_personal AND EXISTS (
-                        SELECT 1
-                        FROM users u
-                        WHERE u.id = w.id AND lower(u.name) = lower($1)
-                  ))
-               ORDER BY d.updated_at DESC LIMIT 200"#,
-        )
-        .bind(workspace_slug)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| {
-                (
-                    r.get("id"),
-                    r.get("title"),
-                    r.get("updated_at"),
-                    r.get("published_at"),
-                )
-            })
-            .collect())
-    }
-
-    async fn get_public_meta_by_workspace_and_id(
-        &self,
-        workspace_slug: &str,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<Document>> {
-        let row = sqlx::query(
-            r#"SELECT d.id, d.owner_id, d.owner_user_id, d.workspace_id, d.title, d.parent_id, d.type, d.created_at, d.updated_at,
-                      d.slug, d.desired_path, d.path, d.created_by, d.created_by_plugin,
-                      d.archived_at, d.archived_by, d.archived_parent_id
-               FROM public_documents p
-               JOIN documents d ON p.document_id = d.id
-               JOIN workspaces w ON d.workspace_id = w.id
-               WHERE (w.slug = $1
-                  OR (w.is_personal AND EXISTS (
-                        SELECT 1
-                        FROM users u
-                        WHERE u.id = w.id AND lower(u.name) = lower($1)
-                  )))
-                 AND d.id = $2"#,
-        )
-        .bind(workspace_slug)
-        .bind(doc_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| Document {
-            id: r.get("id"),
-            owner_id: r.get("owner_id"),
-            owner_user_id: r.try_get("owner_user_id").ok(),
-            workspace_id: r.get("workspace_id"),
-            title: r.get("title"),
-            parent_id: r.try_get("parent_id").ok(),
-            doc_type: r.get("type"),
-            created_at: r.get("created_at"),
-            updated_at: r.get("updated_at"),
-            slug: r.get("slug"),
-            desired_path: r.get("desired_path"),
-            path: r.try_get("path").ok(),
-            created_by: r.try_get("created_by").ok(),
-            created_by_plugin: r.try_get("created_by_plugin").ok(),
-            archived_at: r.try_get("archived_at").ok(),
-            archived_by: r.try_get("archived_by").ok(),
-            archived_parent_id: r.try_get("archived_parent_id").ok(),
-        }))
-    }
-
-    async fn public_exists_by_workspace_and_id(
-        &self,
-        workspace_slug: &str,
-        doc_id: Uuid,
-    ) -> anyhow::Result<bool> {
-        let n = sqlx::query_scalar::<_, i64>(
-            r#"SELECT COUNT(1)
-               FROM public_documents p
-               JOIN documents d ON p.document_id = d.id
-               JOIN workspaces w ON d.workspace_id = w.id
-               WHERE (w.slug = $1
-                  OR (w.is_personal AND EXISTS (
-                        SELECT 1
-                        FROM users u
-                        WHERE u.id = w.id AND lower(u.name) = lower($1)
-                  )))
-                 AND d.id = $2"#,
-        )
-        .bind(workspace_slug)
-        .bind(doc_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(n > 0)
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/shares_repository_sqlx.rs b/api/src/infrastructure/db/repositories/shares_repository_sqlx.rs
deleted file mode 100644
index c4cd5ec5..00000000
--- a/api/src/infrastructure/db/repositories/shares_repository_sqlx.rs
+++ /dev/null
@@ -1,580 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::share_access_port::ShareAccessPort;
-use crate::application::ports::shares_repository::{ShareMountRow, ShareRow, SharesRepository};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxSharesRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxSharesRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-
-    async fn fetch_share_resolution(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            Option<chrono::DateTime<chrono::Utc>>,
-            Uuid,
-            String,
-            Uuid,
-        )>,
-    > {
-        let row = sqlx::query(
-            r#"SELECT s.id as share_id, s.permission, s.expires_at, d.id as shared_id, d.type as shared_type, d.workspace_id
-               FROM shares s
-               JOIN documents d ON s.document_id = d.id
-               WHERE s.token = $1"#,
-        )
-        .bind(token)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| {
-            (
-                r.get("share_id"),
-                r.get("permission"),
-                r.try_get("expires_at").ok(),
-                r.get("shared_id"),
-                r.get("shared_type"),
-                r.get("workspace_id"),
-            )
-        }))
-    }
-}
-
-#[async_trait]
-impl SharesRepository for SqlxSharesRepository {
-    async fn create_share(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        document_id: Uuid,
-        permission: &str,
-        expires_at: Option<chrono::DateTime<chrono::Utc>>,
-    ) -> anyhow::Result<(String, Uuid, String)> {
-        // Verify ownership and type
-        let dtype: String =
-            sqlx::query_scalar("SELECT type FROM documents WHERE id = $1 AND workspace_id = $2")
-                .bind(document_id)
-                .bind(workspace_id)
-                .fetch_optional(&self.pool)
-                .await?
-                .ok_or_else(|| anyhow::anyhow!("forbidden"))?;
-        let token = Uuid::new_v4().to_string();
-        let row = sqlx::query("INSERT INTO shares (document_id, token, permission, created_by, expires_at) VALUES ($1, $2, $3, $4, $5) RETURNING id, token")
-            .bind(document_id)
-            .bind(&token)
-            .bind(permission)
-            .bind(actor_id)
-            .bind(expires_at)
-            .fetch_one(&self.pool)
-            .await?;
-        let token_saved: String = row.get("token");
-        let share_id: Uuid = row.get("id");
-        if dtype == "folder" {
-            // Materialize per-document shares for folder subtree
-            let _created: i64 = sqlx::query_scalar(
-                r#"
-                WITH RECURSIVE subtree AS (
-                    SELECT id, type FROM documents WHERE id = $1
-                    UNION ALL
-                    SELECT d.id, d.type FROM documents d JOIN subtree sb ON d.parent_id = sb.id
-                ),
-                targets AS (
-                    SELECT id FROM subtree WHERE type <> 'folder'
-                ),
-                inserted AS (
-                    INSERT INTO shares (document_id, token, permission, created_by, expires_at, parent_share_id)
-                    SELECT t.id, gen_random_uuid()::text, $2, $3, $4, $5
-                    FROM targets t
-                    WHERE NOT EXISTS (
-                        SELECT 1
-                        FROM shares s2
-                        WHERE s2.document_id = t.id
-                          AND s2.parent_share_id = $5
-                    )
-                    RETURNING 1
-                )
-                SELECT COALESCE(COUNT(*),0) FROM inserted
-                "#
-            )
-            .bind(document_id)
-            .bind(permission)
-            .bind(actor_id)
-            .bind(expires_at)
-            .bind(share_id)
-            .fetch_one(&self.pool)
-            .await?;
-        }
-        Ok((token_saved, share_id, dtype))
-    }
-
-    async fn list_document_shares(
-        &self,
-        workspace_id: Uuid,
-        document_id: Uuid,
-    ) -> anyhow::Result<Vec<ShareRow>> {
-        let rows = sqlx::query(
-            r#"SELECT s.id, s.token, s.permission, s.expires_at, s.parent_share_id, s.created_at,
-                      d.id as document_id, d.title as document_title, d.type as document_type
-               FROM shares s JOIN documents d ON d.id = s.document_id
-               WHERE s.document_id = $1 AND d.workspace_id = $2
-               ORDER BY s.created_at DESC"#,
-        )
-        .bind(document_id)
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        let mut out = Vec::with_capacity(rows.len());
-        for r in rows.into_iter() {
-            out.push(ShareRow {
-                id: r.get("id"),
-                token: r.get("token"),
-                permission: r.get("permission"),
-                expires_at: r.try_get("expires_at").ok(),
-                parent_share_id: r.try_get("parent_share_id").ok(),
-                document_id: r.get("document_id"),
-                document_type: r.get("document_type"),
-                document_title: r.get("document_title"),
-                created_at: r.get("created_at"),
-            });
-        }
-        Ok(out)
-    }
-
-    async fn delete_share(&self, workspace_id: Uuid, token: &str) -> anyhow::Result<bool> {
-        let res = sqlx::query(
-            "DELETE FROM shares s USING documents d WHERE s.token = $1 AND s.document_id = d.id AND d.workspace_id = $2",
-        )
-        .bind(token)
-        .bind(workspace_id)
-        .execute(&self.pool)
-        .await?;
-        let deleted = res.rows_affected() > 0;
-        if deleted {
-            // Remove any saved mounts referencing this share token across workspaces
-            sqlx::query("DELETE FROM share_mounts WHERE share_token = $1")
-                .bind(token)
-                .execute(&self.pool)
-                .await?;
-        }
-        Ok(deleted)
-    }
-
-    async fn list_share_mounts(&self, workspace_id: Uuid) -> anyhow::Result<Vec<ShareMountRow>> {
-        // Clean up mounts whose share token no longer exists or has expired
-        sqlx::query(
-            r#"
-            DELETE FROM share_mounts sm
-            WHERE sm.workspace_id = $1
-              AND NOT EXISTS (
-                  SELECT 1
-                  FROM shares s
-                  WHERE s.token = sm.share_token
-                    AND (s.expires_at IS NULL OR s.expires_at > now())
-              )
-            "#,
-        )
-        .bind(workspace_id)
-        .execute(&self.pool)
-        .await?;
-
-        let rows = sqlx::query(
-            r#"SELECT id, share_token, target_document_id, target_document_type, target_title, permission, parent_folder_id, created_at
-               FROM share_mounts
-               WHERE workspace_id = $1
-               ORDER BY created_at DESC"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        let mut out = Vec::with_capacity(rows.len());
-        for r in rows.into_iter() {
-            out.push(ShareMountRow {
-                id: r.get("id"),
-                token: r.get("share_token"),
-                target_document_id: r.get("target_document_id"),
-                target_document_type: r.get("target_document_type"),
-                target_title: r.get("target_title"),
-                permission: r.get("permission"),
-                parent_folder_id: r.try_get("parent_folder_id").ok(),
-                created_at: r.get("created_at"),
-            });
-        }
-        Ok(out)
-    }
-
-    async fn create_share_mount(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        token: &str,
-        target_document_id: Uuid,
-        target_document_type: &str,
-        target_title: &str,
-        permission: &str,
-        parent_folder_id: Option<Uuid>,
-    ) -> anyhow::Result<ShareMountRow> {
-        if let Some(parent_id) = parent_folder_id {
-            let exists = sqlx::query_scalar::<_, i64>(
-                "SELECT 1 FROM documents WHERE id = $1 AND workspace_id = $2 AND type = 'folder'",
-            )
-            .bind(parent_id)
-            .bind(workspace_id)
-            .fetch_optional(&self.pool)
-            .await?;
-            if exists.is_none() {
-                anyhow::bail!("invalid_parent");
-            }
-        }
-        let row = sqlx::query(
-            r#"
-            INSERT INTO share_mounts (workspace_id, created_by, share_token, target_document_id, target_document_type, target_title, permission, parent_folder_id)
-            VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-            ON CONFLICT (workspace_id, share_token, target_document_id)
-            DO UPDATE SET target_title = EXCLUDED.target_title,
-                          permission = EXCLUDED.permission,
-                          parent_folder_id = EXCLUDED.parent_folder_id
-            RETURNING id, share_token, target_document_id, target_document_type, target_title, permission, parent_folder_id, created_at
-            "#,
-        )
-        .bind(workspace_id)
-        .bind(actor_id)
-        .bind(token)
-        .bind(target_document_id)
-        .bind(target_document_type)
-        .bind(target_title)
-        .bind(permission)
-        .bind(parent_folder_id)
-        .fetch_one(&self.pool)
-        .await?;
-
-        Ok(ShareMountRow {
-            id: row.get("id"),
-            token: row.get("share_token"),
-            target_document_id: row.get("target_document_id"),
-            target_document_type: row.get("target_document_type"),
-            target_title: row.get("target_title"),
-            permission: row.get("permission"),
-            parent_folder_id: row.try_get("parent_folder_id").ok(),
-            created_at: row.get("created_at"),
-        })
-    }
-
-    async fn delete_share_mount(&self, workspace_id: Uuid, mount_id: Uuid) -> anyhow::Result<bool> {
-        let res = sqlx::query("DELETE FROM share_mounts WHERE id = $1 AND workspace_id = $2")
-            .bind(mount_id)
-            .bind(workspace_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(res.rows_affected() > 0)
-    }
-
-    async fn validate_share_token(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<Option<(Uuid, String, Option<chrono::DateTime<chrono::Utc>>, String)>> {
-        let row = sqlx::query(
-            r#"SELECT s.document_id, s.permission, s.expires_at, d.title
-               FROM shares s JOIN documents d ON d.id = s.document_id
-               WHERE s.token = $1"#,
-        )
-        .bind(token)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| {
-            (
-                r.get("document_id"),
-                r.get("permission"),
-                r.try_get("expires_at").ok(),
-                r.get("title"),
-            )
-        }))
-    }
-
-    async fn list_applicable_shares_for_doc(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Vec<(String, String, Option<chrono::DateTime<chrono::Utc>>)>> {
-        let rows = sqlx::query(
-            r#"SELECT s.token, s.permission, s.expires_at
-               FROM shares s
-               JOIN documents d ON d.id = s.document_id
-               WHERE s.document_id = $1 AND d.workspace_id = $2"#,
-        )
-        .bind(doc_id)
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| {
-                (
-                    r.get("token"),
-                    r.get("permission"),
-                    r.try_get("expires_at").ok(),
-                )
-            })
-            .collect())
-    }
-
-    async fn list_active_shares(&self, workspace_id: Uuid) -> anyhow::Result<Vec<ShareRow>> {
-        let rows = sqlx::query(
-            r#"SELECT s.id, s.token, s.permission, s.expires_at, s.created_at, s.parent_share_id,
-                      d.id as document_id, d.title as document_title, d.type as document_type
-               FROM shares s
-               JOIN documents d ON d.id = s.document_id
-               WHERE d.workspace_id = $1 AND (s.expires_at IS NULL OR s.expires_at > now())
-               ORDER BY s.created_at DESC"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        let mut out = Vec::with_capacity(rows.len());
-        for r in rows.into_iter() {
-            out.push(ShareRow {
-                id: r.get("id"),
-                token: r.get("token"),
-                permission: r.get("permission"),
-                expires_at: r.try_get("expires_at").ok(),
-                parent_share_id: r.try_get("parent_share_id").ok(),
-                document_id: r.get("document_id"),
-                document_type: r.get("document_type"),
-                document_title: r.get("document_title"),
-                created_at: r.get("created_at"),
-            });
-        }
-        Ok(out)
-    }
-
-    async fn resolve_share_by_token(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            Option<chrono::DateTime<chrono::Utc>>,
-            Uuid,
-            String,
-            Uuid,
-        )>,
-    > {
-        self.fetch_share_resolution(token).await
-    }
-
-    async fn get_share_document_meta(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<Option<(Uuid, Uuid, Uuid)>> {
-        let row = sqlx::query(
-            "SELECT d.id as document_id, d.owner_id, d.workspace_id FROM shares s JOIN documents d ON d.id = s.document_id WHERE s.token = $1",
-        )
-        .bind(token)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| {
-            (
-                r.get("document_id"),
-                r.get("owner_id"),
-                r.get("workspace_id"),
-            )
-        }))
-    }
-
-    async fn list_subtree_nodes(
-        &self,
-        root_id: Uuid,
-    ) -> anyhow::Result<
-        Vec<(
-            Uuid,
-            String,
-            String,
-            Option<Uuid>,
-            chrono::DateTime<chrono::Utc>,
-            chrono::DateTime<chrono::Utc>,
-        )>,
-    > {
-        let rows = sqlx::query(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id, title, type, parent_id, created_at, updated_at FROM documents WHERE id = $1
-                UNION ALL
-                SELECT d.id, d.title, d.type, d.parent_id, d.created_at, d.updated_at
-                FROM documents d JOIN subtree s ON d.parent_id = s.id
-            )
-            SELECT id, title, type, parent_id, created_at, updated_at FROM subtree
-            "#
-        )
-        .bind(root_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| {
-                (
-                    r.get("id"),
-                    r.get("title"),
-                    r.get("type"),
-                    r.try_get("parent_id").ok(),
-                    r.get("created_at"),
-                    r.get("updated_at"),
-                )
-            })
-            .collect())
-    }
-
-    async fn list_materialized_children(&self, parent_share_id: Uuid) -> anyhow::Result<Vec<Uuid>> {
-        let ids = sqlx::query_scalar("SELECT document_id FROM shares WHERE parent_share_id = $1 AND (expires_at IS NULL OR expires_at > now())")
-            .bind(parent_share_id)
-            .fetch_all(&self.pool)
-            .await?;
-        Ok(ids)
-    }
-
-    async fn materialize_folder_share(
-        &self,
-        workspace_id: Uuid,
-        actor_id: Uuid,
-        token: &str,
-    ) -> anyhow::Result<i64> {
-        let row = sqlx::query(
-            r#"SELECT s.id as share_id, s.permission, s.expires_at, d.id as folder_id, d.workspace_id, d.type
-               FROM shares s JOIN documents d ON d.id = s.document_id
-               WHERE s.token = $1"#
-        )
-        .bind(token)
-        .fetch_optional(&self.pool)
-        .await?;
-        let row = match row {
-            Some(r) => r,
-            None => anyhow::bail!("not_found"),
-        };
-        let workspace: Uuid = row.get("workspace_id");
-        if workspace != workspace_id {
-            anyhow::bail!("forbidden");
-        }
-        let dtype: String = row.get("type");
-        if dtype != "folder" {
-            anyhow::bail!("bad_request");
-        }
-        let folder_id: Uuid = row.get("folder_id");
-        let share_id: Uuid = row.get("share_id");
-        let permission: String = row.get("permission");
-        let expires_at: Option<chrono::DateTime<chrono::Utc>> = row.try_get("expires_at").ok();
-
-        if let Some(exp) = expires_at {
-            if exp < chrono::Utc::now() {
-                anyhow::bail!("not_found");
-            }
-        }
-
-        let created = sqlx::query_scalar::<_, i64>(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id, type FROM documents WHERE id = $1
-                UNION ALL
-                SELECT d.id, d.type FROM documents d JOIN subtree sb ON d.parent_id = sb.id
-            ),
-            targets AS (
-                SELECT id FROM subtree WHERE type <> 'folder'
-            ),
-            inserted AS (
-                INSERT INTO shares (document_id, token, permission, created_by, expires_at, parent_share_id)
-                SELECT t.id, gen_random_uuid()::text, $3, $4, $5, $2
-                FROM targets t
-                WHERE NOT EXISTS (
-                    SELECT 1
-                    FROM shares s2
-                    WHERE s2.document_id = t.id
-                      AND s2.parent_share_id = $2
-                )
-                RETURNING 1
-            )
-            SELECT COALESCE(COUNT(*),0) FROM inserted
-            "#
-        )
-        .bind(folder_id)
-        .bind(share_id)
-        .bind(&permission)
-        .bind(actor_id)
-        .bind(expires_at)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(created)
-    }
-
-    async fn revoke_subtree_shares(
-        &self,
-        workspace_id: Uuid,
-        root_id: Uuid,
-    ) -> anyhow::Result<i64> {
-        let deleted = sqlx::query_scalar::<_, i64>(
-            r#"
-            WITH RECURSIVE subtree AS (
-                SELECT id FROM documents WHERE id = $1 AND workspace_id = $2
-                UNION ALL
-                SELECT d.id
-                FROM documents d
-                JOIN subtree sb ON d.parent_id = sb.id
-                WHERE d.workspace_id = $2
-            ),
-            removed AS (
-                DELETE FROM shares s
-                USING subtree sb
-                WHERE s.document_id = sb.id
-                RETURNING 1
-            )
-            SELECT COALESCE(COUNT(*), 0) FROM removed
-            "#,
-        )
-        .bind(root_id)
-        .bind(workspace_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(deleted)
-    }
-}
-
-#[async_trait]
-impl ShareAccessPort for SqlxSharesRepository {
-    async fn resolve_share_by_token(
-        &self,
-        token: &str,
-    ) -> anyhow::Result<
-        Option<(
-            Uuid,
-            String,
-            Option<chrono::DateTime<chrono::Utc>>,
-            Uuid,
-            String,
-            Uuid,
-        )>,
-    > {
-        self.fetch_share_resolution(token).await
-    }
-
-    async fn get_materialized_permission(
-        &self,
-        parent_share_id: Uuid,
-        doc_id: Uuid,
-    ) -> anyhow::Result<Option<String>> {
-        let perm = sqlx::query_scalar::<_, String>(
-            "SELECT permission FROM shares WHERE parent_share_id = $1 AND document_id = $2 AND (expires_at IS NULL OR expires_at > now())",
-        )
-        .bind(parent_share_id)
-        .bind(doc_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(perm)
-    }
-}
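Several deleted queries above (folder share creation, materialize_folder_share, revoke_subtree_shares) expand a folder into its descendants with a WITH RECURSIVE subtree CTE. A pure-Rust sketch of what that CTE computes over an in-memory (id, parent_id) table follows; the function and types are illustrative only, not code from this repository.

// In-memory equivalent of the recursive subtree CTE: start from a root
// id, repeatedly pull in rows whose parent_id is already in the set,
// and return every reached id.
use std::collections::{HashMap, HashSet, VecDeque};

fn subtree(rows: &[(u32, Option<u32>)], root: u32) -> HashSet<u32> {
    // Index children by parent, like the JOIN on d.parent_id = sb.id.
    let mut children: HashMap<u32, Vec<u32>> = HashMap::new();
    for (id, parent) in rows {
        if let Some(p) = parent {
            children.entry(*p).or_default().push(*id);
        }
    }
    let mut seen = HashSet::from([root]);
    let mut queue = VecDeque::from([root]);
    while let Some(next) = queue.pop_front() {
        for child in children.get(&next).into_iter().flatten() {
            if seen.insert(*child) {
                queue.push_back(*child);
            }
        }
    }
    seen
}

fn main() {
    // folder 1 -> doc 2, folder 3 -> doc 4; doc 5 is unrelated.
    let rows = [(1, None), (2, Some(1)), (3, Some(1)), (4, Some(3)), (5, None)];
    assert_eq!(subtree(&rows, 1), HashSet::from([1, 2, 3, 4]));
    println!("{:?}", subtree(&rows, 1));
}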
diff --git a/api/src/infrastructure/db/repositories/tag_repository_sqlx.rs b/api/src/infrastructure/db/repositories/tag_repository_sqlx.rs
deleted file mode 100644
index fe4ae100..00000000
--- a/api/src/infrastructure/db/repositories/tag_repository_sqlx.rs
+++ /dev/null
@@ -1,58 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::tag_repository::TagRepository;
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxTagRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxTagRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl TagRepository for SqlxTagRepository {
-    async fn list_tags(
-        &self,
-        owner_id: Uuid,
-        filter: Option<String>,
-    ) -> anyhow::Result<Vec<(String, i64)>> {
-        let rows = if let Some(f) = filter.filter(|s| !s.trim().is_empty()) {
-            let like = format!("%{}%", f);
-            sqlx::query(
-                r#"SELECT t.name, COUNT(*)::BIGINT AS count
-                   FROM document_tags dt
-                   JOIN tags t ON t.id = dt.tag_id
-                   JOIN documents d ON d.id = dt.document_id AND d.owner_id = $1
-                   WHERE t.name ILIKE $2
-                   GROUP BY t.name
-                   ORDER BY count DESC, t.name ASC"#,
-            )
-            .bind(owner_id)
-            .bind(like)
-            .fetch_all(&self.pool)
-            .await?
-        } else {
-            sqlx::query(
-                r#"SELECT t.name, COUNT(*)::BIGINT AS count
-                   FROM document_tags dt
-                   JOIN tags t ON t.id = dt.tag_id
-                   JOIN documents d ON d.id = dt.document_id AND d.owner_id = $1
-                   GROUP BY t.name
-                   ORDER BY count DESC, t.name ASC"#,
-            )
-            .bind(owner_id)
-            .fetch_all(&self.pool)
-            .await?
-        };
-        Ok(rows
-            .into_iter()
-            .map(|r| (r.get("name"), r.get("count")))
-            .collect())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/tagging_repository_sqlx.rs b/api/src/infrastructure/db/repositories/tagging_repository_sqlx.rs
deleted file mode 100644
index 8aae44d7..00000000
--- a/api/src/infrastructure/db/repositories/tagging_repository_sqlx.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::tagging_repository::TaggingRepository;
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxTaggingRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxTaggingRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl TaggingRepository for SqlxTaggingRepository {
-    async fn clear_document_tags(&self, doc_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("DELETE FROM document_tags WHERE document_id = $1")
-            .bind(doc_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn upsert_tag_return_id(&self, name: &str) -> anyhow::Result<i64> {
-        let row = sqlx::query("INSERT INTO tags(name) VALUES ($1) ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name RETURNING id")
-            .bind(name)
-            .fetch_one(&self.pool)
-            .await?;
-        Ok(row.get("id"))
-    }
-
-    async fn owner_doc_exists(&self, doc_id: Uuid, owner_id: Uuid) -> anyhow::Result<bool> {
-        let n = sqlx::query_scalar::<_, i64>(
-            "SELECT COUNT(1) FROM documents WHERE id = $1 AND owner_id = $2",
-        )
-        .bind(doc_id)
-        .bind(owner_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(n > 0)
-    }
-
-    async fn associate_document_tag(&self, doc_id: Uuid, tag_id: i64) -> anyhow::Result<()> {
-        sqlx::query("INSERT INTO document_tags(document_id, tag_id) VALUES ($1, $2)")
-            .bind(doc_id)
-            .bind(tag_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/user_repository_sqlx.rs b/api/src/infrastructure/db/repositories/user_repository_sqlx.rs
deleted file mode 100644
index d2966aee..00000000
--- a/api/src/infrastructure/db/repositories/user_repository_sqlx.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::user_repository::{UserRepository, UserRow};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxUserRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxUserRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl UserRepository for SqlxUserRepository {
-    async fn create_user(
-        &self,
-        id: Uuid,
-        email: &str,
-        name: &str,
-        password_hash: Option<&str>,
-        default_workspace_id: Uuid,
-    ) -> anyhow::Result<UserRow> {
-        let row = sqlx::query(
-            r#"INSERT INTO users (id, email, name, password_hash, default_workspace_id)
-               VALUES ($1, $2, $3, $4, $5)
-               RETURNING id, email, name, password_hash"#,
-        )
-        .bind(id)
-        .bind(email)
-        .bind(name)
-        .bind(password_hash)
-        .bind(default_workspace_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(UserRow {
-            id: row.get("id"),
-            email: row.get("email"),
-            name: row.get("name"),
-            password_hash: row.try_get("password_hash").ok(),
-        })
-    }
-
-    async fn find_by_email(&self, email: &str) -> anyhow::Result<Option<UserRow>> {
-        let row =
-            sqlx::query(r#"SELECT id, email, name, password_hash FROM users WHERE email = $1"#)
-                .bind(email)
-                .fetch_optional(&self.pool)
-                .await?;
-        Ok(row.map(|r| UserRow {
-            id: r.get("id"),
-            email: r.get("email"),
-            name: r.get("name"),
-            password_hash: r.try_get("password_hash").ok(),
-        }))
-    }
-
-    async fn find_by_external_identity(
-        &self,
-        provider: &str,
-        subject: &str,
-    ) -> anyhow::Result<Option<UserRow>> {
-        let row = sqlx::query(
-            r#"SELECT u.id, u.email, u.name, u.password_hash
-               FROM user_external_accounts a
-               JOIN users u ON u.id = a.user_id
-               WHERE a.provider = $1 AND a.subject = $2"#,
-        )
-        .bind(provider)
-        .bind(subject)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|r| UserRow {
-            id: r.get("id"),
-            email: r.get("email"),
-            name: r.get("name"),
-            password_hash: r.try_get("password_hash").ok(),
-        }))
-    }
-
-    async fn find_by_id(&self, id: Uuid) -> anyhow::Result<Option<UserRow>> {
-        let row = sqlx::query(r#"SELECT id, email, name FROM users WHERE id = $1"#)
-            .bind(id)
-            .fetch_optional(&self.pool)
-            .await?;
-        Ok(row.map(|r| UserRow {
-            id: r.get("id"),
-            email: r.get("email"),
-            name: r.get("name"),
-            password_hash: None,
-        }))
-    }
-
-    async fn link_external_identity(
-        &self,
-        user_id: Uuid,
-        provider: &str,
-        subject: &str,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"INSERT INTO user_external_accounts (user_id, provider, subject)
-               VALUES ($1, $2, $3)
-               ON CONFLICT (provider, subject) DO UPDATE SET user_id = EXCLUDED.user_id"#,
-        )
-        .bind(user_id)
-        .bind(provider)
-        .bind(subject)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn delete_user(&self, id: Uuid) -> anyhow::Result<bool> {
-        let res = sqlx::query("DELETE FROM users WHERE id = $1")
-            .bind(id)
-            .execute(&self.pool)
-            .await?;
-        Ok(res.rows_affected() > 0)
-    }
-
-    async fn list_user_ids(&self) -> anyhow::Result<Vec<Uuid>> {
-        let rows = sqlx::query("SELECT id FROM users")
-            .fetch_all(&self.pool)
-            .await?;
-        Ok(rows.into_iter().map(|r| r.get("id")).collect())
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/user_session_repository_sqlx.rs b/api/src/infrastructure/db/repositories/user_session_repository_sqlx.rs
deleted file mode 100644
index fb1a2b97..00000000
--- a/api/src/infrastructure/db/repositories/user_session_repository_sqlx.rs
+++ /dev/null
@@ -1,239 +0,0 @@
-use async_trait::async_trait;
-use chrono::{DateTime, Utc};
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::user_session_repository::{
-    UserSessionRecord, UserSessionRepository, UserSessionSecret,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxUserSessionRepository {
-    pool: PgPool,
-}
-
-impl SqlxUserSessionRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-
-    fn map_record(row: sqlx::postgres::PgRow) -> UserSessionRecord {
-        UserSessionRecord {
-            id: row.get("id"),
-            user_id: row.get("user_id"),
-            workspace_id: row.get("workspace_id"),
-            user_agent: row.try_get("user_agent").ok(),
-            ip_address: row.try_get("ip_address").ok(),
-            remember_me: row.get("remember_me"),
-            created_at: row.get("created_at"),
-            last_seen_at: row.get("last_seen_at"),
-            expires_at: row.get("expires_at"),
-            revoked_at: row.try_get("revoked_at").ok(),
-        }
-    }
-}
-
-#[async_trait]
-impl UserSessionRepository for SqlxUserSessionRepository {
-    async fn create(
-        &self,
-        user_id: Uuid,
-        workspace_id: Uuid,
-        token_hash: &str,
-        token_digest: &str,
-        expires_at: DateTime<Utc>,
-        remember_me: bool,
-        user_agent: Option<&str>,
-        ip_address: Option<&str>,
-    ) -> anyhow::Result<UserSessionRecord> {
-        let row = sqlx::query(
-            r#"INSERT INTO user_sessions
-                   (user_id, workspace_id, token_hash, token_digest, expires_at, remember_me, user_agent, ip_address)
-               VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-               RETURNING id, user_id, workspace_id, user_agent, ip_address, remember_me, created_at, last_seen_at, expires_at, revoked_at"#,
-        )
-        .bind(user_id)
-        .bind(workspace_id)
-        .bind(token_hash)
-        .bind(token_digest)
-        .bind(expires_at)
-        .bind(remember_me)
-        .bind(user_agent)
-        .bind(ip_address)
-        .fetch_one(&self.pool)
-        .await?;
-
-        Ok(Self::map_record(row))
-    }
-
-    async fn find_by_digest(
-        &self,
-        token_digest: &str,
-    ) -> anyhow::Result<Option<UserSessionSecret>> {
-        let row = sqlx::query(
-            r#"SELECT id, user_id, workspace_id, user_agent, ip_address, remember_me,
-                      created_at, last_seen_at, expires_at, revoked_at, token_hash, token_digest
-               FROM user_sessions
-               WHERE token_digest = $1
-               LIMIT 1"#,
-        )
-        .bind(token_digest)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        Ok(row.map(|row| UserSessionSecret {
-            token_hash: row.get("token_hash"),
-            token_digest: row.get("token_digest"),
-            session: Self::map_record(row),
-        }))
-    }
-
-    async fn update_token(
-        &self,
-        session_id: Uuid,
-        expected_token_digest: &str,
-        token_hash: &str,
-        token_digest: &str,
-        expires_at: DateTime<Utc>,
-        user_agent: Option<&str>,
-        ip_address: Option<&str>,
-        workspace_id: Option<Uuid>,
-    ) -> anyhow::Result<bool> {
-        let row = sqlx::query(
-            r#"UPDATE user_sessions
-               SET token_hash = $2,
-                   token_digest = $3,
-                   expires_at = $4,
-                   last_seen_at = now(),
-                   user_agent = $5,
-                   ip_address = $6,
-                   workspace_id = COALESCE($8, workspace_id)
-               WHERE id = $1
-                 AND revoked_at IS NULL
-                 AND token_digest = $7
-               RETURNING id"#,
-        )
-        .bind(session_id)
-        .bind(token_hash)
-        .bind(token_digest)
-        .bind(expires_at)
-        .bind(user_agent)
-        .bind(ip_address)
-        .bind(expected_token_digest)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        Ok(row.is_some())
-    }
-
-    async fn update_workspace(&self, session_id: Uuid, workspace_id: Uuid) -> anyhow::Result<bool> {
-        let row = sqlx::query(
-            r#"UPDATE user_sessions
-               SET workspace_id = $2
-               WHERE id = $1 AND revoked_at IS NULL
-               RETURNING id"#,
-        )
-        .bind(session_id)
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        Ok(row.is_some())
-    }
-
-    async fn touch(&self, session_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query("UPDATE user_sessions SET last_seen_at = now() WHERE id = $1")
-            .bind(session_id)
-            .execute(&self.pool)
-            .await?;
-        Ok(())
-    }
-
-    async fn list_for_user(&self, user_id: Uuid) -> anyhow::Result<Vec<UserSessionRecord>> {
-        let rows = sqlx::query(
-            r#"SELECT id, user_id, workspace_id, user_agent, ip_address, remember_me,
-                      created_at, last_seen_at, expires_at, revoked_at
-               FROM user_sessions
-               WHERE user_id = $1
-               ORDER BY last_seen_at DESC"#,
-        )
-        .bind(user_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        Ok(rows.into_iter().map(Self::map_record).collect())
-    }
-
-    async fn find_by_id(&self, session_id: Uuid) -> anyhow::Result<Option<UserSessionRecord>> {
-        let row = sqlx::query(
-            r#"SELECT id, user_id, workspace_id, user_agent, ip_address, remember_me,
-                      created_at, last_seen_at, expires_at, revoked_at
-               FROM user_sessions
-               WHERE id = $1
-               LIMIT 1"#,
-        )
-        .bind(session_id)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        Ok(row.map(Self::map_record))
-    }
-
-    async fn revoke(&self, session_id: Uuid) -> anyhow::Result<bool> {
-        let affected = sqlx::query(
-            r#"UPDATE user_sessions
-               SET revoked_at = now()
-               WHERE id = $1 AND revoked_at IS NULL"#,
-        )
-        .bind(session_id)
-        .execute(&self.pool)
-        .await?;
-        Ok(affected.rows_affected() > 0)
-    }
-
-    async fn revoke_by_digest(&self, token_digest: &str) -> anyhow::Result<bool> {
-        let affected = sqlx::query(
-            r#"UPDATE user_sessions
-               SET revoked_at = now()
-               WHERE token_digest = $1 AND revoked_at IS NULL"#,
-        )
-        .bind(token_digest)
-        .execute(&self.pool)
-        .await?;
-        Ok(affected.rows_affected() > 0)
-    }
-
-    async fn revoke_all_for_user(&self, user_id: Uuid) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"UPDATE user_sessions
-               SET revoked_at = now()
-               WHERE user_id = $1 AND revoked_at IS NULL"#,
-        )
-        .bind(user_id)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn delete_expired(&self, before: DateTime<Utc>, batch_size: i64) -> anyhow::Result<u64> {
-        let rows = sqlx::query(
-            r#"WITH expired AS (
-                   SELECT id
-                   FROM user_sessions
-                   WHERE expires_at < $1
-                   ORDER BY expires_at ASC
-                   LIMIT $2
-               )
-               DELETE FROM user_sessions
-               WHERE id IN (SELECT id FROM expired)
-               RETURNING 1"#,
-        )
-        .bind(before)
-        .bind(batch_size)
-        .fetch_all(&self.pool)
-        .await?;
-
-        Ok(rows.len() as u64)
-    }
-}
diff --git a/api/src/infrastructure/db/repositories/user_shortcut_repository_sqlx.rs b/api/src/infrastructure/db/repositories/user_shortcut_repository_sqlx.rs
deleted file mode 100644
index 4cfc4827..00000000
--- a/api/src/infrastructure/db/repositories/user_shortcut_repository_sqlx.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-use async_trait::async_trait;
-use sqlx::Row;
-use uuid::Uuid;
-
-use crate::application::ports::user_shortcut_repository::{
-    UserShortcutProfile, UserShortcutRepository,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxUserShortcutRepository {
-    pool: PgPool,
-}
-
-impl SqlxUserShortcutRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-}
-
-#[async_trait]
-impl UserShortcutRepository for SqlxUserShortcutRepository {
-    async fn get_by_user(&self, user_id: Uuid) -> anyhow::Result<Option<UserShortcutProfile>> {
-        let row = sqlx::query(
-            r#"SELECT user_id, bindings, leader_key, updated_at
-               FROM user_shortcuts
-               WHERE user_id = $1
-               LIMIT 1"#,
-        )
-        .bind(user_id)
-        .fetch_optional(&self.pool)
-        .await?;
-
-        Ok(row.map(|row| UserShortcutProfile {
-            user_id: row.get("user_id"),
-            bindings: row.get("bindings"),
-            leader_key: row.try_get("leader_key").ok(),
-            updated_at: row.get("updated_at"),
-        }))
-    }
-
-    async fn upsert(
-        &self,
-        user_id: Uuid,
-        bindings: serde_json::Value,
-        leader_key: Option<String>,
-    ) -> anyhow::Result<UserShortcutProfile> {
-        let row = sqlx::query(
-            r#"INSERT INTO user_shortcuts (user_id, bindings, leader_key, updated_at)
-               VALUES ($1, $2, $3, now())
-               ON CONFLICT (user_id)
-               DO UPDATE SET
-                   bindings = EXCLUDED.bindings,
-                   leader_key = EXCLUDED.leader_key,
-                   updated_at = now()
-               RETURNING user_id, bindings, leader_key, updated_at"#,
-        )
-        .bind(user_id)
-        .bind(bindings)
-        .bind(leader_key)
-        .fetch_one(&self.pool)
-        .await?;
-
-        Ok(UserShortcutProfile {
-            user_id: row.get("user_id"),
-            bindings: row.get("bindings"),
-            leader_key: row.try_get("leader_key").ok(),
-            updated_at: row.get("updated_at"),
-        })
-    }
-}
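The delete_expired method deleted above removes sessions in bounded batches (a CTE selects at most batch_size ids, then deletes only those), which implies callers drain it in a loop until a batch comes back short. A runnable sketch of such a drain loop, with a closure standing in for the repository call; this driver is an assumption for illustration, not code from this repository.

// Drains a batched cleanup API: keep deleting until a batch comes back
// smaller than the batch size, so no single call holds long row locks.
fn drain_expired(mut delete_batch: impl FnMut(i64) -> u64, batch_size: i64) -> u64 {
    let mut total = 0;
    loop {
        let deleted = delete_batch(batch_size);
        total += deleted;
        if deleted < batch_size as u64 {
            break total;
        }
    }
}

fn main() {
    // Fake store with 2_350 expired sessions.
    let mut remaining: u64 = 2_350;
    let total = drain_expired(
        |batch| {
            let n = remaining.min(batch as u64);
            remaining -= n;
            n
        },
        1_000,
    );
    assert_eq!(total, 2_350);
    println!("deleted {total} sessions");
}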
diff --git a/api/src/infrastructure/db/repositories/workspace_repository_sqlx.rs b/api/src/infrastructure/db/repositories/workspace_repository_sqlx.rs
deleted file mode 100644
index 7715a02d..00000000
--- a/api/src/infrastructure/db/repositories/workspace_repository_sqlx.rs
+++ /dev/null
@@ -1,886 +0,0 @@
-use std::collections::HashMap;
-
-use anyhow::bail;
-use async_trait::async_trait;
-use chrono::{DateTime, Utc};
-use sqlx::{PgConnection, Row, postgres::PgRow};
-use uuid::Uuid;
-
-use crate::application::ports::workspace_repository::{
-    WorkspaceInvitationRecord, WorkspaceListItem, WorkspaceMemberDetail, WorkspaceMemberRow,
-    WorkspacePermissionRecord, WorkspaceRepository, WorkspaceRoleRecord, WorkspaceRow,
-    WorkspaceSetDefaultError,
-};
-use crate::infrastructure::db::PgPool;
-
-pub struct SqlxWorkspaceRepository {
-    pub pool: PgPool,
-}
-
-impl SqlxWorkspaceRepository {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-
-    fn collect_roles(&self, rows: Vec<PgRow>) -> Vec<WorkspaceRoleRecord> {
-        let mut map: HashMap<Uuid, WorkspaceRoleRecord> = HashMap::new();
-        for row in rows {
-            let role_id: Uuid = row.get("id");
-            let entry = map.entry(role_id).or_insert_with(|| WorkspaceRoleRecord {
-                id: role_id,
-                workspace_id: row.get("workspace_id"),
-                name: row.get("name"),
-                description: row.try_get("description").ok(),
-                base_role: row.get("base_role"),
-                priority: row.get("priority"),
-                overrides: Vec::new(),
-            });
-            if let (Some(permission), Some(allowed)) = (
-                row.try_get::<Option<String>, _>("permission")
-                    .ok()
-                    .flatten(),
-                row.try_get::<Option<bool>, _>("allowed").ok().flatten(),
-            ) {
-                entry.overrides.push((permission, allowed));
-            }
-        }
-        map.into_values()
-            .map(|mut record| {
-                record.overrides.sort_by(|a, b| a.0.cmp(&b.0));
-                record
-            })
-            .collect()
-    }
-
-    async fn replace_role_permissions_tx(
-        &self,
-        tx: &mut PgConnection,
-        role_id: Uuid,
-        overrides: &[(String, bool)],
-    ) -> anyhow::Result<()> {
-        sqlx::query("DELETE FROM workspace_role_permissions WHERE workspace_role_id = $1")
-            .bind(role_id)
-            .execute(&mut *tx)
-            .await?;
-        for (permission, allowed) in overrides {
-            sqlx::query(
-                r#"INSERT INTO workspace_role_permissions (workspace_role_id, permission, allowed)
-                   VALUES ($1, $2, $3)"#,
-            )
-            .bind(role_id)
-            .bind(permission)
-            .bind(allowed)
-            .execute(&mut *tx)
-            .await?;
-        }
-        Ok(())
-    }
-
-    async fn fetch_role_overrides(&self, role_id: Uuid) -> anyhow::Result<Vec<(String, bool)>> {
-        let rows = sqlx::query(
-            r#"SELECT permission, allowed
-               FROM workspace_role_permissions
-               WHERE workspace_role_id = $1"#,
-        )
-        .bind(role_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .filter_map(|row| {
-                row.try_get::<Option<String>, _>("permission")
-                    .ok()
-                    .flatten()
-                    .map(|perm| (perm, row.get("allowed")))
-            })
-            .collect())
-    }
-
-    fn map_invitation_row(&self, row: &PgRow) -> WorkspaceInvitationRecord {
-        WorkspaceInvitationRecord {
-            id: row.get("id"),
-            workspace_id: row.get("workspace_id"),
-            email: row.get("email"),
-            role_kind: row.get("role_kind"),
-            system_role: row.try_get("system_role").ok().flatten(),
-            custom_role_id: row.try_get("custom_role_id").ok().flatten(),
-            invited_by: row.get("invited_by"),
-            token: row.get("token"),
-            expires_at: row
-                .try_get::<Option<DateTime<Utc>>, _>("expires_at")
-                .ok()
-                .flatten(),
-            accepted_by: row.try_get("accepted_by").ok().flatten(),
-            accepted_at: row
-                .try_get::<Option<DateTime<Utc>>, _>("accepted_at")
-                .ok()
-                .flatten(),
-            revoked_at: row
-                .try_get::<Option<DateTime<Utc>>, _>("revoked_at")
-                .ok()
-                .flatten(),
-            created_at: row.get("created_at"),
-        }
-    }
-}
-
-impl From<sqlx::Error> for WorkspaceSetDefaultError {
-    fn from(err: sqlx::Error) -> Self {
-        WorkspaceSetDefaultError::Unexpected(err.into())
-    }
-}
-
-#[async_trait]
-impl WorkspaceRepository for SqlxWorkspaceRepository {
-    async fn list_for_user(&self, user_id: Uuid) -> anyhow::Result<Vec<WorkspaceListItem>> {
-        let rows = sqlx::query(
-            r#"SELECT w.id,
-                      w.name,
-                      w.slug,
-                      w.icon,
-                      w.description,
-                      w.is_personal,
-                      m.role_kind,
-                      m.system_role,
-                      m.custom_role_id,
-                      m.is_default
-               FROM workspace_members m
-               JOIN workspaces w ON w.id = m.workspace_id
-               WHERE m.user_id = $1
-               ORDER BY w.created_at"#,
-        )
-        .bind(user_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|r| WorkspaceListItem {
-                id: r.get("id"),
-                name: r.get("name"),
-                slug: r.get("slug"),
-                icon: r.try_get("icon").ok(),
-                description: r.try_get("description").ok(),
-                is_personal: r.get("is_personal"),
-                role_kind: r.get("role_kind"),
-                system_role: r.try_get("system_role").ok(),
-                custom_role_id: r.try_get("custom_role_id").ok(),
-                is_default: r.get("is_default"),
-            })
-            .collect())
-    }
-
-    async fn create_workspace(
-        &self,
-        creator_id: Uuid,
-        name: &str,
-        slug: &str,
-        icon: Option<&str>,
-        description: Option<&str>,
-        is_personal: bool,
-    ) -> anyhow::Result<WorkspaceRow> {
-        let row = sqlx::query(
-            r#"INSERT INTO workspaces (id, name, slug, icon, description, created_by, is_personal)
-               VALUES (gen_random_uuid(), $1, $2, $3, $4, $5, $6)
-               RETURNING id, name, slug, icon, description, is_personal"#,
-        )
-        .bind(name)
-        .bind(slug)
-        .bind(icon)
-        .bind(description)
-        .bind(creator_id)
-        .bind(is_personal)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(WorkspaceRow {
-            id: row.get("id"),
-            name: row.get("name"),
-            slug: row.get("slug"),
-            icon: row.try_get("icon").ok(),
-            description: row.try_get("description").ok(),
-            is_personal: row.get("is_personal"),
-        })
-    }
-
-    async fn get_workspace(&self, workspace_id: Uuid) -> anyhow::Result<Option<WorkspaceRow>> {
-        let row = sqlx::query(
-            r#"SELECT id, name, slug, icon, description, is_personal
-               FROM workspaces
-               WHERE id = $1"#,
-        )
-        .bind(workspace_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|row| WorkspaceRow {
-            id: row.get("id"),
-            name: row.get("name"),
-            slug: row.get("slug"),
-            icon: row.try_get("icon").ok(),
-            description: row.try_get("description").ok(),
-            is_personal: row.get("is_personal"),
-        }))
-    }
-
-    async fn create_workspace_with_id(
-        &self,
-        workspace_id: Uuid,
-        created_by: Option<Uuid>,
-        name: &str,
-        slug: &str,
-        icon: Option<&str>,
-        description: Option<&str>,
-        is_personal: bool,
-    ) -> anyhow::Result<WorkspaceRow> {
-        let row = sqlx::query(
-            r#"INSERT INTO workspaces (id, name, slug, icon, description, created_by, is_personal)
-               VALUES ($1, $2, $3, $4, $5, $6, $7)
-               RETURNING id, name, slug, icon, description, is_personal"#,
-        )
-        .bind(workspace_id)
-        .bind(name)
-        .bind(slug)
-        .bind(icon)
-        .bind(description)
-        .bind(created_by)
-        .bind(is_personal)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(WorkspaceRow {
-            id: row.get("id"),
-            name: row.get("name"),
-            slug: row.get("slug"),
-            icon: row.try_get("icon").ok(),
-            description: row.try_get("description").ok(),
-            is_personal: row.get("is_personal"),
-        })
-    }
-
-    async fn add_member(
-        &self,
-        workspace_id: Uuid,
-        user_id: Uuid,
-        role_kind: &str,
-        system_role: Option<&str>,
-        custom_role_id: Option<Uuid>,
-    ) -> anyhow::Result<WorkspaceMemberRow> {
-        let row = sqlx::query(
-            r#"INSERT INTO workspace_members (workspace_id, user_id, role_kind, system_role, custom_role_id, invited_by)
-               VALUES ($1, $2, $3, $4, $5, $2)
-               ON CONFLICT (workspace_id, user_id) DO UPDATE SET
-                   role_kind = EXCLUDED.role_kind,
-                   system_role = EXCLUDED.system_role,
-                   custom_role_id = EXCLUDED.custom_role_id
-               RETURNING workspace_id, user_id, role_kind, system_role, custom_role_id, is_default"#,
-        )
-        .bind(workspace_id)
-        .bind(user_id)
-        .bind(role_kind)
-        .bind(system_role)
-        .bind(custom_role_id)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(WorkspaceMemberRow {
-            workspace_id: row.get("workspace_id"),
-            user_id: row.get("user_id"),
-            role_kind: row.get("role_kind"),
-            system_role: row.try_get("system_role").ok(),
-            custom_role_id: row.try_get("custom_role_id").ok(),
-            is_default: row.get("is_default"),
-        })
-    }
-
-    async fn set_default_workspace(
-        &self,
-        user_id: Uuid,
-        workspace_id: Uuid,
-    ) -> Result<WorkspaceMemberRow, WorkspaceSetDefaultError> {
-        let mut tx = self
-            .pool
-            .begin()
-            .await
-            .map_err(WorkspaceSetDefaultError::from)?;
-        sqlx::query(r#"UPDATE workspace_members SET is_default = false WHERE user_id = $1"#)
-            .bind(user_id)
-            .execute(tx.as_mut())
-            .await
-            .map_err(WorkspaceSetDefaultError::from)?;
-
-        let row = sqlx::query(
-            r#"UPDATE workspace_members
-               SET is_default = true
-               WHERE workspace_id = $1 AND user_id = $2
-               RETURNING workspace_id, user_id, role_kind, system_role, custom_role_id, is_default"#,
-        )
-        .bind(workspace_id)
-        .bind(user_id)
-        .fetch_optional(tx.as_mut())
-        .await
-        .map_err(WorkspaceSetDefaultError::from)?;
-
-        let Some(row) = row else {
-            tx.rollback().await.ok();
-            return Err(WorkspaceSetDefaultError::MembershipNotFound);
-        };
-
-        sqlx::query(r#"UPDATE users SET default_workspace_id = $1 WHERE id = $2"#)
-            .bind(workspace_id)
-            .bind(user_id)
-            .execute(tx.as_mut())
-            .await
-            .map_err(WorkspaceSetDefaultError::from)?;
-
-        tx.commit().await.map_err(WorkspaceSetDefaultError::from)?;
-        Ok(WorkspaceMemberRow {
-            workspace_id: row.get("workspace_id"),
-            user_id: row.get("user_id"),
-            role_kind: row.get("role_kind"),
-            system_role: row.try_get("system_role").ok(),
-            custom_role_id: row.try_get("custom_role_id").ok(),
-            is_default: row.get("is_default"),
-        })
-    }
-
-    async fn list_members(&self, workspace_id: Uuid) -> anyhow::Result<Vec<WorkspaceMemberDetail>> {
-        let rows = sqlx::query(
-            r#"SELECT m.workspace_id,
-                      m.user_id,
-                      m.role_kind,
-                      m.system_role,
-                      m.custom_role_id,
-                      m.is_default,
-                      u.email,
-                      u.name
-               FROM workspace_members m
-               JOIN users u ON u.id = m.user_id
-               WHERE m.workspace_id = $1
-               ORDER BY u.name"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(rows
-            .into_iter()
-            .map(|row| WorkspaceMemberDetail {
-                workspace_id: row.get("workspace_id"),
-                user_id: row.get("user_id"),
-                role_kind: row.get("role_kind"),
-                system_role: row.try_get("system_role").ok(),
-                custom_role_id: row.try_get("custom_role_id").ok(),
-                is_default: row.get("is_default"),
-                user_email: row.get("email"),
-                user_name: row.get("name"),
-            })
-            .collect())
-    }
-
-    async fn get_member_detail(
-        &self,
-        workspace_id: Uuid,
-        user_id: Uuid,
-    ) -> anyhow::Result<Option<WorkspaceMemberDetail>> {
-        let row = sqlx::query(
-            r#"SELECT m.workspace_id,
-                      m.user_id,
-                      m.role_kind,
-                      m.system_role,
-                      m.custom_role_id,
-                      m.is_default,
-                      u.email,
-                      u.name
-               FROM workspace_members m
-               JOIN users u ON u.id = m.user_id
-               WHERE m.workspace_id = $1 AND m.user_id = $2"#,
-        )
-        .bind(workspace_id)
-        .bind(user_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        Ok(row.map(|row| WorkspaceMemberDetail {
-            workspace_id: row.get("workspace_id"),
-            user_id: row.get("user_id"),
-            role_kind: row.get("role_kind"),
-            system_role: row.try_get("system_role").ok(),
-            custom_role_id: row.try_get("custom_role_id").ok(),
-            is_default: row.get("is_default"),
-            user_email: row.get("email"),
-            user_name: row.get("name"),
-        }))
-    }
-
-    async fn update_member_role(
-        &self,
-        workspace_id: Uuid,
-        user_id: Uuid,
-        role_kind: &str,
-        system_role: Option<&str>,
-        custom_role_id: Option<Uuid>,
-    ) -> anyhow::Result<WorkspaceMemberRow> {
-        let row = sqlx::query(
-            r#"UPDATE workspace_members
-               SET role_kind = $3,
-                   system_role = $4,
-                   custom_role_id = $5
-               WHERE workspace_id = $1 AND user_id = $2
-               RETURNING workspace_id, user_id, role_kind, system_role, custom_role_id, is_default"#,
-        )
-        .bind(workspace_id)
-        .bind(user_id)
-        .bind(role_kind)
-        .bind(system_role)
-        .bind(custom_role_id)
-        .fetch_optional(&self.pool)
-        .await?;
-        let Some(row) = row else {
-            bail!("membership_not_found");
-        };
-        Ok(WorkspaceMemberRow {
-            workspace_id: row.get("workspace_id"),
-            user_id: row.get("user_id"),
-            role_kind: row.get("role_kind"),
-            system_role: row.try_get("system_role").ok(),
-            custom_role_id: row.try_get("custom_role_id").ok(),
-            is_default: row.get("is_default"),
-        })
-    }
-
-    async fn get_member_with_permissions(
-        &self,
-        workspace_id: Uuid,
-        user_id: Uuid,
-    ) -> anyhow::Result<Option<WorkspacePermissionRecord>> {
-        let rows = sqlx::query(
-            r#"SELECT m.workspace_id,
-                      m.user_id,
-                      m.role_kind,
-                      m.system_role,
-                      m.custom_role_id,
-                      r.base_role,
-                      p.permission,
-                      p.allowed
-               FROM workspace_members m
-               LEFT JOIN workspace_roles r ON r.id = m.custom_role_id
-               LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id
-               WHERE m.workspace_id = $1 AND m.user_id = $2"#,
-        )
-        .bind(workspace_id)
-        .bind(user_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        if rows.is_empty() {
-            return Ok(None);
-        }
-
-        let first = &rows[0];
-        let mut record = WorkspacePermissionRecord {
-            workspace_id: first.get("workspace_id"),
-            user_id: first.get("user_id"),
-            role_kind: first.get("role_kind"),
-            system_role: first.try_get("system_role").ok(),
-            custom_role_id: first.try_get("custom_role_id").ok(),
-            custom_base_role: first.try_get("base_role").ok(),
-            overrides: Vec::new(),
-        };
-
-        for row in rows {
-            if let (Some(permission), Some(allowed)) = (
-                row.try_get::<Option<String>, _>("permission")
-                    .ok()
-                    .flatten(),
-                row.try_get::<Option<bool>, _>("allowed").ok().flatten(),
-            ) {
-                record.overrides.push((permission, allowed));
-            }
-        }
-
-        Ok(Some(record))
-    }
-
-    async fn count_system_role_members(
-        &self,
-        workspace_id: Uuid,
-        system_role: &str,
-    ) -> anyhow::Result<i64> {
-        let count = sqlx::query_scalar(
-            r#"SELECT COUNT(1)::BIGINT
-               FROM workspace_members
-               WHERE workspace_id = $1
-                 AND role_kind = 'system'
-                 AND system_role = $2"#,
-        )
-        .bind(workspace_id)
-        .bind(system_role)
-        .fetch_one(&self.pool)
-        .await?;
-        Ok(count)
-    }
-
-    async fn list_roles(&self, workspace_id: Uuid) -> anyhow::Result<Vec<WorkspaceRoleRecord>> {
-        let rows = sqlx::query(
-            r#"SELECT r.id,
-                      r.workspace_id,
-                      r.name,
-                      r.description,
-                      r.base_role,
-                      r.priority,
-                      p.permission,
-                      p.allowed
-               FROM workspace_roles r
-               LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id
-               WHERE r.workspace_id = $1
-               ORDER BY r.priority, r.created_at"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-        Ok(self.collect_roles(rows))
-    }
-
-    async fn create_role(
-        &self,
-        workspace_id: Uuid,
-        name: &str,
-        base_role: &str,
-        description: Option<&str>,
-        priority: i32,
-        overrides: &[(String, bool)],
-    ) -> anyhow::Result<WorkspaceRoleRecord> {
-        let mut tx = self.pool.begin().await?;
-        let row = sqlx::query(
-            r#"INSERT INTO workspace_roles (workspace_id, name, base_role, description, priority)
-               VALUES ($1, $2, $3, $4, $5)
-               RETURNING id, workspace_id, name, description, base_role, priority"#,
-        )
-        .bind(workspace_id)
-        .bind(name)
-        .bind(base_role)
-        .bind(description)
-        .bind(priority)
-        .fetch_one(tx.as_mut())
-        .await?;
-        let role_id: Uuid = row.get("id");
-        self.replace_role_permissions_tx(tx.as_mut(), role_id, overrides)
-            .await?;
-        tx.commit().await?;
-        Ok(WorkspaceRoleRecord {
-            id: role_id,
-            workspace_id: row.get("workspace_id"),
-            name: row.get("name"),
-            description: row.try_get("description").ok(),
-            base_role: row.get("base_role"),
-            priority: row.get("priority"),
-            overrides: overrides.to_vec(),
-        })
-    }
-
-    async fn update_role(
-        &self,
-        workspace_id: Uuid,
-        role_id: Uuid,
-        name: Option<&str>,
-        base_role: Option<&str>,
-        description: Option<&str>,
-        priority: Option<i32>,
-        overrides: Option<&[(String, bool)]>,
-    ) -> anyhow::Result<WorkspaceRoleRecord> {
-        let mut tx = self.pool.begin().await?;
-        let row = sqlx::query(
-            r#"UPDATE workspace_roles
-               SET name = COALESCE($3, name),
-                   base_role = COALESCE($4, base_role),
-                   description = COALESCE($5, description),
-                   priority = COALESCE($6, priority)
-               WHERE id = $2 AND workspace_id = $1
-               RETURNING id, workspace_id,
name, description, base_role, priority"#, - ) - .bind(workspace_id) - .bind(role_id) - .bind(name) - .bind(base_role) - .bind(description) - .bind(priority) - .fetch_optional(tx.as_mut()) - .await?; - let Some(row) = row else { - bail!("role_not_found"); - }; - if let Some(overrides) = overrides { - self.replace_role_permissions_tx(tx.as_mut(), role_id, overrides) - .await?; - } - tx.commit().await?; - let overrides_vec = if let Some(overrides) = overrides { - overrides.to_vec() - } else { - self.fetch_role_overrides(role_id).await? - }; - Ok(WorkspaceRoleRecord { - id: row.get("id"), - workspace_id: row.get("workspace_id"), - name: row.get("name"), - description: row.try_get("description").ok(), - base_role: row.get("base_role"), - priority: row.get("priority"), - overrides: overrides_vec, - }) - } - - async fn delete_role(&self, workspace_id: Uuid, role_id: Uuid) -> anyhow::Result { - let result = sqlx::query( - r#"DELETE FROM workspace_roles - WHERE id = $1 AND workspace_id = $2"#, - ) - .bind(role_id) - .bind(workspace_id) - .execute(&self.pool) - .await?; - Ok(result.rows_affected() > 0) - } - - async fn delete_workspace(&self, workspace_id: Uuid) -> anyhow::Result { - let result = sqlx::query("DELETE FROM workspaces WHERE id = $1") - .bind(workspace_id) - .execute(&self.pool) - .await?; - Ok(result.rows_affected() > 0) - } - - async fn get_role( - &self, - workspace_id: Uuid, - role_id: Uuid, - ) -> anyhow::Result> { - let rows = sqlx::query( - r#"SELECT r.id, - r.workspace_id, - r.name, - r.description, - r.base_role, - r.priority, - p.permission, - p.allowed - FROM workspace_roles r - LEFT JOIN workspace_role_permissions p ON p.workspace_role_id = r.id - WHERE r.workspace_id = $1 AND r.id = $2"#, - ) - .bind(workspace_id) - .bind(role_id) - .fetch_all(&self.pool) - .await?; - let mut roles = self.collect_roles(rows); - Ok(roles.pop()) - } - - async fn delete_member(&self, workspace_id: Uuid, user_id: Uuid) -> anyhow::Result { - let result = sqlx::query( - r#"DELETE FROM workspace_members - WHERE workspace_id = $1 AND user_id = $2"#, - ) - .bind(workspace_id) - .bind(user_id) - .execute(&self.pool) - .await?; - Ok(result.rows_affected() > 0) - } - - async fn update_workspace( - &self, - workspace_id: Uuid, - name: Option<&str>, - icon: Option<&str>, - description: Option<&str>, - ) -> anyhow::Result> { - let row = sqlx::query( - r#"UPDATE workspaces - SET name = COALESCE($2, name), - icon = COALESCE($3, icon), - description = COALESCE($4, description), - updated_at = now() - WHERE id = $1 - RETURNING id, name, slug, icon, description, is_personal"#, - ) - .bind(workspace_id) - .bind(name) - .bind(icon) - .bind(description) - .fetch_optional(&self.pool) - .await?; - Ok(row.map(|row| WorkspaceRow { - id: row.get("id"), - name: row.get("name"), - slug: row.get("slug"), - icon: row.try_get("icon").ok(), - description: row.try_get("description").ok(), - is_personal: row.get("is_personal"), - })) - } - - async fn create_invitation( - &self, - workspace_id: Uuid, - email: &str, - role_kind: &str, - system_role: Option<&str>, - custom_role_id: Option, - invited_by: Uuid, - token: &str, - expires_at: Option>, - ) -> anyhow::Result { - let row = sqlx::query( - r#"INSERT INTO workspace_invitations ( - workspace_id, - email, - role_kind, - system_role, - custom_role_id, - invited_by, - token, - expires_at - ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - RETURNING id, workspace_id, email, role_kind, system_role, custom_role_id, - invited_by, token, expires_at, accepted_by, accepted_at, 
revoked_at, - created_at"#, - ) - .bind(workspace_id) - .bind(email) - .bind(role_kind) - .bind(system_role) - .bind(custom_role_id) - .bind(invited_by) - .bind(token) - .bind(expires_at) - .fetch_one(&self.pool) - .await?; - Ok(self.map_invitation_row(&row)) - } - - async fn list_invitations( - &self, - workspace_id: Uuid, - ) -> anyhow::Result> { - let rows = sqlx::query( - r#"SELECT id, workspace_id, email, role_kind, system_role, custom_role_id, - invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, - created_at - FROM workspace_invitations - WHERE workspace_id = $1 - ORDER BY created_at DESC"#, - ) - .bind(workspace_id) - .fetch_all(&self.pool) - .await?; - Ok(rows - .into_iter() - .map(|row| self.map_invitation_row(&row)) - .collect()) - } - - async fn accept_invitation( - &self, - token: &str, - user_id: Uuid, - user_email: &str, - ) -> anyhow::Result { - let mut tx = self.pool.begin().await?; - let row = sqlx::query( - r#"SELECT id, workspace_id, email, role_kind, system_role, custom_role_id, - invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, - created_at - FROM workspace_invitations - WHERE token = $1 - FOR UPDATE"#, - ) - .bind(token) - .fetch_optional(tx.as_mut()) - .await?; - - let Some(row) = row else { - bail!("invitation_not_found"); - }; - let mut record = self.map_invitation_row(&row); - if record.revoked_at.is_some() { - bail!("invitation_revoked"); - } - if record.accepted_at.is_some() { - bail!("invitation_already_accepted"); - } - if record - .expires_at - .is_some_and(|expires| expires < Utc::now()) - { - bail!("invitation_expired"); - } - if record.email.trim().to_lowercase() != user_email.trim().to_lowercase() { - bail!("invitation_email_mismatch"); - } - - let now = Utc::now(); - sqlx::query( - r#"UPDATE workspace_invitations - SET accepted_by = $2, accepted_at = $3 - WHERE id = $1"#, - ) - .bind(record.id) - .bind(user_id) - .bind(now) - .execute(tx.as_mut()) - .await?; - - sqlx::query( - r#"INSERT INTO workspace_members ( - workspace_id, - user_id, - role_kind, - system_role, - custom_role_id, - invited_by, - is_default - ) - VALUES ($1, $2, $3, $4, $5, $6, false) - ON CONFLICT (workspace_id, user_id) DO UPDATE SET - role_kind = EXCLUDED.role_kind, - system_role = EXCLUDED.system_role, - custom_role_id = EXCLUDED.custom_role_id"#, - ) - .bind(record.workspace_id) - .bind(user_id) - .bind(record.role_kind.clone()) - .bind(record.system_role.clone()) - .bind(record.custom_role_id) - .bind(record.invited_by) - .execute(tx.as_mut()) - .await?; - - tx.commit().await?; - record.accepted_by = Some(user_id); - record.accepted_at = Some(now); - Ok(record) - } - - async fn revoke_invitation( - &self, - workspace_id: Uuid, - invitation_id: Uuid, - ) -> anyhow::Result> { - let row = sqlx::query( - r#"UPDATE workspace_invitations - SET revoked_at = now() - WHERE id = $1 AND workspace_id = $2 AND revoked_at IS NULL AND accepted_at IS NULL - RETURNING id, workspace_id, email, role_kind, system_role, custom_role_id, - invited_by, token, expires_at, accepted_by, accepted_at, revoked_at, - created_at"#, - ) - .bind(invitation_id) - .bind(workspace_id) - .fetch_optional(&self.pool) - .await?; - Ok(row.map(|row| self.map_invitation_row(&row))) - } - - async fn list_all_workspace_ids(&self) -> anyhow::Result> { - let rows = sqlx::query("SELECT id FROM workspaces ORDER BY created_at") - .fetch_all(&self.pool) - .await?; - Ok(rows.into_iter().map(|row| row.get("id")).collect()) - } -} diff --git a/api/src/infrastructure/documents/doc_event_log.rs 
b/api/src/infrastructure/documents/doc_event_log.rs deleted file mode 100644 index 6da1ed71..00000000 --- a/api/src/infrastructure/documents/doc_event_log.rs +++ /dev/null @@ -1,64 +0,0 @@ -use async_trait::async_trait; -use serde_json::Value; -use uuid::Uuid; - -use crate::application::ports::doc_event_log::DocEventLog; -use crate::infrastructure::db::PgPool; - -pub struct PgDocEventLog { - pool: PgPool, -} - -impl PgDocEventLog { - pub fn new(pool: PgPool) -> Self { - Self { pool } - } -} - -#[async_trait] -impl DocEventLog for PgDocEventLog { - async fn append( - &self, - workspace_id: Uuid, - doc_id: Uuid, - event_type: &str, - payload: Option, - ) -> anyhow::Result<()> { - sqlx::query( - r#" - INSERT INTO doc_events (workspace_id, doc_id, event_type, payload) - VALUES ($1, $2, $3, $4) - "#, - ) - .bind(workspace_id) - .bind(doc_id) - .bind(event_type) - .bind(payload) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn append_tx( - &self, - tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - workspace_id: Uuid, - doc_id: Uuid, - event_type: &str, - payload: Option, - ) -> anyhow::Result<()> { - sqlx::query( - r#" - INSERT INTO doc_events (workspace_id, doc_id, event_type, payload) - VALUES ($1, $2, $3, $4) - "#, - ) - .bind(workspace_id) - .bind(doc_id) - .bind(event_type) - .bind(payload) - .execute(tx.as_mut()) - .await?; - Ok(()) - } -} diff --git a/api/src/infrastructure/git/workspace.rs b/api/src/infrastructure/git/workspace.rs deleted file mode 100644 index 67a39c50..00000000 --- a/api/src/infrastructure/git/workspace.rs +++ /dev/null @@ -1,4745 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::fs; -use std::io::{self, ErrorKind, Write}; -use std::path::{Path, PathBuf}; -use std::sync::Arc; - -use anyhow::{Context, anyhow}; -use async_trait::async_trait; -use chrono::{DateTime, Utc}; -use futures_util::StreamExt; -use git2::{ - CertificateCheckStatus, Commit, Cred, Error as GitError, ErrorClass, FetchOptions, FileMode, - Indexer, ObjectType, PushOptions, RemoteCallbacks, Repository, Signature, Sort, Time, - TreeWalkMode, TreeWalkResult, -}; -use sqlx::{Row, types::Json}; -use tempfile::{Builder as TempDirBuilder, TempDir}; -use tracing::{error, info, warn}; -use uuid::Uuid; - -use crate::application::dto::diff::TextDiffResult; -use crate::application::dto::git::{ - GitChangeItem, GitCommitInfo, GitImportOutcome, GitPullConflictItemDto, GitPullRequestDto, - GitPullResultDto, GitRemoteCheckDto, GitSyncOutcome, GitSyncRequestDto, GitWorkspaceStatus, -}; -use crate::application::ports::document_repository::DocumentRepository; -use crate::application::ports::git_repository::UserGitCfg; -use crate::application::ports::git_storage::{ - BlobKey, CommitMeta, GitStorage, decode_commit_id, encode_commit_id, -}; -use crate::application::ports::git_workspace::GitWorkspacePort; -use crate::application::ports::realtime_port::RealtimeEngine; -use crate::application::ports::storage_port::StorageResolverPort; -use crate::application::services::diff::text_diff::compute_text_diff; -use crate::application::services::realtime::snapshot::{SnapshotService, snapshot_from_markdown}; -use crate::application::utils::hash::sha256_hex; -use crate::infrastructure::db::PgPool; -use tokio::fs as async_fs; - -pub struct GitWorkspaceService { - pool: PgPool, - git_storage: Arc, - storage: Arc, - snapshot: Arc, - realtime: Arc, - docs: Arc, -} - -impl GitWorkspaceService { - pub fn new( - pool: PgPool, - git_storage: Arc, - storage: Arc, - snapshot: Arc, - realtime: Arc, - 
docs: Arc, - ) -> anyhow::Result { - Ok(Self { - pool, - git_storage, - storage, - snapshot, - realtime, - docs, - }) - } - - fn is_missing_objects(err: &anyhow::Error) -> bool { - let msg = err.to_string().to_lowercase(); - msg.contains("missing objects") || msg.contains("packfile is missing") - } - - async fn recover_missing_objects( - &self, - workspace_id: Uuid, - cfg: &UserGitCfg, - ) -> anyhow::Result<()> { - // Pick branch from cfg or fallback to repository state default. - let branch = if cfg.branch_name.is_empty() { - self.load_repository_state(workspace_id) - .await? - .map(|(_, default_branch)| default_branch) - .unwrap_or_else(|| "main".to_string()) - } else { - cfg.branch_name.clone() - }; - - let mut tx = self.pool.begin().await?; - sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1") - .bind(workspace_id) - .execute(&mut *tx) - .await?; - sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1") - .bind(workspace_id) - .execute(&mut *tx) - .await?; - sqlx::query( - "UPDATE git_repository_state SET initialized = true, default_branch = $2, updated_at = now() WHERE workspace_id = $1", - ) - .bind(workspace_id) - .bind(&branch) - .execute(&mut *tx) - .await?; - tx.commit().await?; - - let _ = self.git_storage.delete_all(workspace_id).await; - let _ = self.git_storage.set_latest_commit(workspace_id, None).await; - - // Re-bootstrap remote history (best effort). - let _ = self - .bootstrap_remote_history(workspace_id, cfg, branch.as_str()) - .await; - Ok(()) - } - - async fn load_repository_state( - &self, - workspace_id: Uuid, - ) -> anyhow::Result> { - let row = sqlx::query( - "SELECT initialized, default_branch FROM git_repository_state WHERE workspace_id = $1", - ) - .bind(workspace_id) - .fetch_optional(&self.pool) - .await?; - Ok(row.map(|r| (r.get("initialized"), r.get("default_branch")))) - } - - async fn latest_commit_meta(&self, workspace_id: Uuid) -> anyhow::Result> { - let row = sqlx::query( - r#"SELECT commit_id, parent_commit_id, message, author_name, author_email, - committed_at, pack_key, file_hash_index - FROM git_commits - WHERE workspace_id = $1 - ORDER BY committed_at DESC - LIMIT 1"#, - ) - .bind(workspace_id) - .fetch_optional(&self.pool) - .await?; - - row.map(|r| row_to_commit_meta(r)).transpose() - } - - async fn load_commit_meta_ref( - &self, - workspace_id: Uuid, - rev: &str, - ) -> anyhow::Result> { - if let Some(base) = rev.strip_suffix('^') { - let Some(meta) = self.commit_meta_by_hex(workspace_id, base).await? 
else { - return Ok(None); - }; - if let Some(parent_id) = meta.parent_commit_id.clone() { - return self - .commit_meta_by_id(workspace_id, parent_id.as_slice()) - .await; - } - return Ok(None); - } - self.commit_meta_by_hex(workspace_id, rev).await - } - - async fn commit_meta_by_id( - &self, - workspace_id: Uuid, - commit_id: &[u8], - ) -> anyhow::Result> { - let row = sqlx::query( - r#"SELECT commit_id, parent_commit_id, message, author_name, author_email, - committed_at, pack_key, file_hash_index - FROM git_commits - WHERE workspace_id = $1 AND commit_id = $2 - LIMIT 1"#, - ) - .bind(workspace_id) - .bind(commit_id) - .fetch_optional(&self.pool) - .await?; - row.map(|row| row_to_commit_meta(row)).transpose() - } - - async fn commit_meta_by_hex( - &self, - workspace_id: Uuid, - hex: &str, - ) -> anyhow::Result> { - let bytes = crate::application::ports::git_storage::decode_commit_id(hex)?; - let row = sqlx::query( - r#"SELECT commit_id, parent_commit_id, message, author_name, author_email, - committed_at, pack_key, file_hash_index - FROM git_commits - WHERE workspace_id = $1 AND commit_id = $2 - LIMIT 1"#, - ) - .bind(workspace_id) - .bind(bytes) - .fetch_optional(&self.pool) - .await?; - row.map(|r| row_to_commit_meta(r)).transpose() - } - - async fn ensure_latest_meta(&self, workspace_id: Uuid) -> anyhow::Result> { - if let Some(meta) = self.latest_commit_meta(workspace_id).await? { - return Ok(Some(meta)); - } - let Some(storage_latest) = self.git_storage.latest_commit(workspace_id).await? else { - return Ok(None); - }; - info!(workspace_id = %workspace_id, commit = %encode_commit_id(&storage_latest.commit_id), "git_backfill_latest_from_storage"); - self.backfill_commits_from_storage(workspace_id, &storage_latest) - .await?; - Ok(Some(storage_latest)) - } - - async fn bootstrap_remote_history( - &self, - workspace_id: Uuid, - cfg: &UserGitCfg, - branch: &str, - ) -> anyhow::Result> { - let temp_dir = TempDirBuilder::new() - .prefix("git-bootstrap-") - .tempdir() - .map_err(|e| anyhow!(e))?; - let repo = Repository::init_bare(temp_dir.path())?; - - let Some(remote_head) = fetch_remote_head(&repo, cfg, branch)? else { - return Ok(None); - }; - - let ordered = { - let mut revwalk = repo.revwalk()?; - revwalk.push(remote_head)?; - revwalk.set_sorting(Sort::TOPOLOGICAL | Sort::REVERSE)?; - - let mut collected = Vec::new(); - for oid_result in revwalk { - collected.push(oid_result?); - } - collected - }; - - if ordered.is_empty() { - return Ok(None); - } - - let pack_bytes_master = read_first_pack(repo.path())?.ok_or_else(|| { - anyhow!( - "remote fetch produced no pack files for workspace {}", - workspace_id - ) - })?; - - let mut latest_meta = self.git_storage.latest_commit(workspace_id).await?; - - for oid in ordered { - let existing_meta = self.commit_meta_by_id(workspace_id, oid.as_bytes()).await?; - let existing_pack = self - .git_storage - .fetch_pack_for_commit(workspace_id, oid.as_bytes()) - .await?; - // Skip only when both DB row and pack already exist. 
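As an aside on the hunk above: `load_commit_meta_ref` resolves a trailing `^` as "first parent of", so `abc123^` loads `abc123` and then follows its `parent_commit_id`. A std-only sketch of that resolution over an in-memory commit map (`Meta` and the map are hypothetical stand-ins for the two DB lookups):

```rust
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct Meta {
    parent: Option<String>, // hex id of the first parent, if any
}

// "abc123" resolves to that commit; "abc123^" to its first parent,
// mirroring the strip_suffix('^') branch in load_commit_meta_ref.
fn resolve<'a>(commits: &'a HashMap<String, Meta>, rev: &str) -> Option<&'a Meta> {
    if let Some(base) = rev.strip_suffix('^') {
        let parent = commits.get(base)?.parent.as_ref()?;
        return commits.get(parent);
    }
    commits.get(rev)
}

fn main() {
    let commits = HashMap::from([
        ("a1".to_string(), Meta { parent: None }),
        ("b2".to_string(), Meta { parent: Some("a1".to_string()) }),
    ]);
    assert!(resolve(&commits, "b2^").is_some()); // resolves to a1
    assert!(resolve(&commits, "a1^").is_none()); // root has no parent
    println!("ok");
}
```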
- if existing_meta.is_some() && existing_pack.is_some() { - latest_meta = existing_meta; - continue; - } - - let (meta, snapshots, pack_bytes) = { - let commit = repo.find_commit(oid)?; - let committed_at = git_time_to_datetime(commit.time())?; - let message = commit - .message() - .map(|m| m.trim_end_matches('\n').to_string()) - .filter(|m| !m.trim().is_empty()); - let author = commit.author(); - let author_name = author.name().map(|s| s.to_string()); - let author_email = author.email().map(|s| s.to_string()); - let parent_commit_id = if commit.parent_count() > 0 { - let parent = commit.parent_id(0)?; - Some(parent.as_bytes().to_vec()) - } else { - None - }; - - let files = read_commit_files(&repo, oid.as_bytes())?; - let mut snapshots: HashMap = HashMap::new(); - let mut file_hash_index: HashMap = HashMap::new(); - for (path, bytes) in files.into_iter() { - let hash = sha256_hex(&bytes); - let is_text = std::str::from_utf8(&bytes).is_ok(); - file_hash_index.insert(path.clone(), hash.clone()); - snapshots.insert( - path, - FileSnapshot { - hash, - data: FileSnapshotData::Inline(bytes), - is_text, - }, - ); - } - - let pack_builder = repo.packbuilder()?; - // Use the full remote pack for every commit to avoid thin-pack corruption. - let pack_bytes = pack_bytes_master.clone(); - drop(pack_builder); - - let commit_id = oid.as_bytes().to_vec(); - let pack_key = format!( - "git/packs/{}/{}.pack", - workspace_id, - encode_commit_id(&commit_id) - ); - - let meta = CommitMeta { - commit_id, - parent_commit_id, - message, - author_name, - author_email, - committed_at, - pack_key, - file_hash_index, - }; - - (meta, snapshots, pack_bytes) - }; - - let prev_latest = latest_meta.clone(); - let snapshot_keys = match self - .store_commit_snapshots(workspace_id, &meta.commit_id, &snapshots) - .await - { - Ok(keys) => keys, - Err(err) => { - return Err(err); - } - }; - - if let Err(err) = self - .git_storage - .store_pack(workspace_id, &pack_bytes, &meta) - .await - { - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - return Err(err); - } - - if let Err(err) = self - .git_storage - .set_latest_commit(workspace_id, Some(&meta)) - .await - { - let _ = self - .git_storage - .delete_pack(workspace_id, &meta.commit_id) - .await; - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - let _ = self - .git_storage - .set_latest_commit(workspace_id, prev_latest.as_ref()) - .await; - return Err(err); - } - - let mut tx = self.pool.begin().await?; - let upsert_res = sqlx::query( - r#"INSERT INTO git_commits ( - commit_id, - parent_commit_id, - workspace_id, - message, - author_name, - author_email, - committed_at, - pack_key, - file_hash_index - ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) - ON CONFLICT (workspace_id, commit_id) DO UPDATE SET - parent_commit_id = EXCLUDED.parent_commit_id, - message = EXCLUDED.message, - author_name = EXCLUDED.author_name, - author_email = EXCLUDED.author_email, - committed_at = EXCLUDED.committed_at, - pack_key = EXCLUDED.pack_key, - file_hash_index = EXCLUDED.file_hash_index"#, - ) - .bind(meta.commit_id.clone()) - .bind(meta.parent_commit_id.clone()) - .bind(workspace_id) - .bind(meta.message.clone()) - .bind(meta.author_name.clone()) - .bind(meta.author_email.clone()) - .bind(meta.committed_at) - .bind(meta.pack_key.clone()) - .bind(Json(&meta.file_hash_index)) - .execute(&mut *tx) - .await; - - if let Err(err) = upsert_res { - tx.rollback().await.ok(); - let _ = self - .git_storage - 
.delete_pack(workspace_id, &meta.commit_id) - .await; - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - let _ = self - .git_storage - .set_latest_commit(workspace_id, prev_latest.as_ref()) - .await; - return Err(err.into()); - } - - if let Err(err) = sqlx::query( - "UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1", - ) - .bind(workspace_id) - .execute(&mut *tx) - .await - { - tx.rollback().await.ok(); - let _ = self - .git_storage - .delete_pack(workspace_id, &meta.commit_id) - .await; - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - let _ = self - .git_storage - .set_latest_commit(workspace_id, prev_latest.as_ref()) - .await; - return Err(err.into()); - } - - if let Err(err) = tx.commit().await { - let _ = self - .git_storage - .delete_pack(workspace_id, &meta.commit_id) - .await; - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - let _ = self - .git_storage - .set_latest_commit(workspace_id, prev_latest.as_ref()) - .await; - return Err(err.into()); - } - - latest_meta = Some(meta); - } - - drop(repo); - let _ = temp_dir.close(); - self.git_storage.latest_commit(workspace_id).await - } - - async fn backfill_commits_from_storage( - &self, - workspace_id: Uuid, - latest: &CommitMeta, - ) -> anyhow::Result<()> { - let mut pending = Vec::new(); - let mut cursor = Some(latest.clone()); - while let Some(meta) = cursor { - if self - .commit_meta_by_id(workspace_id, meta.commit_id.as_slice()) - .await? - .is_some() - { - break; - } - pending.push(meta.clone()); - cursor = match meta.parent_commit_id.clone() { - Some(parent) => { - self.git_storage - .commit_meta(workspace_id, parent.as_slice()) - .await? - } - None => None, - }; - } - if pending.is_empty() { - return Ok(()); - } - pending.reverse(); - let mut tx = self.pool.begin().await?; - for meta in pending.into_iter() { - sqlx::query( - r#"INSERT INTO git_commits ( - commit_id, - parent_commit_id, - workspace_id, - message, - author_name, - author_email, - committed_at, - pack_key, - file_hash_index - ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) - ON CONFLICT (workspace_id, commit_id) DO NOTHING"#, - ) - .bind(meta.commit_id.clone()) - .bind(meta.parent_commit_id.clone()) - .bind(workspace_id) - .bind(meta.message.clone()) - .bind(meta.author_name.clone()) - .bind(meta.author_email.clone()) - .bind(meta.committed_at) - .bind(meta.pack_key.clone()) - .bind(Json(&meta.file_hash_index)) - .execute(&mut *tx) - .await?; - } - tx.commit().await?; - Ok(()) - } - - async fn collect_commit_chain( - &self, - workspace_id: Uuid, - start: CommitMeta, - ) -> anyhow::Result> { - let mut chain = Vec::new(); - let mut cursor = Some(start); - while let Some(meta) = cursor { - chain.push(meta.clone()); - cursor = match meta.parent_commit_id.clone() { - Some(parent) => { - self.commit_meta_by_id(workspace_id, parent.as_slice()) - .await? 
- } - None => None, - }; - } - Ok(chain) - } - - async fn remove_commits( - &self, - workspace_id: Uuid, - commits: &[CommitMeta], - ) -> anyhow::Result<()> { - for meta in commits { - let commit_hex = encode_commit_id(&meta.commit_id); - if let Err(error) = self - .git_storage - .delete_pack(workspace_id, &meta.commit_id) - .await - { - warn!( - workspace_id = %workspace_id, - commit = %commit_hex, - error = ?error, - "git_commit_cleanup_pack_failed" - ); - } - for path in meta.file_hash_index.keys() { - let key = blob_key(workspace_id, &meta.commit_id, path); - if let Err(error) = self.git_storage.delete_blob(&key).await { - warn!( - workspace_id = %workspace_id, - commit = %commit_hex, - path = %path, - error = ?error, - "git_commit_cleanup_blob_failed" - ); - } - } - sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1 AND commit_id = $2") - .bind(workspace_id) - .bind(meta.commit_id.clone()) - .execute(&self.pool) - .await?; - } - Ok(()) - } - - async fn realign_commit_history( - &self, - workspace_id: Uuid, - storage_latest: Option, - db_latest: Option, - ) -> anyhow::Result<()> { - match (storage_latest, db_latest) { - (Some(storage), Some(db)) => { - if storage.commit_id == db.commit_id { - return Ok(()); - } - let storage_id = storage.commit_id.clone(); - let mut cursor = Some(db.clone()); - let mut reached_storage = false; - let mut to_prune: Vec = Vec::new(); - while let Some(meta) = cursor.clone() { - if meta.commit_id == storage_id { - reached_storage = true; - break; - } - to_prune.push(meta.clone()); - cursor = match meta.parent_commit_id.clone() { - Some(parent) => { - self.commit_meta_by_id(workspace_id, parent.as_slice()) - .await? - } - None => None, - }; - } - if !reached_storage { - let all = self.collect_commit_chain(workspace_id, db.clone()).await?; - if !all.is_empty() { - info!( - workspace_id = %workspace_id, - removed = all.len(), - "git_commit_pointer_reset_db_chain" - ); - self.remove_commits(workspace_id, &all).await?; - } - } else if !to_prune.is_empty() { - info!( - workspace_id = %workspace_id, - removed = to_prune.len(), - "git_commit_pointer_pruned_db_commits" - ); - self.remove_commits(workspace_id, &to_prune).await?; - } - self.backfill_commits_from_storage(workspace_id, &storage) - .await?; - } - (Some(storage), None) => { - self.backfill_commits_from_storage(workspace_id, &storage) - .await?; - } - (None, Some(db)) => { - let all = self.collect_commit_chain(workspace_id, db).await?; - if !all.is_empty() { - info!( - workspace_id = %workspace_id, - removed = all.len(), - "git_commit_pointer_dropped_db_history" - ); - self.remove_commits(workspace_id, &all).await?; - } - } - (None, None) => {} - } - Ok(()) - } - - async fn prune_commits_from_head( - &self, - workspace_id: Uuid, - commits: &[CommitMeta], - ) -> anyhow::Result<()> { - if commits.is_empty() { - return Ok(()); - } - self.remove_commits(workspace_id, commits).await?; - let new_latest = self.latest_commit_meta(workspace_id).await?; - self.git_storage - .set_latest_commit(workspace_id, new_latest.as_ref()) - .await?; - Ok(()) - } - - async fn ensure_storage_commit_integrity(&self, workspace_id: Uuid) -> anyhow::Result<()> { - loop { - let Some(latest) = self.latest_commit_meta(workspace_id).await? 
else { - self.git_storage - .set_latest_commit(workspace_id, None) - .await?; - return Ok(()); - }; - let chain = self - .collect_commit_chain(workspace_id, latest.clone()) - .await?; - let mut missing_idx: Option = None; - for (idx, meta) in chain.iter().enumerate() { - match self - .git_storage - .commit_meta(workspace_id, meta.commit_id.as_slice()) - .await? - { - Some(_) => continue, - None => { - missing_idx = Some(idx); - break; - } - } - } - if let Some(idx) = missing_idx { - let to_remove: Vec = chain[..=idx].to_vec(); - info!( - workspace_id = %workspace_id, - removed = to_remove.len(), - missing_commit = %encode_commit_id(&chain[idx].commit_id), - "git_commit_pointer_pruned_missing_storage_meta" - ); - self.prune_commits_from_head(workspace_id, &to_remove) - .await?; - continue; - } - break; - } - Ok(()) - } - - async fn collect_current_state( - &self, - workspace_id: Uuid, - ) -> anyhow::Result> { - let mut state: HashMap = HashMap::new(); - - let doc_rows = self - .docs - .list_workspace_documents(workspace_id) - .await? - .into_iter() - .filter(|d| d.doc_type != "folder"); - - for doc in doc_rows { - let export = match self.snapshot.export_current_markdown(&doc.id).await? { - Some(export) => export, - None => continue, - }; - let repo_path = export - .repo_path - .or_else(|| Some(doc.desired_path.clone())) - .map(normalize_repo_path) - .ok_or_else(|| anyhow!("missing_repo_path_for_doc {}", doc.id))?; - state.insert( - repo_path, - FileSnapshot { - hash: export.content_hash, - data: FileSnapshotData::Inline(export.bytes), - is_text: true, - }, - ); - } - - let attachment_rows = sqlx::query( - r#"SELECT f.id AS file_id, f.storage_path, f.content_hash - FROM files f - JOIN documents d ON d.id = f.document_id - WHERE d.owner_id = $1"#, - ) - .bind(workspace_id) - .fetch_all(&self.pool) - .await?; - - for row in attachment_rows { - let file_id: Uuid = row.get("file_id"); - let storage_path: String = row.get("storage_path"); - let stored_hash: Option = row - .try_get("content_hash") - .ok() - .and_then(|h: String| if h.is_empty() { None } else { Some(h) }); - let (hash, needs_persist) = match stored_hash { - Some(existing) => (existing, false), - None => { - let computed = self - .compute_attachment_hash(&storage_path) - .await - .with_context(|| { - format!("failed to compute attachment hash for {}", storage_path) - })?; - match computed { - Some(value) => (value, true), - None => continue, - } - } - }; - if needs_persist { - if let Err(err) = self.persist_attachment_hash(file_id, &hash).await { - warn!( - file_id = %file_id, - path = storage_path.as_str(), - error = ?err, - "git_workspace_attachment_hash_persist_failed" - ); - } - } - let repo_path = repo_relative_path(&storage_path)?; - state.insert( - repo_path, - FileSnapshot { - hash, - data: FileSnapshotData::StoragePath(storage_path), - is_text: false, - }, - ); - } - - Ok(state) - } - - async fn compute_attachment_hash(&self, storage_path: &str) -> anyhow::Result> { - let abs = self.storage.absolute_from_relative(storage_path); - match self.storage.read_bytes(abs.as_path()).await { - Ok(bytes) => Ok(Some(sha256_hex(&bytes))), - Err(err) => { - if let Some(io_err) = err.downcast_ref::() { - if io_err.kind() == io::ErrorKind::NotFound { - return Ok(None); - } - } - if err.to_string().to_lowercase().contains("not found") { - return Ok(None); - } - Err(err) - } - } - } - - async fn persist_attachment_hash(&self, file_id: Uuid, hash: &str) -> anyhow::Result<()> { - sqlx::query( - r#"UPDATE files SET content_hash = $2, updated_at 
= now() - WHERE id = $1"#, - ) - .bind(file_id) - .bind(hash) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn fetch_dirty(&self, workspace_id: Uuid) -> anyhow::Result> { - let rows = sqlx::query( - r#"SELECT path, is_text, op, content_hash - FROM git_dirty_files - WHERE workspace_id = $1 - ORDER BY created_at ASC"#, - ) - .bind(workspace_id) - .fetch_all(&self.pool) - .await?; - - let mut out = Vec::new(); - for r in rows { - let path: String = r.get("path"); - let is_text: bool = r.get("is_text"); - let op: String = r.get("op"); - let content_hash: Option = r.try_get("content_hash").ok(); - out.push(DirtyRow { - path, - is_text, - op, - content_hash, - }); - } - Ok(out) - } - - async fn clear_dirty(&self, workspace_id: Uuid) -> anyhow::Result { - let res = sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1") - .bind(workspace_id) - .execute(&self.pool) - .await?; - Ok(res.rows_affected()) - } - - async fn export_markdown_for_repo_path( - &self, - workspace_id: Uuid, - repo_path: &str, - ) -> anyhow::Result, String)>> { - let trimmed = repo_path.trim_start_matches('/'); - let mut candidates: Vec<(&str, bool)> = vec![(trimmed, false)]; - if let Some(stripped) = trimmed.strip_prefix("Archives/") { - if !stripped.is_empty() { - candidates.push((stripped, true)); - } - } - - // First try by normalized repo path (documents.path). Fall back to desired_path for older records. - let all_docs = self.docs.list_workspace_documents(workspace_id).await?; - - for (candidate, archived_only) in candidates { - let lookup_path = format!("{}/{}", workspace_id, candidate); - let from_path = self - .docs - .get_by_owner_and_path(workspace_id, &lookup_path) - .await?; - - let doc = if let Some(doc) = from_path { - Some(doc) - } else { - all_docs - .iter() - .find(|d| normalize_repo_path(d.desired_path.clone()) == candidate) - .cloned() - }; - - if let Some(doc) = doc { - if doc.doc_type == "folder" { - continue; - } - if archived_only && doc.archived_at.is_none() { - continue; - } - if let Some(export) = self.snapshot.export_current_markdown(&doc.id).await? 
{ - return Ok(Some((export.bytes, export.content_hash))); - } - } - } - - Ok(None) - } - - fn compute_deltas( - &self, - current: &HashMap, - previous: &HashMap, - ) -> FileDeltaSummary { - let mut added = Vec::new(); - let mut modified = Vec::new(); - let mut deleted = Vec::new(); - - for (path, snapshot) in current.iter() { - match previous.get(path) { - None => added.push(path.clone()), - Some(prev_hash) if prev_hash != &snapshot.hash => modified.push(path.clone()), - _ => {} - } - } - - for path in previous.keys() { - if !current.contains_key(path) { - deleted.push(path.clone()); - } - } - - FileDeltaSummary { - added, - modified, - deleted, - } - } - - async fn store_commit_snapshots( - &self, - workspace_id: Uuid, - commit_id: &[u8], - state: &HashMap, - ) -> anyhow::Result> { - let mut stored = Vec::new(); - for (path, snapshot) in state.iter() { - let key = blob_key(workspace_id, commit_id, path); - let bytes = self.snapshot_bytes(snapshot).await?; - if let Err(err) = self.git_storage.put_blob(&key, &bytes).await { - for key in stored.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - return Err(err); - } - stored.push(key); - } - Ok(stored) - } - - async fn snapshot_bytes(&self, snapshot: &FileSnapshot) -> anyhow::Result> { - match &snapshot.data { - FileSnapshotData::Inline(bytes) => Ok(bytes.clone()), - FileSnapshotData::StoragePath(path) => { - let abs = self.storage.absolute_from_relative(path); - self.storage.read_bytes(abs.as_path()).await - } - } - } - - async fn load_file_snapshot( - &self, - workspace_id: Uuid, - commit_id: &[u8], - path: &str, - ) -> anyhow::Result>> { - let key = blob_key(workspace_id, commit_id, path); - match self.git_storage.fetch_blob(&key).await { - Ok(bytes) => Ok(Some(bytes)), - Err(err) => { - // Treat missing blob as absence (e.g., binary or not stored). - if let Some(io_err) = err.downcast_ref::() { - if io_err.kind() == std::io::ErrorKind::NotFound { - return Ok(None); - } - } - if err.to_string().contains("not found") { - return Ok(None); - } - Err(err) - } - } - } - - #[allow(dead_code)] - async fn state_from_commit_meta( - &self, - workspace_id: Uuid, - meta: &CommitMeta, - ) -> anyhow::Result> { - let mut state: HashMap = HashMap::new(); - for path in meta.file_hash_index.keys() { - let Some(bytes) = self - .load_file_snapshot(workspace_id, &meta.commit_id, path) - .await? 
- else { - continue; - }; - let hash = sha256_hex(&bytes); - let is_text = std::str::from_utf8(&bytes).is_ok(); - state.insert( - path.clone(), - FileSnapshot { - hash, - data: FileSnapshotData::Inline(bytes), - is_text, - }, - ); - } - Ok(state) - } - - async fn apply_state_to_workspace( - &self, - workspace_id: Uuid, - state: &HashMap, - previous_index: &HashMap, - ) -> anyhow::Result { - let mut changed: u32 = 0; - // write/update files - for (path, snapshot) in state.iter() { - let rel = format!("{}/{}", workspace_id, path.trim_start_matches('/')); - let abs = self.storage.absolute_from_relative(&rel); - if let Some(parent) = abs.parent() { - async_fs::create_dir_all(parent).await?; - } - let bytes = self.snapshot_bytes(snapshot).await?; - self.storage.write_bytes(abs.as_path(), &bytes).await?; - changed += 1; - } - // remove files missing in next state - for path in previous_index.keys() { - if state.contains_key(path) { - continue; - } - let rel = format!("{}/{}", workspace_id, path.trim_start_matches('/')); - let abs = self.storage.absolute_from_relative(&rel); - if async_fs::remove_file(&abs).await.is_ok() { - changed += 1; - } - } - Ok(changed) - } - - async fn ensure_folder( - &self, - workspace_id: Uuid, - actor_id: Uuid, - folder_path: &str, - cache: &mut HashMap, - ) -> anyhow::Result> { - let trimmed = folder_path.trim_matches('/'); - if trimmed.is_empty() { - return Ok(None); - } - - let mut current_parent: Option = None; - let mut accumulated = String::new(); - for segment in trimmed.split('/') { - if !accumulated.is_empty() { - accumulated.push('/'); - } - accumulated.push_str(segment); - - if let Some(id) = cache.get(&accumulated) { - current_parent = Some(*id); - continue; - } - - let lookup_path = format!("{}/{}", workspace_id, accumulated); - if let Some(existing) = self - .docs - .get_by_owner_and_path(workspace_id, &lookup_path) - .await? 
- { - if existing.doc_type != "folder" { - anyhow::bail!("path_conflict_not_folder"); - } - cache.insert(accumulated.clone(), existing.id); - current_parent = Some(existing.id); - continue; - } - - let title = if segment.trim().is_empty() { - "folder" - } else { - segment - }; - let folder = self - .docs - .create_for_user( - workspace_id, - actor_id, - title, - current_parent, - "folder", - None, - ) - .await?; - self.docs - .update_repo_path(folder.id, workspace_id, &accumulated) - .await?; - - cache.insert(accumulated.clone(), folder.id); - current_parent = Some(folder.id); - } - - Ok(current_parent) - } - - async fn materialize_documents_from_state( - &self, - workspace_id: Uuid, - actor_id: Uuid, - state: &HashMap, - ) -> anyhow::Result<(u32, u32)> { - fn folder_key(path: &str) -> String { - path.rsplitn(2, '/') - .nth(1) - .map(|s| s.trim().trim_end_matches('/').to_string()) - .filter(|s| !s.is_empty()) - .unwrap_or_else(String::new) - } - - fn attachment_owner_folder(path: &str) -> String { - if let Some(idx) = path.find("/attachments/") { - let prefix = &path[..idx]; - if prefix.is_empty() { - String::new() - } else { - prefix.trim_end_matches('/').to_string() - } - } else if path.starts_with("attachments/") { - String::new() - } else { - folder_key(path) - } - } - - fn is_markdown_path(path: &str) -> bool { - let lower = path.to_ascii_lowercase(); - lower.ends_with(".md") || lower.ends_with(".markdown") - } - - let mut folder_cache: HashMap = HashMap::new(); - let mut docs_created: u32 = 0; - let mut attachments_created: u32 = 0; - - let mut existing_by_desired: HashMap = HashMap::new(); - let mut folder_docs: HashMap> = HashMap::new(); - - for doc in self.docs.list_workspace_documents(workspace_id).await? { - let normalized = normalize_repo_path(doc.desired_path.clone()); - existing_by_desired.insert(normalized.clone(), doc.id); - if doc.doc_type != "folder" { - let key = folder_key(&normalized); - folder_docs.entry(key.clone()).or_default().push(doc.id); - if doc.archived_at.is_some() { - let archived_key = if key.is_empty() { - "Archives".to_string() - } else { - format!("Archives/{}", key) - }; - folder_docs.entry(archived_key).or_default().push(doc.id); - } - } - } - - let mut paths: Vec = state.keys().cloned().collect(); - paths.sort(); - - // First pass: create documents only for markdown files - for path in paths.iter() { - let snapshot = match state.get(path) { - Some(s) => s, - None => continue, - }; - if !snapshot.is_text { - continue; - } - let normalized = normalize_repo_path(path.clone()); - if !is_markdown_path(&normalized) { - continue; - } - - // Skip if document already exists at desired_path (including folders that would conflict) - if existing_by_desired.contains_key(&normalized) { - continue; - } - - let parent_path = folder_key(&normalized); - let parent_id = if parent_path.is_empty() { - None - } else { - self.ensure_folder(workspace_id, actor_id, &parent_path, &mut folder_cache) - .await? 
- }; - - let filename = normalized - .rsplit('/') - .next() - .unwrap_or(&normalized) - .to_string(); - let title = filename - .trim_end_matches(".md") - .trim_end_matches(".markdown") - .trim_end_matches(".txt"); - - let doc = self - .docs - .create_for_user( - workspace_id, - actor_id, - if title.is_empty() { "Document" } else { title }, - parent_id, - "document", - None, - ) - .await?; - self.docs - .update_repo_path(doc.id, workspace_id, &normalized) - .await?; - docs_created += 1; - existing_by_desired.insert(normalized.clone(), doc.id); - - folder_docs.entry(parent_path).or_default().push(doc.id); - - let bytes = self.snapshot_bytes(snapshot).await.unwrap_or_default(); - let body = extract_markdown_body(&bytes) - .unwrap_or_else(|| std::str::from_utf8(&bytes).unwrap_or_default().to_string()); - let snap_bytes = snapshot_from_markdown(&body); - let _ = self - .realtime - .apply_snapshot(&doc.id.to_string(), snap_bytes.as_slice()) - .await; - let _ = self.realtime.force_persist(&doc.id.to_string()).await; - } - - for docs in folder_docs.values_mut() { - docs.sort(); - } - - // Second pass: attach binaries without creating documents - for path in paths { - let snapshot = match state.get(&path) { - Some(s) => s, - None => continue, - }; - if snapshot.is_text { - continue; - } - let normalized = normalize_repo_path(path.clone()); - if !normalized.contains("/attachments/") && !normalized.starts_with("attachments/") { - continue; - } - let filename = normalized - .rsplit('/') - .next() - .unwrap_or(&normalized) - .to_string(); - let folder = attachment_owner_folder(&normalized); - let doc_id = folder_docs.get(&folder).and_then(|v| v.first().copied()); - let Some(doc_id) = doc_id else { - warn!( - workspace_id = %workspace_id, - repo_path = normalized.as_str(), - "git_materialize_attachment_no_owner" - ); - continue; - }; - - let storage_path = format!("{}/{}", workspace_id, normalized); - let existing: Option = - sqlx::query_scalar("SELECT id FROM files WHERE storage_path = $1 LIMIT 1") - .bind(&storage_path) - .fetch_optional(&self.pool) - .await?; - if existing.is_some() { - continue; - } - - let bytes = self.snapshot_bytes(snapshot).await.unwrap_or_default(); - let size = bytes.len() as i64; - let _ = sqlx::query( - r#"INSERT INTO files (document_id, filename, content_type, size, storage_path, content_hash) - VALUES ($1,$2,$3,$4,$5,$6)"#, - ) - .bind(doc_id) - .bind(&filename) - .bind::>(None) - .bind(size) - .bind(&storage_path) - .bind(&snapshot.hash) - .execute(&self.pool) - .await?; - attachments_created += 1; - } - Ok((docs_created, attachments_created)) - } - - /// Apply merged markdown files directly to realtime/persistence so documents reflect Pull results. - async fn apply_merged_to_documents( - &self, - workspace_id: Uuid, - next_state: &HashMap, - ) -> anyhow::Result<()> { - let doc_rows = self - .docs - .list_workspace_documents(workspace_id) - .await? 
- .into_iter() - .filter(|d| d.doc_type != "folder"); - - for doc in doc_rows { - let doc_id = doc.id; - let normalized = normalize_repo_path(doc.desired_path.clone()); - let Some(snapshot) = next_state.get(&normalized) else { - continue; - }; - - if !snapshot.is_text { - continue; - } - let bytes = match self.snapshot_bytes(snapshot).await { - Ok(b) => b, - Err(err) => { - warn!(document_id = %doc_id, error = ?err, "git_pull_snapshot_bytes_failed"); - continue; - } - }; - let body = match extract_markdown_body(&bytes) { - Some(b) => b, - None => continue, - }; - let snap_bytes = - crate::application::services::realtime::snapshot::snapshot_from_markdown(&body); - if let Err(err) = crate::infrastructure::storage::suppress_git_dirty(async { - self.realtime - .apply_snapshot(&doc_id.to_string(), snap_bytes.as_slice()) - .await?; - self.realtime.force_persist(&doc_id.to_string()).await - }) - .await - { - warn!(document_id = %doc_id, error = ?err, "git_pull_apply_snapshot_failed"); - continue; - } - } - Ok(()) - } - - fn build_diff_result( - &self, - path: &str, - old_content: Option<&str>, - new_content: Option<&str>, - ) -> TextDiffResult { - match (old_content, new_content) { - (Some(old), Some(new)) => compute_text_diff(old, new, path), - _ => TextDiffResult { - file_path: path.to_string(), - diff_lines: Vec::new(), - old_content: old_content.map(|s| s.to_string()), - new_content: new_content.map(|s| s.to_string()), - }, - } - } - - async fn commit_diff_via_packs( - &self, - workspace_id: Uuid, - from_meta: Option<&CommitMeta>, - to_meta: &CommitMeta, - ) -> anyhow::Result> { - let (to_pack_dir, to_pack_paths) = self - .persist_pack_chain(workspace_id, Some(to_meta.commit_id.as_slice())) - .await? - .ok_or_else(|| { - anyhow!( - "missing pack data for commit {}", - encode_commit_id(&to_meta.commit_id) - ) - })?; - - let from_pack = if let Some(from_meta) = from_meta { - if from_meta.commit_id != to_meta.commit_id { - Some( - self.persist_pack_chain(workspace_id, Some(from_meta.commit_id.as_slice())) - .await? - .ok_or_else(|| { - anyhow!( - "missing pack data for commit {}", - encode_commit_id(&from_meta.commit_id) - ) - })?, - ) - } else { - None - } - } else { - None - }; - - let temp_dir = TempDirBuilder::new() - .prefix("git-diff-") - .tempdir() - .map_err(|e| anyhow::anyhow!(e))?; - let repo = Repository::init_bare(temp_dir.path())?; - - apply_pack_files(&repo, &to_pack_paths)?; - if let Some((_, ref paths)) = from_pack { - apply_pack_files(&repo, paths)?; - } - - let from_files = if let Some(from_meta) = from_meta { - read_commit_files(&repo, from_meta.commit_id.as_slice())? 
- } else { - HashMap::new() - }; - let to_files = read_commit_files(&repo, to_meta.commit_id.as_slice())?; - - drop(repo); - let _ = temp_dir.close(); - drop(to_pack_dir); - if let Some((dir, _)) = from_pack { - drop(dir); - } - - let mut paths: BTreeSet = BTreeSet::new(); - paths.extend(from_files.keys().cloned()); - paths.extend(to_files.keys().cloned()); - - let mut results = Vec::new(); - for path in paths { - let old_bytes = from_files.get(&path); - let new_bytes = to_files.get(&path); - let old_content = old_bytes - .and_then(|b| std::str::from_utf8(b).ok()) - .map(|s| s.to_string()); - let new_content = new_bytes - .and_then(|b| std::str::from_utf8(b).ok()) - .map(|s| s.to_string()); - if old_content.is_none() && new_content.is_none() { - if old_bytes.is_some() || new_bytes.is_some() { - results.push(self.build_diff_result(&path, None, None)); - } - continue; - } - results.push(self.build_diff_result( - &path, - old_content.as_deref(), - new_content.as_deref(), - )); - } - Ok(results) - } - - async fn commit_diff_from_storage( - &self, - workspace_id: Uuid, - from_meta: Option<&CommitMeta>, - to_meta: Option<&CommitMeta>, - ) -> anyhow::Result> { - let Some(to_meta) = to_meta else { - return Ok(Vec::new()); - }; - - let mut paths: BTreeSet = BTreeSet::new(); - if let Some(meta) = from_meta { - paths.extend(meta.file_hash_index.keys().cloned()); - } - paths.extend(to_meta.file_hash_index.keys().cloned()); - - let mut results = Vec::new(); - for path in paths { - let old_hash = from_meta.and_then(|meta| meta.file_hash_index.get(&path)); - let new_hash = to_meta.file_hash_index.get(&path); - if let (Some(old), Some(new)) = (old_hash, new_hash) { - if old == new { - continue; - } - } - - let old_bytes = match (from_meta, old_hash) { - (Some(meta), Some(_)) => { - self.load_file_snapshot(workspace_id, meta.commit_id.as_slice(), &path) - .await? - } - _ => None, - }; - let new_bytes = match new_hash { - Some(_) => { - self.load_file_snapshot(workspace_id, to_meta.commit_id.as_slice(), &path) - .await? - } - None => None, - }; - - let old_text = old_bytes - .as_ref() - .and_then(|bytes| std::str::from_utf8(bytes).ok()) - .map(|s| s.to_string()); - let new_text = new_bytes - .as_ref() - .and_then(|bytes| std::str::from_utf8(bytes).ok()) - .map(|s| s.to_string()); - - if old_text.is_none() && new_text.is_none() { - if old_bytes.is_some() || new_bytes.is_some() { - results.push(self.build_diff_result(&path, None, None)); - } - } else { - results.push(self.build_diff_result( - &path, - old_text.as_deref(), - new_text.as_deref(), - )); - } - } - - Ok(results) - } - - // Build a synthetic commit from the current workspace state so dirty edits participate in merges. - fn build_synthetic_commit( - &self, - workspace_id: Uuid, - repo: &Repository, - base_oid: git2::Oid, - ) -> anyhow::Result { - // Collect current workspace state into blobs and index entries (supports nested paths). 
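The `block_in_place` calls just below bridge from this synchronous libgit2 call path back into async code. A minimal sketch of the same bridge, assuming a multi-thread tokio runtime (the async body here is a placeholder):

```rust
use tokio::runtime::Handle;

// Re-enter async code from a sync context on a multi-thread tokio
// runtime: block_in_place hands the worker's queue to another thread,
// so the nested block_on cannot starve the executor.
fn load_blocking(key: String) -> anyhow::Result<Vec<u8>> {
    tokio::task::block_in_place(|| {
        Handle::current().block_on(async move {
            // placeholder for real async I/O (DB fetch, object-store read, ...)
            Ok(key.into_bytes())
        })
    })
}

#[tokio::main(flavor = "multi_thread")]
async fn main() -> anyhow::Result<()> {
    let bytes = load_blocking("hello".to_string())?;
    println!("{} bytes", bytes.len());
    Ok(())
}
```

Note that `block_in_place` panics on a current-thread runtime, which is why the pattern is tied to the multi-thread flavor.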
- let current_state = tokio::task::block_in_place(|| { - let handle = tokio::runtime::Handle::current(); - handle.block_on(self.collect_current_state(workspace_id)) - })?; - - let mut index = repo.index()?; - index.clear()?; - - for (path, snapshot) in current_state.iter() { - let bytes = tokio::task::block_in_place(|| { - let handle = tokio::runtime::Handle::current(); - handle.block_on(self.snapshot_bytes(snapshot)) - })?; - let blob_oid = repo.blob(&bytes)?; - - let entry = git2::IndexEntry { - ctime: git2::IndexTime::new(0, 0), - mtime: git2::IndexTime::new(0, 0), - dev: 0, - ino: 0, - mode: 0o100644, - uid: 0, - gid: 0, - file_size: bytes.len() as u32, - id: blob_oid, - flags: std::cmp::min(path.as_bytes().len(), 0x0fff) as u16, - flags_extended: 0, - path: path.as_bytes().to_vec(), - }; - index.add(&entry)?; - } - - let tree_oid = index.write_tree_to(repo)?; - let tree = repo.find_tree(tree_oid)?; - - // Create a synthetic commit with remote as parent to anchor the merge base. - // Use an explicit signature so we don't rely on local git config being present. - let sig = signature_from_parts("RefMD", "refmd@example.com", Utc::now())?; - let commit_oid = repo.commit( - Some("refs/heads/synthetic-workspace"), - &sig, - &sig, - "workspace-state", - &tree, - &[&repo.find_commit(base_oid)?], - )?; - Ok(commit_oid) - } -} - -#[async_trait] -impl GitWorkspacePort for GitWorkspaceService { - async fn ensure_repository( - &self, - workspace_id: Uuid, - default_branch: &str, - ) -> anyhow::Result<()> { - sqlx::query( - r#"INSERT INTO git_repository_state (workspace_id, initialized, default_branch, initialized_at, updated_at) - VALUES ($1, true, $2, now(), now()) - ON CONFLICT (workspace_id) DO UPDATE SET - initialized = true, - default_branch = EXCLUDED.default_branch, - initialized_at = COALESCE(git_repository_state.initialized_at, EXCLUDED.initialized_at), - updated_at = now()"#, - ) - .bind(workspace_id) - .bind(default_branch) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn remove_repository(&self, workspace_id: Uuid) -> anyhow::Result<()> { - let mut tx = self.pool.begin().await?; - sqlx::query("DELETE FROM git_dirty_files WHERE workspace_id = $1") - .bind(workspace_id) - .execute(&mut *tx) - .await?; - sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1") - .bind(workspace_id) - .execute(&mut *tx) - .await?; - sqlx::query( - "UPDATE git_repository_state SET initialized = false, updated_at = now() WHERE workspace_id = $1", - ) - .bind(workspace_id) - .execute(&mut *tx) - .await?; - tx.commit().await?; - self.git_storage.delete_all(workspace_id).await?; - Ok(()) - } - - async fn status(&self, workspace_id: Uuid) -> anyhow::Result { - let state = self.load_repository_state(workspace_id).await?; - let Some((initialized, branch)) = state else { - return Ok(GitWorkspaceStatus { - repository_initialized: false, - current_branch: None, - uncommitted_changes: 0, - untracked_files: 0, - }); - }; - if !initialized { - return Ok(GitWorkspaceStatus { - repository_initialized: false, - current_branch: Some(branch), - uncommitted_changes: 0, - untracked_files: 0, - }); - } - // Dirty-driven status: avoid full workspace scan - let latest = self.latest_commit_meta(workspace_id).await?; - let previous_index: HashMap = latest - .as_ref() - .map(|c| c.file_hash_index.clone()) - .unwrap_or_default(); - - let dirty = self.fetch_dirty(workspace_id).await?; - let mut added: u32 = 0; - let mut modified: u32 = 0; - let mut deleted: u32 = 0; - - for d in dirty.iter() { - match d.op.as_str() { - 
"upsert" => { - if let Some(prev_hash) = previous_index.get(&d.path) { - // Existing file: if hash unchanged and hash known, ignore; else modified - match d.content_hash.as_ref() { - Some(h) if h == prev_hash => {} - _ => modified += 1, - } - } else { - // New file - added += 1; - } - } - "delete" => { - // Treat as deleted (even if not present in previous index) - deleted += 1; - } - _ => {} - } - } - - Ok(GitWorkspaceStatus { - repository_initialized: true, - current_branch: Some(branch), - uncommitted_changes: modified + deleted, - untracked_files: added, - }) - } - - async fn list_changes(&self, workspace_id: Uuid) -> anyhow::Result> { - // If repository isn't initialized, nothing to report - if let Some((initialized, _branch)) = self.load_repository_state(workspace_id).await? { - if !initialized { - return Ok(Vec::new()); - } - } else { - return Ok(Vec::new()); - } - - // Use dirty set to derive changes without scanning storage - let latest = self.latest_commit_meta(workspace_id).await?; - let previous_index: HashMap = latest - .as_ref() - .map(|c| c.file_hash_index.clone()) - .unwrap_or_default(); - let dirty = self.fetch_dirty(workspace_id).await?; - - let mut change_map: BTreeMap = BTreeMap::new(); - for d in dirty.iter() { - match d.op.as_str() { - "upsert" => { - if let Some(prev_hash) = previous_index.get(&d.path) { - // If hash unchanged and we know the new hash, skip reporting - match d.content_hash.as_ref() { - Some(h) if h == prev_hash => { - change_map.remove(&d.path); - } - _ => { - change_map.insert(d.path.clone(), "modified".to_string()); - } - } - } else { - change_map.insert(d.path.clone(), "untracked".to_string()); - } - } - "delete" => { - change_map.insert(d.path.clone(), "deleted".to_string()); - } - _ => {} - } - } - - let changes = change_map - .into_iter() - .map(|(path, status)| GitChangeItem { path, status }) - .collect(); - Ok(changes) - } - - async fn working_diff(&self, workspace_id: Uuid) -> anyhow::Result> { - let latest = self.latest_commit_meta(workspace_id).await?; - let previous_index = latest - .as_ref() - .map(|c| c.file_hash_index.clone()) - .unwrap_or_default(); - let current = self.collect_current_state(workspace_id).await?; - let delta = self.compute_deltas(¤t, &previous_index); - let mut results = Vec::new(); - - let latest_commit_id = latest.as_ref().map(|c| c.commit_id.clone()); - - for path in delta.added.iter().chain(delta.modified.iter()) { - if let Some(snapshot) = current.get(path) { - if snapshot.is_text { - let new_bytes = self.snapshot_bytes(snapshot).await?; - let new_content = String::from_utf8_lossy(&new_bytes).to_string(); - let old_bytes = match (&latest_commit_id, previous_index.get(path)) { - (Some(commit_id), Some(_)) => { - self.load_file_snapshot(workspace_id, commit_id.as_slice(), path) - .await? - } - _ => None, - }; - let old_text = old_bytes.and_then(|b| String::from_utf8(b).ok()); - results.push(self.build_diff_result( - path, - old_text.as_deref(), - Some(&new_content), - )); - } else { - results.push(TextDiffResult { - file_path: path.clone(), - diff_lines: Vec::new(), - old_content: None, - new_content: None, - }); - } - } - } - - for path in delta.deleted { - let old_bytes = if let (Some(commit_id), Some(_)) = - (&latest_commit_id, previous_index.get(&path)) - { - self.load_file_snapshot(workspace_id, commit_id.as_slice(), &path) - .await? 
-    async fn commit_diff(
-        &self,
-        workspace_id: Uuid,
-        from: &str,
-        to: &str,
-    ) -> anyhow::Result<Vec<TextDiffResult>> {
-        let from_meta = self.load_commit_meta_ref(workspace_id, from).await?;
-        let to_meta = self.load_commit_meta_ref(workspace_id, to).await?;
-
-        if let Some(to_meta_ref) = to_meta.as_ref() {
-            match self
-                .commit_diff_via_packs(workspace_id, from_meta.as_ref(), to_meta_ref)
-                .await
-            {
-                Ok(results) => return Ok(results),
-                Err(err) => {
-                    warn!(
-                        %err,
-                        from = from_meta
-                            .as_ref()
-                            .map(|m| encode_commit_id(&m.commit_id))
-                            .unwrap_or_else(|| "(root)".to_string()),
-                        to = encode_commit_id(&to_meta_ref.commit_id),
-                        "failed to compute commit diff from pack data, using stored snapshots"
-                    );
-                }
-            }
-        }
-
-        self.commit_diff_from_storage(workspace_id, from_meta.as_ref(), to_meta.as_ref())
-            .await
-    }
-
-    async fn history(&self, workspace_id: Uuid) -> anyhow::Result<Vec<GitCommitInfo>> {
-        let rows = sqlx::query(
-            r#"SELECT commit_id, message, author_name, author_email, committed_at
-            FROM git_commits
-            WHERE workspace_id = $1
-            ORDER BY committed_at DESC
-            LIMIT 200"#,
-        )
-        .bind(workspace_id)
-        .fetch_all(&self.pool)
-        .await?;
-
-        let history = rows
-            .into_iter()
-            .filter_map(|row| {
-                let commit_id: Vec<u8> = row.get("commit_id");
-                let message: Option<String> = row.try_get("message").ok();
-                let author_name: Option<String> = row.try_get("author_name").ok();
-                let author_email: Option<String> = row.try_get("author_email").ok();
-                let committed_at: DateTime<Utc> = row.get("committed_at");
-                Some(GitCommitInfo {
-                    hash: encode_commit_id(&commit_id),
-                    message: message.unwrap_or_default(),
-                    author_name: author_name.unwrap_or_default(),
-                    author_email: author_email.unwrap_or_default(),
-                    time: committed_at,
-                })
-            })
-            .collect();
-        Ok(history)
-    }
-
-    async fn sync(
-        &self,
-        workspace_id: Uuid,
-        req: &GitSyncRequestDto,
-        cfg: Option<&UserGitCfg>,
-    ) -> anyhow::Result<GitSyncOutcome> {
-        let state = self.load_repository_state(workspace_id).await?;
-        let Some((state_initialized, state_default_branch)) = state else {
-            anyhow::bail!("repository not initialized")
-        };
-        if !state_initialized {
-            anyhow::bail!("repository not initialized")
-        }
-
-        let branch_hint = cfg
-            .map(|c| c.branch_name.clone())
-            .unwrap_or(state_default_branch.clone());
-
-        let mut latest_meta = self.ensure_latest_meta(workspace_id).await?;
-        if latest_meta.is_none() {
-            if let Some(cfg) = cfg {
-                if !cfg.repository_url.is_empty() {
-                    // Bootstrap remote history; propagate errors to avoid proceeding without packs.
-                    self.bootstrap_remote_history(workspace_id, cfg, branch_hint.as_str())
-                        .await?;
-                    latest_meta = self.ensure_latest_meta(workspace_id).await?;
-                }
-            }
-        }
-
-        // Resolve branch without holding a DB lock for long.
-        let branch_name = cfg
-            .map(|c| c.branch_name.clone())
-            .unwrap_or(state_default_branch.clone());
-        let force_push = req.force.unwrap_or(false);
-        let force_full_scan = req.full_scan.unwrap_or(false);
-        let skip_push = req.skip_push.unwrap_or(false);
-        let push_required = cfg
-            .as_ref()
-            .map(|c| !c.repository_url.is_empty())
-            .unwrap_or(false)
-            && !skip_push;
-
-        // Ensure latest commit pack exists; if missing, attempt to rebuild from storage/remote or fail early.
-        if let Some(latest) = latest_meta.as_ref() {
-            if self
-                .git_storage
-                .fetch_pack_for_commit(workspace_id, latest.commit_id.as_slice())
-                .await?
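// Editorial sketch: encode_commit_id, used for every log field and DB lookup in
// this hunk, is presumably a thin hex wrapper over the raw oid bytes, e.g.
//     fn encode_commit_id(id: &[u8]) -> String { hex::encode(id) }
// (an assumption; the helper is defined outside this hunk).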
- .is_none() - { - // Try to restore metadata and pack from storage (if pointer mismatch), else try remote bootstrap. - warn!( - workspace_id = %workspace_id, - commit = %encode_commit_id(&latest.commit_id), - "git_sync_missing_latest_pack_detected" - ); - // Attempt backfill from storage; ensure_latest_meta will also update latest pointer. - self.ensure_storage_commit_integrity(workspace_id).await?; - latest_meta = self.ensure_latest_meta(workspace_id).await?; - if let Some(latest2) = latest_meta.as_ref() { - if self - .git_storage - .fetch_pack_for_commit(workspace_id, latest2.commit_id.as_slice()) - .await? - .is_none() - { - if let Some(cfg) = cfg { - if !cfg.repository_url.is_empty() { - info!( - workspace_id = %workspace_id, - commit = %encode_commit_id(&latest2.commit_id), - "git_sync_missing_latest_pack_bootstrap_remote" - ); - self.bootstrap_remote_history( - workspace_id, - cfg, - branch_hint.as_str(), - ) - .await?; - latest_meta = self.ensure_latest_meta(workspace_id).await?; - } - } - } - } - if let Some(latest3) = latest_meta.as_ref() { - if self - .git_storage - .fetch_pack_for_commit(workspace_id, latest3.commit_id.as_slice()) - .await? - .is_none() - { - anyhow::bail!( - "missing pack data for latest commit {}; pull and retry", - encode_commit_id(&latest3.commit_id) - ); - } - } - } - } - - let mut storage_latest = self.git_storage.latest_commit(workspace_id).await?; - let mut storage_commit_hex = storage_latest - .as_ref() - .map(|m| encode_commit_id(&m.commit_id)); - let mut db_commit_hex = latest_meta.as_ref().map(|m| encode_commit_id(&m.commit_id)); - if storage_commit_hex != db_commit_hex { - warn!( - workspace_id = %workspace_id, - db_commit = ?db_commit_hex, - storage_commit = ?storage_commit_hex, - "git_commit_pointer_mismatch_detected" - ); - if let Some(storage_meta) = storage_latest.as_ref() { - self.backfill_commits_from_storage(workspace_id, storage_meta) - .await?; - latest_meta = self.latest_commit_meta(workspace_id).await?; - } - storage_latest = self.git_storage.latest_commit(workspace_id).await?; - storage_commit_hex = storage_latest - .as_ref() - .map(|m| encode_commit_id(&m.commit_id)); - db_commit_hex = latest_meta.as_ref().map(|m| encode_commit_id(&m.commit_id)); - if storage_commit_hex == db_commit_hex { - info!( - workspace_id = %workspace_id, - commit = ?storage_commit_hex, - "git_commit_pointer_repaired_from_storage" - ); - } else { - warn!( - workspace_id = %workspace_id, - db_commit = ?db_commit_hex, - storage_commit = ?storage_commit_hex, - "git_commit_pointer_attempting_realign" - ); - self.realign_commit_history( - workspace_id, - storage_latest.clone(), - latest_meta.clone(), - ) - .await?; - latest_meta = self.ensure_latest_meta(workspace_id).await?; - storage_latest = self.git_storage.latest_commit(workspace_id).await?; - storage_commit_hex = storage_latest - .as_ref() - .map(|m| encode_commit_id(&m.commit_id)); - db_commit_hex = latest_meta.as_ref().map(|m| encode_commit_id(&m.commit_id)); - if storage_commit_hex == db_commit_hex { - info!( - workspace_id = %workspace_id, - commit = ?db_commit_hex, - "git_commit_pointer_repaired_by_prune" - ); - } else { - error!( - workspace_id = %workspace_id, - db_commit = ?db_commit_hex, - storage_commit = ?storage_commit_hex, - "git_commit_pointer_irreparable" - ); - anyhow::bail!( - "repository latest commit mismatch between database ({db_commit_hex:?}) and storage ({storage_commit_hex:?})" - ); - } - } - } - - self.ensure_storage_commit_integrity(workspace_id).await?; - latest_meta = 
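fetch_dirty, called above for status and again in the sync body below, is not shown in this hunk; from the fields consumed (path, op, is_text, content_hash) it plausibly reads the git_dirty_files queue. A sketch, with the row shape and query as assumptions:

use sqlx::Row;

struct DirtyRow {
    path: String,
    op: String, // "upsert" | "delete"
    is_text: bool,
    content_hash: Option<String>,
}

async fn fetch_dirty(
    pool: &sqlx::PgPool,
    workspace_id: uuid::Uuid,
) -> anyhow::Result<Vec<DirtyRow>> {
    let rows = sqlx::query(
        "SELECT path, op, is_text, content_hash FROM git_dirty_files WHERE workspace_id = $1",
    )
    .bind(workspace_id)
    .fetch_all(pool)
    .await?;
    Ok(rows
        .into_iter()
        .map(|r| DirtyRow {
            path: r.get("path"),
            op: r.get("op"),
            is_text: r.get("is_text"),
            // NULL content hashes become None, matching the Option handling above.
            content_hash: r.try_get("content_hash").ok(),
        })
        .collect())
}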
self.latest_commit_meta(workspace_id).await?; - - let use_full_scan = force_full_scan || latest_meta.is_none(); - - let previous_index = latest_meta - .as_ref() - .map(|c| c.file_hash_index.clone()) - .unwrap_or_default(); - let dirty_rows = self.fetch_dirty(workspace_id).await?; - - // Build change sets from dirty rows - let mut upserts: BTreeMap = BTreeMap::new(); - let mut deletes: BTreeSet = BTreeSet::new(); - if !use_full_scan { - for row in &dirty_rows { - match row.op.as_str() { - "upsert" => { - upserts.insert( - row.path.clone(), - DirtyUpsert { - is_text: row.is_text, - content_hash: row.content_hash.clone(), - }, - ); - // Upsert cancels previous delete on same path if any - deletes.remove(&row.path); - } - "delete" => { - upserts.remove(&row.path); - deletes.insert(row.path.clone()); - } - _ => {} - } - } - } - - // Filter out no-op upserts by comparing content_hash with previous index if available - if !use_full_scan { - upserts.retain( - |path, u| match (&u.content_hash, previous_index.get(path)) { - (Some(hnew), Some(hprev)) if hnew == hprev => false, - _ => true, - }, - ); - } - - // If still nothing to do, optionally push existing head when a remote is configured. - if !use_full_scan && upserts.is_empty() && deletes.is_empty() { - if push_required { - if let Some(latest) = latest_meta.as_ref() { - // Ensure pack chain exists to materialize the commit for push. - let pack_chain = self - .persist_pack_chain(workspace_id, Some(latest.commit_id.as_slice())) - .await?; - if let Some((temp_dir, pack_paths)) = pack_chain { - let repo = Repository::init_bare(temp_dir.path())?; - apply_pack_files(&repo, &pack_paths)?; - let oid = git2::Oid::from_bytes(&latest.commit_id)?; - let pushed = - perform_push(&repo, cfg.unwrap(), &branch_name, oid, force_push)?; - drop(repo); - drop(temp_dir); - let _ = self.clear_dirty(workspace_id).await; - return Ok(GitSyncOutcome { - files_changed: 0, - commit_hash: Some(encode_commit_id(&latest.commit_id)), - pushed, - message: if pushed { - "push completed".to_string() - } else { - "nothing to push".to_string() - }, - }); - } - } - } - // Nothing to commit/push: clear any leftover dirty and exit. - let _ = self.clear_dirty(workspace_id).await; - return Ok(GitSyncOutcome { - files_changed: 0, - commit_hash: latest_meta.map(|c| encode_commit_id(&c.commit_id)), - pushed: false, - message: "nothing to commit".to_string(), - }); - } - - let committed_at = Utc::now(); - let author_name = "RefMD".to_string(); - let author_email = "refmd@example.com".to_string(); - let message = req - .message - .clone() - .unwrap_or_else(|| "RefMD sync".to_string()); - - // Precompute data needed for tree build and meta before creating libgit2 objects - // This avoids holding non-Send libgit2 types across await points. - let mut precomputed_full_entries: Option>> = None; - let mut precomputed_upsert_bytes: BTreeMap> = BTreeMap::new(); - let mut changed_text_snapshots: HashMap = HashMap::new(); - let mut next_file_hash_index: HashMap = previous_index.clone(); - let mut files_changed_for_response: u32; - - if use_full_scan { - // Rebuild full-scan data fresh in case we fell back here after a pack failure. 
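perform_push, invoked in the push-only path above and again after commit creation, lives outside this hunk. One plausible shape with git2 token auth; the credential fields and refspec handling are assumptions, not the real helper:

fn perform_push_sketch(
    repo: &git2::Repository,
    url: &str,
    token: &str,
    branch: &str,
    oid: git2::Oid,
    force: bool,
) -> anyhow::Result<bool> {
    let mut callbacks = git2::RemoteCallbacks::new();
    callbacks.credentials(move |_url, username, _allowed| {
        git2::Cred::userpass_plaintext(username.unwrap_or("git"), token)
    });
    let mut opts = git2::PushOptions::new();
    opts.remote_callbacks(callbacks);
    let mut remote = repo.remote_anonymous(url)?;
    // A leading '+' on the refspec requests a forced update.
    let refspec = format!("{}{}:refs/heads/{}", if force { "+" } else { "" }, oid, branch);
    remote.push(&[refspec.as_str()], Some(&mut opts))?;
    Ok(true)
}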
- next_file_hash_index.clear(); - let current = self.collect_current_state(workspace_id).await?; - let mut entries: BTreeMap> = BTreeMap::new(); - for (path, snapshot) in current.iter() { - let bytes = self.snapshot_bytes(snapshot).await?; - entries.insert(path.clone(), bytes); - next_file_hash_index.insert(path.clone(), snapshot.hash.clone()); - } - files_changed_for_response = next_file_hash_index.len() as u32; - precomputed_full_entries = Some(entries); - } else { - let mut stale_paths: Vec = Vec::new(); - for (path, up) in upserts.iter() { - if up.is_text { - match self - .export_markdown_for_repo_path(workspace_id, path) - .await? - { - Some((bytes, hash)) => { - precomputed_upsert_bytes.insert(path.clone(), bytes.clone()); - next_file_hash_index.insert(path.clone(), hash.clone()); - changed_text_snapshots.insert( - path.clone(), - FileSnapshot { - hash, - data: FileSnapshotData::Inline(bytes), - is_text: true, - }, - ); - } - None => { - stale_paths.push(path.clone()); - } - } - continue; - } - - let storage_rel = format!("{}/{}", workspace_id, path); - let abs = self.storage.absolute_from_relative(&storage_rel); - match self.storage.read_bytes(abs.as_path()).await { - Ok(bytes) => { - precomputed_upsert_bytes.insert(path.clone(), bytes.clone()); - let hash = match up.content_hash.as_ref() { - Some(h) => h.clone(), - None => sha256_hex(&bytes), - }; - next_file_hash_index.insert(path.clone(), hash); - } - Err(e) => { - let skip = e - .downcast_ref::() - .map(|ioe| ioe.kind() == ErrorKind::NotFound) - .unwrap_or_else(|| e.to_string().to_lowercase().contains("not found")); - if skip { - stale_paths.push(path.clone()); - continue; - } else { - return Err(e); - } - } - } - } - if !stale_paths.is_empty() { - for p in stale_paths { - let _ = sqlx::query( - "DELETE FROM git_dirty_files WHERE workspace_id = $1 AND path = $2", - ) - .bind(workspace_id) - .bind(&p) - .execute(&self.pool) - .await; - } - } - for d in deletes.iter() { - next_file_hash_index.remove(d); - } - files_changed_for_response = (upserts.len() + deletes.len()) as u32; - } - - let mut previous_pack = None; - if let Some(prev_meta) = latest_meta.as_ref() { - let prev_commit_hex = encode_commit_id(&prev_meta.commit_id); - match self - .persist_pack_chain(workspace_id, Some(prev_meta.commit_id.as_slice())) - .await? - { - Some(chain) => { - previous_pack = Some(chain); - } - None => { - // Attempt to repair from remote and retry once. 
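sha256_hex, used above as the fallback content hash, is presumably just SHA-256 over the bytes, hex-encoded, given the sha2 and hex dependencies already in this crate:

fn sha256_hex(bytes: &[u8]) -> String {
    use sha2::{Digest, Sha256};
    hex::encode(Sha256::digest(bytes))
}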
- if let Some(cfg) = cfg { - if !cfg.repository_url.is_empty() { - warn!( - workspace_id = %workspace_id, - commit = %prev_commit_hex, - "git_sync_missing_pack_chain_recover" - ); - self.recover_missing_objects(workspace_id, cfg).await?; - latest_meta = self.ensure_latest_meta(workspace_id).await?; - if let Some(latest) = latest_meta.as_ref() { - previous_pack = self - .persist_pack_chain( - workspace_id, - Some(latest.commit_id.as_slice()), - ) - .await?; - } - } - } - if previous_pack.is_none() { - warn!( - workspace_id = %workspace_id, - "git_sync_missing_pack_chain_abort" - ); - anyhow::bail!( - "missing pack data for current head {}; pull/import required before sync", - prev_commit_hex - ); - } - } - } - } - - let (meta, pack_bytes, commit_hex, pushed) = { - let temp_dir = TempDirBuilder::new() - .prefix("git-sync-") - .tempdir() - .map_err(|e| anyhow::anyhow!(e))?; - let repo = Repository::init_bare(temp_dir.path())?; - - if let Some((_, ref pack_paths)) = previous_pack { - // Apply full chain to ensure delta bases are present - if let Err(err) = apply_pack_files(&repo, pack_paths) { - let lower = err.to_string().to_lowercase(); - let missing_obj = lower.contains("missing") && lower.contains("object"); - if missing_obj { - // Try to repair packs by re-bootstrap from remote, then retry apply once more. - warn!( - workspace_id = %workspace_id, - error = %err, - "git_sync_pack_missing_objects_retry_bootstrap" - ); - if let Some(cfg) = cfg { - if !cfg.repository_url.is_empty() { - let branch = branch_name.clone(); - self.bootstrap_remote_history(workspace_id, cfg, branch.as_str()) - .await?; - previous_pack = self - .persist_pack_chain( - workspace_id, - latest_meta.as_ref().map(|m| m.commit_id.as_slice()), - ) - .await?; - if let Some((_, ref pack_paths_retry)) = previous_pack { - if apply_pack_files(&repo, pack_paths_retry).is_err() { - // Last resort: recover objects and retry once more. - warn!( - workspace_id = %workspace_id, - "git_sync_pack_retry_still_missing_recovering_objects" - ); - self.recover_missing_objects(workspace_id, cfg).await?; - latest_meta = self.ensure_latest_meta(workspace_id).await?; - previous_pack = self - .persist_pack_chain( - workspace_id, - latest_meta - .as_ref() - .map(|m| m.commit_id.as_slice()), - ) - .await?; - if let Some((_, ref pack_paths_retry2)) = previous_pack { - apply_pack_files(&repo, pack_paths_retry2)?; - } else { - anyhow::bail!( - "missing pack objects after recovery; pull/import required before sync" - ); - } - } - } else { - anyhow::bail!( - "missing pack objects after bootstrap; pull/import required before sync" - ); - } - } - } - anyhow::bail!( - "missing pack objects for {}; pull/import to repair history", - latest_meta - .as_ref() - .map(|m| encode_commit_id(&m.commit_id)) - .unwrap_or_else(|| "unknown".to_string()) - ); - } else { - return Err(err); - } - } - } - - // Skip pre-fetch/verify to avoid remote redirect/auth loops; rely on push outcome. - // Build sources from either full scan or dirty set (no awaits here) - let tree_oid = if use_full_scan { - if precomputed_full_entries.is_none() { - // We fell back to full-scan after a pack failure; rebuild snapshots fresh. 
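apply_pack_files is also defined elsewhere; the standard git2 route for ingesting raw packs is Odb::packwriter, so it likely resembles this sketch (the real helper's error classification clearly matters, per the "missing object" handling above):

fn apply_pack_files_sketch(
    repo: &git2::Repository,
    packs: &[std::path::PathBuf],
) -> anyhow::Result<()> {
    use std::io::Write;
    for path in packs {
        let bytes = std::fs::read(path)?;
        let odb = repo.odb()?;
        let mut writer = odb.packwriter()?;
        writer.write_all(&bytes)?;
        // commit() indexes the pack so its objects become visible to the repo.
        writer.commit()?;
    }
    Ok(())
}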
- next_file_hash_index.clear(); - let current = self.collect_current_state(workspace_id).await?; - let mut entries: BTreeMap> = BTreeMap::new(); - for (path, snapshot) in current.iter() { - let bytes = self.snapshot_bytes(snapshot).await?; - entries.insert(path.clone(), bytes); - next_file_hash_index.insert(path.clone(), snapshot.hash.clone()); - } - files_changed_for_response = next_file_hash_index.len() as u32; - precomputed_full_entries = Some(entries); - } - let entries = precomputed_full_entries - .as_ref() - .ok_or_else(|| anyhow!("full-scan entries missing"))?; - build_tree_from_entries(&repo, entries)? - } else { - // Incremental: reuse previous blobs for unchanged paths - let mut sources: BTreeMap = BTreeMap::new(); - if let Some(prev_meta) = latest_meta.as_ref() { - let prev_oids = read_commit_blob_oids(&repo, prev_meta.commit_id.as_slice())?; - for (p, oid) in prev_oids { - // start from previous - sources.insert(p, FileSource::Oid(oid)); - } - } - for d in deletes.iter() { - sources.remove(d); - } - for (path, bytes) in precomputed_upsert_bytes.iter() { - sources.insert(path.clone(), FileSource::Bytes(bytes.clone())); - } - build_tree_from_sources(&repo, &sources)? - }; - let tree = repo.find_tree(tree_oid)?; - - let mut parent_commits = Vec::new(); - if let Some(prev_meta) = latest_meta.as_ref() { - let parent_oid = git2::Oid::from_bytes(&prev_meta.commit_id)?; - parent_commits.push(repo.find_commit(parent_oid)?); - } - let parent_refs: Vec<&Commit> = parent_commits.iter().collect(); - - let branch_ref = format!("refs/heads/{}", branch_name); - let author_sig = signature_from_parts(&author_name, &author_email, committed_at)?; - let commit_oid = repo.commit( - Some(&branch_ref), - &author_sig, - &author_sig, - &message, - &tree, - &parent_refs, - )?; - let commit_hex = encode_commit_id(commit_oid.as_bytes()); - - let mut pack_builder = repo.packbuilder()?; - pack_builder.insert_commit(commit_oid)?; - // Include parent commit objects to avoid missing bases when applying packs later. - for parent in parent_commits.iter() { - pack_builder.insert_commit(parent.id())?; - } - let mut pack_buf = git2::Buf::new(); - pack_builder.write_buf(&mut pack_buf)?; - let pack_bytes = pack_buf.to_vec(); - drop(pack_builder); - drop(tree); - drop(parent_commits); - drop(author_sig); - - // Use precomputed next_file_hash_index for meta - let file_hash_index = next_file_hash_index; - - let message_opt = if message.trim().is_empty() { - None - } else { - Some(message.clone()) - }; - - let meta = CommitMeta { - commit_id: commit_oid.as_bytes().to_vec(), - parent_commit_id: latest_meta.as_ref().map(|c| c.commit_id.clone()), - message: message_opt, - author_name: Some(author_name.clone()), - author_email: Some(author_email.clone()), - committed_at, - pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex.clone()), - file_hash_index, - }; - - let mut pushed = false; - if let Some(cfg) = cfg { - if !cfg.repository_url.is_empty() && !skip_push { - // Propagate push errors so the caller can retry with force - pushed = perform_push(&repo, cfg, &branch_name, commit_oid, force_push)?; - } - } - - drop(repo); - let _ = temp_dir.close(); - - // files_changed_for_response computed earlier - - (meta, pack_bytes, commit_hex, pushed) - }; - - if let Some((dir, _)) = previous_pack { - drop(dir); - } - - // If push to a configured remote failed, do not advance local commit pointers or clear dirty state. 
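The incremental tree build above distinguishes blobs that can be reused by oid from bytes that still need writing; the FileSource type it passes to build_tree_from_sources is presumably:

// Inferred from the FileSource::Oid / FileSource::Bytes call sites above;
// the actual definition lives outside this hunk.
enum FileSource {
    // Blob already present in the object database via the previous commit's tree.
    Oid(git2::Oid),
    // New or modified content that still needs a blob written.
    Bytes(Vec<u8>),
}

Reusing oids for unchanged paths means unchanged files cost no re-read or re-hash during an incremental sync.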
-        // Leave files as-is so the next sync attempt will retry the push instead of treating the workspace as clean.
-        if push_required && !pushed {
-            return Ok(GitSyncOutcome {
-                files_changed: files_changed_for_response,
-                commit_hash: None,
-                pushed: false,
-                message: "commit created (push failed)".to_string(),
-            });
-        }
-
-        // Short, focused transaction for DB writes only.
-        let mut tx = self.pool.begin().await?;
-        // Recheck repository state exists before writing.
-        let repo_row2 =
-            sqlx::query("SELECT initialized FROM git_repository_state WHERE workspace_id = $1")
-                .bind(workspace_id)
-                .fetch_optional(&mut *tx)
-                .await?;
-        let Some(repo_row2) = repo_row2 else {
-            tx.rollback().await.ok();
-            anyhow::bail!("repository not initialized")
-        };
-        let initialized2: bool = repo_row2.get("initialized");
-        if !initialized2 {
-            tx.rollback().await.ok();
-            anyhow::bail!("repository not initialized")
-        }
-
-        sqlx::query(
-            r#"INSERT INTO git_commits (
-                commit_id,
-                parent_commit_id,
-                workspace_id,
-                message,
-                author_name,
-                author_email,
-                committed_at,
-                pack_key,
-                file_hash_index
-            ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)"#,
-        )
-        .bind(meta.commit_id.clone())
-        .bind(meta.parent_commit_id.clone())
-        .bind(workspace_id)
-        .bind(meta.message.clone())
-        .bind(meta.author_name.clone())
-        .bind(meta.author_email.clone())
-        .bind(meta.committed_at)
-        .bind(meta.pack_key.clone())
-        .bind(Json(&meta.file_hash_index))
-        .execute(&mut *tx)
-        .await?;
-
-        sqlx::query("UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1")
-            .bind(workspace_id)
-            .execute(&mut *tx)
-            .await?;
-
-        // Only store snapshots for changed text files (incremental), or all in initial full scan
-        let snapshot_keys = if use_full_scan {
-            // full state snapshot
-            let current = self.collect_current_state(workspace_id).await?;
-            match self
-                .store_commit_snapshots(workspace_id, &meta.commit_id, &current)
-                .await
-            {
-                Ok(keys) => keys,
-                Err(err) => {
-                    tx.rollback().await.ok();
-                    return Err(err);
-                }
-            }
-        } else {
-            match self
-                .store_commit_snapshots(workspace_id, &meta.commit_id, &changed_text_snapshots)
-                .await
-            {
-                Ok(keys) => keys,
-                Err(err) => {
-                    tx.rollback().await.ok();
-                    return Err(err);
-                }
-            }
-        };
-
-        if let Err(err) = self
-            .git_storage
-            .store_pack(workspace_id, &pack_bytes, &meta)
-            .await
-        {
-            for key in snapshot_keys.iter().rev() {
-                let _ = self.git_storage.delete_blob(key).await;
-            }
-            tx.rollback().await.ok();
-            return Err(err);
-        }
-
-        if let Err(err) = self
-            .git_storage
-            .set_latest_commit(workspace_id, Some(&meta))
-            .await
-        {
-            let _ = self
-                .git_storage
-                .delete_pack(workspace_id, &meta.commit_id)
-                .await;
-            for key in snapshot_keys.iter().rev() {
-                let _ = self.git_storage.delete_blob(key).await;
-            }
-            tx.rollback().await.ok();
-            return Err(err);
-        }
-
-        if let Err(err) = tx.commit().await {
-            let _ = self
-                .git_storage
-                .delete_pack(workspace_id, &meta.commit_id)
-                .await;
-            for key in snapshot_keys.iter().rev() {
-                let _ = self.git_storage.delete_blob(key).await;
-            }
-            let _ = self
-                .git_storage
-                .set_latest_commit(workspace_id, latest_meta.as_ref())
-                .await;
-            return Err(err.into());
-        }
-
-        // Best-effort clear of processed dirty entries
-        self.clear_dirty(workspace_id).await.map_err(|err| {
-            error!(workspace_id = %workspace_id, error = %err, "git_import_clear_dirty_failed");
-            err
-        })?;
-        let outcome_message = if pushed {
-            "sync completed".to_string()
-        } else if skip_push {
-            "sync completed (push skipped)".to_string()
-        } else {
-            "commit created (push
failed)".to_string() - }; - - Ok(GitSyncOutcome { - files_changed: files_changed_for_response, - commit_hash: Some(commit_hex), - pushed, - message: outcome_message, - }) - } - - async fn import_repository( - &self, - workspace_id: Uuid, - actor_id: Uuid, - cfg: &UserGitCfg, - ) -> anyhow::Result { - // Suppress dirty tracking globally during import so filesystem watcher/ingest won't re-mark files. - let _global_dirty_guard = crate::infrastructure::storage::suppress_git_dirty_global(); - let branch = if cfg.branch_name.is_empty() { - "main".to_string() - } else { - cfg.branch_name.clone() - }; - self.ensure_repository(workspace_id, &branch).await?; - - let previous_index = self - .latest_commit_meta(workspace_id) - .await? - .map(|m| m.file_hash_index) - .unwrap_or_default(); - - // Populate storage and DB with remote history; surface errors so we don't proceed with missing packs. - self.bootstrap_remote_history(workspace_id, cfg, branch.as_str()) - .await?; - let latest = self.ensure_latest_meta(workspace_id).await?; - let Some(latest_meta) = latest else { - return Ok(GitImportOutcome { - files_changed: 0, - commit_hash: None, - docs_created: 0, - attachments_created: 0, - message: "remote has no commits".to_string(), - }); - }; - - let state = self - .state_from_commit_meta(workspace_id, &latest_meta) - .await?; - let files_changed = crate::infrastructure::storage::suppress_git_dirty(async { - self.apply_state_to_workspace(workspace_id, &state, &previous_index) - .await - }) - .await?; - - // Materialize documents and attachments from imported state; surface failures so Import can fail loudly. - let (docs_created, attachments_created) = - crate::infrastructure::storage::suppress_git_dirty(async { - self.materialize_documents_from_state(workspace_id, actor_id, &state) - .await - }) - .await?; - - self.apply_merged_to_documents(workspace_id, &state).await?; - self.clear_dirty(workspace_id).await.map_err(|err| { - error!(workspace_id = %workspace_id, error = %err, "git_import_clear_dirty_failed"); - err - })?; - - Ok(GitImportOutcome { - files_changed, - docs_created, - attachments_created, - commit_hash: Some(encode_commit_id(&latest_meta.commit_id)), - message: "import completed".to_string(), - }) - } - - async fn pull( - &self, - workspace_id: Uuid, - actor_id: Uuid, - req: &GitPullRequestDto, - cfg: &UserGitCfg, - ) -> anyhow::Result { - let mut recover_attempts: u8 = 0; - let mut skip_local_pack_restore = false; - loop { - match self - .pull_once(workspace_id, actor_id, req, cfg, skip_local_pack_restore) - .await - { - Ok(dto) => return Ok(dto), - Err(err) => { - if Self::is_missing_objects(&err) { - if recover_attempts < 2 { - recover_attempts += 1; - skip_local_pack_restore = true; - warn!( - workspace_id = %workspace_id, - attempt = %recover_attempts, - error = %err, - "git_pull_missing_objects_recovering" - ); - self.recover_missing_objects(workspace_id, cfg).await?; - continue; - } - } - return Err(err); - } - } - } - } - - async fn head_commit(&self, workspace_id: Uuid) -> anyhow::Result>> { - Ok(self - .latest_commit_meta(workspace_id) - .await? 
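Self::is_missing_objects drives the bounded recovery loop above; it presumably mirrors the substring checks used in the sync path earlier in this file:

fn is_missing_objects(err: &anyhow::Error) -> bool {
    let lower = err.to_string().to_lowercase();
    lower.contains("missing") && lower.contains("object")
}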
- .map(|m| m.commit_id)) - } - - async fn remote_head( - &self, - workspace_id: Uuid, - cfg: &UserGitCfg, - ) -> anyhow::Result>> { - let state = self.load_repository_state(workspace_id).await?; - let Some((initialized, branch_default)) = state else { - anyhow::bail!("repository not initialized"); - }; - if !initialized { - anyhow::bail!("repository not initialized"); - } - if cfg.repository_url.is_empty() { - anyhow::bail!("remote not configured"); - } - let branch = if cfg.branch_name.is_empty() { - branch_default - } else { - cfg.branch_name.clone() - }; - let temp_dir = TempDirBuilder::new() - .prefix("git-remote-head-") - .tempdir() - .map_err(|e| anyhow!(e))?; - let repo = Repository::init_bare(temp_dir.path())?; - let head = fetch_remote_head(&repo, cfg, &branch)?; - Ok(head.map(|oid| oid.as_bytes().to_vec())) - } - - async fn has_pending_changes(&self, workspace_id: Uuid) -> anyhow::Result { - let dirty_rows = self.fetch_dirty(workspace_id).await?; - Ok(!dirty_rows.is_empty()) - } - - async fn drift_since_commit( - &self, - workspace_id: Uuid, - base_commit: &[u8], - ) -> anyhow::Result { - let Some(meta) = self.commit_meta_by_id(workspace_id, base_commit).await? else { - return Ok(true); - }; - let base_index = meta.file_hash_index; - let current_state = self.collect_current_state(workspace_id).await?; - if base_index.len() != current_state.len() { - return Ok(true); - } - for (path, snapshot) in current_state.into_iter() { - let Some(base_hash) = base_index.get(&path) else { - return Ok(true); - }; - if base_hash != &snapshot.hash { - return Ok(true); - } - } - Ok(false) - } - - async fn check_remote( - &self, - workspace_id: Uuid, - cfg: &UserGitCfg, - ) -> anyhow::Result { - if cfg.repository_url.is_empty() { - return Ok(GitRemoteCheckDto { - ok: true, - message: "remote not configured".to_string(), - reason: Some("no_remote".to_string()), - }); - } - let branch = cfg.branch_name.clone(); - let temp_dir = TempDirBuilder::new() - .prefix("git-check-") - .tempdir() - .map_err(|e| anyhow!(e))?; - let repo = Repository::init_bare(temp_dir.path())?; - let result = match fetch_remote_head(&repo, cfg, &branch) { - Ok(Some(_)) => GitRemoteCheckDto { - ok: true, - message: "remote reachable".to_string(), - reason: None, - }, - Ok(None) => GitRemoteCheckDto { - ok: false, - message: format!("branch '{branch}' not found on remote"), - reason: Some("branch_missing".to_string()), - }, - Err(err) => { - let lower = err.to_string().to_lowercase(); - let (reason, msg) = if lower.contains("git_http_auth_redirect") { - ( - Some("auth_required".to_string()), - "remote requires authentication or SSO approval".to_string(), - ) - } else if lower.contains("git_http_not_found") || lower.contains("status code: 404") - { - ( - Some("repo_not_found".to_string()), - "repository URL or branch not found".to_string(), - ) - } else { - (None, err.to_string()) - }; - GitRemoteCheckDto { - ok: false, - message: msg, - reason, - } - } - }; - drop(repo); - let _ = temp_dir.close(); - info!(workspace_id = %workspace_id, ok = %result.ok, reason = ?result.reason, "git_remote_check_completed"); - Ok(result) - } -} - -impl GitWorkspaceService { - async fn build_conflict_item( - &self, - workspace_id: Uuid, - path: &str, - current_state: &HashMap, - remote_state: &HashMap, - local_meta: Option<&CommitMeta>, - ) -> anyhow::Result { - let ours_bytes = if let Some(snap) = current_state.get(path) { - Some(self.snapshot_bytes(snap).await?) 
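fetch_remote_head, used by remote_head and check_remote above, resolves a branch tip on the remote. Listing advertised refs is enough for the lookup itself; a sketch (the real helper also wires auth callbacks and, on the pull path, must actually fetch the branch, since remote commits are read from the local odb afterwards):

fn fetch_remote_head_sketch(
    repo: &git2::Repository,
    url: &str,
    branch: &str,
) -> anyhow::Result<Option<git2::Oid>> {
    let mut remote = repo.remote_anonymous(url)?;
    remote.connect(git2::Direction::Fetch)?;
    let target = format!("refs/heads/{branch}");
    Ok(remote
        .list()?
        .iter()
        .find(|head| head.name() == target)
        .map(|head| head.oid()))
}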
- } else { - None - }; - let theirs_bytes = if let Some(snap) = remote_state.get(path) { - Some(self.snapshot_bytes(snap).await?) - } else { - Some(Vec::new()) - }; - let base_bytes = if let Some(meta) = local_meta.as_ref() { - self.load_file_snapshot(workspace_id, meta.commit_id.as_slice(), path) - .await? - } else { - None - }; - - let (mut ours, ours_bin) = as_text_or_binary(path, ours_bytes.as_ref()); - let (mut theirs, theirs_bin) = as_text_or_binary(path, theirs_bytes.as_ref()); - let (mut base, base_bin) = as_text_or_binary(path, base_bytes.as_ref()); - let is_binary = ours_bin || theirs_bin || base_bin; - if !is_binary { - ours = strip_front_matter_body(path, ours); - theirs = strip_front_matter_body(path, theirs); - base = strip_front_matter_body(path, base); - } - - Ok(GitPullConflictItemDto { - path: path.to_string(), - is_binary, - ours, - theirs, - base, - document_id: None, - }) - } - - async fn pull_once( - &self, - workspace_id: Uuid, - actor_id: Uuid, - req: &GitPullRequestDto, - cfg: &UserGitCfg, - skip_local_pack_restore: bool, - ) -> anyhow::Result { - let state = self.load_repository_state(workspace_id).await?; - let Some((initialized, branch_default)) = state else { - anyhow::bail!("repository not initialized"); - }; - if !initialized { - anyhow::bail!("repository not initialized"); - } - if cfg.repository_url.is_empty() { - anyhow::bail!("remote not configured"); - } - - let branch = if cfg.branch_name.is_empty() { - branch_default - } else { - cfg.branch_name.clone() - }; - - // Capture current workspace head before touching remote history. - let mut local_meta = self.latest_commit_meta(workspace_id).await?; - // After a recovery we want to treat pull as a fresh fast-forward from remote. - if skip_local_pack_restore { - local_meta = None; - } - let mut local_history_reset = false; - let mut base_index: HashMap = local_meta - .as_ref() - .map(|m| m.file_hash_index.clone()) - .unwrap_or_default(); - let mut previous_index = base_index.clone(); - let mut base_commit = local_meta.as_ref().map(|m| m.commit_id.clone()); - - let temp_dir = TempDirBuilder::new() - .prefix("git-pull-") - .tempdir() - .map_err(|e| anyhow::anyhow!(e))?; - let repo = Repository::init_bare(temp_dir.path())?; - if !skip_local_pack_restore { - match self - .persist_pack_chain( - workspace_id, - local_meta.as_ref().map(|m| m.commit_id.as_slice()), - ) - .await? - { - Some((_, pack_paths)) => { - apply_pack_files(&repo, &pack_paths)?; - } - None => { - warn!( - workspace_id = %workspace_id, - "git_pull_pack_restore_missing_resetting_base" - ); - // Storage/DB history was reset; treat as fresh pull with no local history. - local_meta = None; - local_history_reset = true; - base_index.clear(); - previous_index.clear(); - base_commit = None; - } - } - } else { - info!(workspace_id = %workspace_id, "git_pull_skip_local_pack_restore"); - } - - let remote_oid = { - let Some(head) = fetch_remote_head(&repo, cfg, &branch)? else { - return Ok(GitPullResultDto { - success: false, - message: format!("branch '{branch}' not found on remote"), - files_changed: 0, - commit_hash: None, - conflicts: None, - base_commit: base_commit.clone(), - remote_commit: None, - }); - }; - head - }; - let remote_commit = Some(remote_oid.as_bytes().to_vec()); - - let mut local_oid = if local_history_reset { - None - } else { - local_meta - .as_ref() - .and_then(|m| git2::Oid::from_bytes(&m.commit_id).ok()) - }; - // If workspace has no local commit recorded (fresh pull), fall back to latest known meta after bootstrap. 
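as_text_or_binary returns the decoded text plus a binary flag, matching the UTF-8 probes used elsewhere in this file; a sketch (the path parameter may drive extension-based checks in the real helper):

fn as_text_or_binary(_path: &str, bytes: Option<&Vec<u8>>) -> (Option<String>, bool) {
    match bytes {
        None => (None, false),
        Some(b) => match std::str::from_utf8(b) {
            Ok(text) => (Some(text.to_string()), false),
            // Undecodable content is reported as binary with no text payload.
            Err(_) => (None, true),
        },
    }
}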
- if local_oid.is_none() && !skip_local_pack_restore && !local_history_reset { - if let Some(meta) = self.latest_commit_meta(workspace_id).await? { - base_index = meta.file_hash_index.clone(); - previous_index = base_index.clone(); - base_commit = Some(meta.commit_id.clone()); - local_oid = git2::Oid::from_bytes(&meta.commit_id).ok(); - local_meta = Some(meta); - } - } - // Detect drift between latest commit and current workspace using the same dirty set as Git Changes/Status. - let dirty_rows = self.fetch_dirty(workspace_id).await?; - let current_state = self.collect_current_state(workspace_id).await?; - info!(workspace_id = %workspace_id, dirty_count = dirty_rows.len(), skip_local_pack_restore = skip_local_pack_restore, "git_pull_dirty_state"); - - #[derive(Clone, Copy, PartialEq, Eq)] - enum CommitRelation { - NoLocal, - Same, - LocalAhead, - RemoteAhead, - Diverged, - } - - let commit_relation = if let Some(local_oid_val) = local_oid { - if local_oid_val == remote_oid { - CommitRelation::Same - } else if repo.graph_descendant_of(local_oid_val, remote_oid)? { - CommitRelation::LocalAhead - } else if repo.graph_descendant_of(remote_oid, local_oid_val)? { - CommitRelation::RemoteAhead - } else { - CommitRelation::Diverged - } - } else { - CommitRelation::NoLocal - }; - - // Nothing to do when remote is identical to or behind the local head. - if matches!( - commit_relation, - CommitRelation::Same | CommitRelation::LocalAhead - ) { - let commit_hash = local_oid - .as_ref() - .map(|oid| encode_commit_id(oid.as_bytes())); - return Ok(GitPullResultDto { - success: true, - message: "no remote changes".to_string(), - files_changed: 0, - commit_hash, - conflicts: None, - base_commit: base_commit.clone(), - remote_commit: remote_commit.clone(), - }); - } - - // Build remote state directly from fetched pack (git2 tree), independent of DB meta. 
-        fn collect_remote_state(
-            repo: &Repository,
-            oid: git2::Oid,
-        ) -> anyhow::Result<HashMap<String, FileSnapshot>> {
-            let commit = repo.find_commit(oid)?;
-            let tree = commit.tree()?;
-            let mut out: HashMap<String, FileSnapshot> = HashMap::new();
-
-            fn walk(
-                repo: &Repository,
-                tree: &git2::Tree,
-                prefix: &str,
-                out: &mut HashMap<String, FileSnapshot>,
-            ) -> anyhow::Result<()> {
-                for entry in tree.iter() {
-                    let name = entry.name().unwrap_or_default();
-                    let path = if prefix.is_empty() {
-                        name.to_string()
-                    } else {
-                        format!("{prefix}{name}")
-                    };
-                    match entry.kind() {
-                        Some(git2::ObjectType::Tree) => {
-                            if let Some(sub) = entry.to_object(repo)?.as_tree() {
-                                walk(repo, sub, &(path.clone() + "/"), out)?;
-                            }
-                        }
-                        Some(git2::ObjectType::Blob) => {
-                            let blob = repo.find_blob(entry.id())?;
-                            let bytes = blob.content().to_vec();
-                            let hash = sha256_hex(&bytes);
-                            let is_text = std::str::from_utf8(&bytes).is_ok();
-                            out.insert(
-                                path,
-                                FileSnapshot {
-                                    hash,
-                                    data: FileSnapshotData::Inline(bytes),
-                                    is_text,
-                                },
-                            );
-                        }
-                        _ => {}
-                    }
-                }
-                Ok(())
-            }
-
-            walk(repo, &tree, "", &mut out)?;
-            Ok(out)
-        }
-
-        let remote_state = collect_remote_state(&repo, remote_oid)?;
-        let mut remote_conflicts: Vec<GitPullConflictItemDto> = Vec::new();
-        let mut remote_changed_paths: HashSet<String> = HashSet::new();
-        for (path, snap) in remote_state.iter() {
-            if base_index.get(path) != Some(&snap.hash) {
-                remote_changed_paths.insert(path.clone());
-            }
-        }
-        for path in base_index.keys() {
-            if !remote_state.contains_key(path) {
-                remote_changed_paths.insert(path.clone());
-            }
-        }
-        let remote_changed_paths_vec: Vec<String> = remote_changed_paths.iter().cloned().collect();
-        for path in remote_changed_paths_vec.iter() {
-            let item = self
-                .build_conflict_item(
-                    workspace_id,
-                    path,
-                    &current_state,
-                    &remote_state,
-                    local_meta.as_ref(),
-                )
-                .await?;
-            remote_conflicts.push(item);
-        }
-
-        // First-time pull with no local history and no dirty changes: allow fast-forward without forcing conflicts.
-        if local_meta.is_none() && dirty_rows.is_empty() {
-            remote_conflicts.clear();
-        }
-
-        // If commits differ but no conflict paths were detected above, fallback to diff of current vs remote trees.
-        if remote_conflicts.is_empty() {
-            let local_oid_val = local_oid.unwrap_or(remote_oid);
-            if remote_oid != local_oid_val {
-                let mut all_paths: HashSet<String> = HashSet::new();
-                for p in remote_state.keys() {
-                    all_paths.insert(p.clone());
-                }
-                for p in current_state.keys() {
-                    all_paths.insert(p.clone());
-                }
-                for path in all_paths {
-                    let remote_hash = remote_state.get(&path).map(|s| &s.hash);
-                    let local_hash = current_state.get(&path).map(|s| &s.hash);
-                    if remote_hash == local_hash {
-                        continue;
-                    }
-
-                    let item = self
-                        .build_conflict_item(
-                            workspace_id,
-                            &path,
-                            &current_state,
-                            &remote_state,
-                            local_meta.as_ref(),
-                        )
-                        .await?;
-                    remote_conflicts.push(item);
-                }
-            }
-        }
-        let remote_changes = !remote_conflicts.is_empty();
-        let remote_ahead_clean =
-            matches!(commit_relation, CommitRelation::RemoteAhead) && dirty_rows.is_empty();
-        let fast_forward_remote =
-            matches!(commit_relation, CommitRelation::NoLocal) || remote_ahead_clean;
-
-        // Detect overlap between remote-changed paths and dirty rows to avoid false conflicts.
- let dirty_paths: HashSet = dirty_rows.iter().map(|r| r.path.clone()).collect(); - let dirty_remote_overlap = remote_changed_paths_vec - .iter() - .any(|p| dirty_paths.contains(p)); - - info!( - workspace_id = %workspace_id, - dirty_count = dirty_rows.len(), - remote_conflict_count = remote_conflicts.len(), - remote_changes = remote_changes, - resolutions_count = req.resolutions.len(), - dirty_remote_overlap = dirty_remote_overlap, - "git_pull_debug_state" - ); - - // If workspace has dirty changes overlapping remote changes, require explicit resolutions. - if remote_changes && dirty_remote_overlap && req.resolutions.is_empty() { - let conflicts = if remote_conflicts.is_empty() { - vec![GitPullConflictItemDto { - path: "".to_string(), - is_binary: false, - ours: None, - theirs: None, - base: None, - document_id: None, - }] - } else { - remote_conflicts.clone() - }; - return Ok(GitPullResultDto { - success: false, - message: "conflicts detected".to_string(), - files_changed: 0, - commit_hash: None, - conflicts: Some(conflicts), - base_commit: base_commit.clone(), - remote_commit: remote_commit.clone(), - }); - } - - // Ensure remote head commit metadata/pack exists locally for merge parent and future syncs. - let mut remote_pack: Option<(CommitMeta, Vec)> = None; - if self - .commit_meta_by_id(workspace_id, remote_oid.as_bytes()) - .await? - .is_none() - { - let remote_index: HashMap = remote_state - .iter() - .map(|(path, snap)| (path.clone(), snap.hash.clone())) - .collect(); - let (remote_meta, remote_pack_bytes) = { - let remote_commit_obj = repo.find_commit(remote_oid)?; - let committed_at = git_time_to_datetime(remote_commit_obj.time())?; - let message = remote_commit_obj - .message() - .map(|m| m.trim_end_matches('\n').to_string()) - .filter(|m| !m.trim().is_empty()); - let author = remote_commit_obj.author(); - let author_name = author.name().map(|s| s.to_string()); - let author_email = author.email().map(|s| s.to_string()); - let parent_commit_id = if remote_commit_obj.parent_count() > 0 { - let parent = remote_commit_obj.parent_id(0)?; - Some(parent.as_bytes().to_vec()) - } else { - None - }; - - let mut pack_builder = repo.packbuilder()?; - pack_builder.insert_commit(remote_oid)?; - if let Some(parent_id) = parent_commit_id.as_ref() { - if let Ok(parent_oid) = git2::Oid::from_bytes(parent_id) { - let _ = pack_builder.insert_commit(parent_oid); - } - } - let mut pack_buf = git2::Buf::new(); - pack_builder.write_buf(&mut pack_buf)?; - let pack_bytes = pack_buf.to_vec(); - - let commit_hex = encode_commit_id(remote_oid.as_bytes()); - let remote_meta = CommitMeta { - commit_id: remote_oid.as_bytes().to_vec(), - parent_commit_id, - message, - author_name, - author_email, - committed_at, - pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex), - file_hash_index: remote_index, - }; - (remote_meta, pack_bytes) - }; - remote_pack = Some((remote_meta, remote_pack_bytes)); - } - - // Fast-forward when there is no local history or the workspace head cleanly trails remote. - // For fresh workspaces with dirty changes, surface conflicts instead of overwriting. 
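git_time_to_datetime, used above when recording the remote commit, bridges libgit2's epoch-seconds Time into the chrono DateTime stored in committed_at; presumably along these lines, with recent chrono (offset handling is an assumption):

fn git_time_to_datetime(t: git2::Time) -> anyhow::Result<chrono::DateTime<chrono::Utc>> {
    chrono::DateTime::from_timestamp(t.seconds(), 0)
        .ok_or_else(|| anyhow::anyhow!("commit timestamp out of range"))
}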
- if fast_forward_remote { - if matches!(commit_relation, CommitRelation::NoLocal) - && (!dirty_rows.is_empty() || !remote_conflicts.is_empty()) - { - return Ok(GitPullResultDto { - success: false, - message: "conflicts detected".to_string(), - files_changed: 0, - commit_hash: None, - conflicts: Some(remote_conflicts.clone()), - base_commit: base_commit.clone(), - remote_commit: remote_commit.clone(), - }); - } - // Ensure we have pack data for the remote head regardless of existing metadata. - let (remote_meta, remote_pack_bytes) = if let Some((meta, pack)) = remote_pack.take() { - (meta, Some(pack)) - } else { - let mut pack_builder = repo.packbuilder()?; - pack_builder.insert_commit(remote_oid)?; - // Include parent to avoid missing bases later. - if let Ok(parent_id) = repo.find_commit(remote_oid).and_then(|c| c.parent_id(0)) { - let _ = pack_builder.insert_commit(parent_id); - } - let mut pack_buf = git2::Buf::new(); - pack_builder.write_buf(&mut pack_buf)?; - let pack_bytes = pack_buf.to_vec(); - - let remote_index: HashMap = remote_state - .iter() - .map(|(p, snap)| (p.clone(), snap.hash.clone())) - .collect(); - let commit = repo.find_commit(remote_oid)?; - let committed_at = git_time_to_datetime(commit.time())?; - let message = commit - .message() - .map(|m| m.trim_end_matches('\n').to_string()) - .filter(|m| !m.trim().is_empty()); - let author = commit.author(); - let author_name = author.name().map(|s| s.to_string()); - let author_email = author.email().map(|s| s.to_string()); - let parent_commit_id = if commit.parent_count() > 0 { - Some(commit.parent_id(0)?.as_bytes().to_vec()) - } else { - None - }; - let commit_hex = encode_commit_id(remote_oid.as_bytes()); - let meta = CommitMeta { - commit_id: remote_oid.as_bytes().to_vec(), - parent_commit_id, - message, - author_name, - author_email, - committed_at, - pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex), - file_hash_index: remote_index, - }; - (meta, Some(pack_bytes)) - }; - - if let Some(pack_bytes) = remote_pack_bytes.as_ref() { - self.git_storage - .store_pack(workspace_id, pack_bytes, &remote_meta) - .await?; - } - self.upsert_commit_record(workspace_id, &remote_meta) - .await?; - - let snapshot_keys = self - .store_commit_snapshots(workspace_id, &remote_meta.commit_id, &remote_state) - .await?; - - if let Err(err) = self - .git_storage - .set_latest_commit(workspace_id, Some(&remote_meta)) - .await - { - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - return Err(err); - } - - let mut tx = self.pool.begin().await?; - // Ensure repo row still exists and initialized. 
- let repo_row = - sqlx::query("SELECT initialized FROM git_repository_state WHERE workspace_id = $1") - .bind(workspace_id) - .fetch_optional(&mut *tx) - .await?; - let Some(repo_row) = repo_row else { - tx.rollback().await.ok(); - anyhow::bail!("repository not initialized") - }; - let initialized: bool = repo_row.get("initialized"); - if !initialized { - tx.rollback().await.ok(); - anyhow::bail!("repository not initialized") - } - - sqlx::query( - r#"INSERT INTO git_commits ( - commit_id, - parent_commit_id, - workspace_id, - message, - author_name, - author_email, - committed_at, - pack_key, - file_hash_index - ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) - ON CONFLICT (commit_id, workspace_id) DO NOTHING"#, - ) - .bind(remote_meta.commit_id.clone()) - .bind(remote_meta.parent_commit_id.clone()) - .bind(workspace_id) - .bind(remote_meta.message.clone()) - .bind(remote_meta.author_name.clone()) - .bind(remote_meta.author_email.clone()) - .bind(remote_meta.committed_at) - .bind(remote_meta.pack_key.clone()) - .bind(Json(&remote_meta.file_hash_index)) - .execute(&mut *tx) - .await?; - - sqlx::query( - "UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1", - ) - .bind(workspace_id) - .execute(&mut *tx) - .await?; - tx.commit().await?; - - let files_changed = self - .apply_state_to_workspace(workspace_id, &remote_state, &previous_index) - .await?; - - // Create any missing documents/attachments from the pulled state before syncing realtime. - self.materialize_documents_from_state(workspace_id, actor_id, &remote_state) - .await?; - self.apply_merged_to_documents(workspace_id, &remote_state) - .await?; - self.clear_dirty(workspace_id).await.map_err(|err| { - error!(workspace_id = %workspace_id, error = %err, "git_pull_clear_dirty_failed"); - err - })?; - - info!( - workspace_id = %workspace_id, - commit = %encode_commit_id(&remote_meta.commit_id), - "git_pull_fast_forward_remote" - ); - - return Ok(GitPullResultDto { - success: true, - message: "fast-forwarded to remote".to_string(), - files_changed, - commit_hash: Some(encode_commit_id(&remote_meta.commit_id)), - conflicts: None, - base_commit: base_commit.clone(), - remote_commit: Some(remote_meta.commit_id.clone()), - }); - } - - // Diverged: merge local into remote (linear, parent = remote) - let Some(local_oid_val) = local_oid else { - anyhow::bail!("no local commit to merge"); - }; - - let (meta, pack_bytes, merged_snapshots, commit_hex) = { - // Build a synthetic "ours" commit from the current workspace state anchored to the local head - // so dirty edits participate in the merge against remote changes. 
- let synthetic_ours = self.build_synthetic_commit(workspace_id, &repo, local_oid_val)?; - let ours_commit = repo.find_commit(synthetic_ours)?; - let remote_commit_obj = repo.find_commit(remote_oid)?; - let index = repo.merge_commits(&ours_commit, &remote_commit_obj, None)?; - - let conflict_items = collect_conflicts(&repo, &index)?; - if !conflict_items.is_empty() && req.resolutions.is_empty() { - return Ok(GitPullResultDto { - success: false, - message: "conflicts detected".to_string(), - files_changed: 0, - commit_hash: None, - conflicts: Some(conflict_items), - base_commit: base_commit.clone(), - remote_commit: remote_commit.clone(), - }); - } - - // Collect conflict entries for resolution application - let mut conflict_entries: Vec<( - String, - Option>, - Option>, - Option>, - )> = Vec::new(); - { - let mut conflicts_iter = index.conflicts()?; - while let Some(conflict) = conflicts_iter.next() { - let conflict = conflict?; - let path = conflict - .our - .as_ref() - .or(conflict.their.as_ref()) - .or(conflict.ancestor.as_ref()) - .and_then(|e| std::str::from_utf8(&e.path).ok()) - .ok_or_else(|| anyhow!("missing conflict path"))? - .to_string(); - - let to_bytes = - |entry: Option<&git2::IndexEntry>| -> anyhow::Result>> { - if let Some(e) = entry { - let blob = repo.find_blob(e.id)?; - Ok(Some(blob.content().to_vec())) - } else { - Ok(None) - } - }; - - conflict_entries.push(( - path, - to_bytes(conflict.our.as_ref())?, - to_bytes(conflict.their.as_ref())?, - to_bytes(conflict.ancestor.as_ref())?, - )); - } - } - - let resolution_map: std::collections::HashMap< - String, - &crate::application::dto::git::GitPullResolutionDto, - > = req - .resolutions - .iter() - .map(|r| (r.path.clone(), r)) - .collect(); - - // Build merged state from resolved index (stage 0) plus user resolutions. 
- let mut merged_snapshots: HashMap = HashMap::new(); - for entry in index.iter() { - if index_entry_stage(&entry) != 0 { - continue; - } - let path = index_entry_path(&entry)?; - let blob = repo.find_blob(entry.id)?; - let bytes = blob.content().to_vec(); - let hash = sha256_hex(&bytes); - let is_text = std::str::from_utf8(&bytes).is_ok(); - merged_snapshots.insert( - path, - FileSnapshot { - hash, - data: FileSnapshotData::Inline(bytes), - is_text, - }, - ); - } - - let mut unresolved: Vec = Vec::new(); - - for (path, ours_bytes, theirs_bytes, base_bytes) in conflict_entries { - let resolution = resolution_map.get(&path); - if resolution.is_none() { - let (mut ours_txt, ours_bin) = - as_text_or_binary(path.as_str(), ours_bytes.as_ref()); - let (mut theirs_txt, theirs_bin) = - as_text_or_binary(path.as_str(), theirs_bytes.as_ref()); - let (mut base_txt, base_bin) = - as_text_or_binary(path.as_str(), base_bytes.as_ref()); - let is_binary = ours_bin || theirs_bin || base_bin; - if !is_binary { - ours_txt = strip_front_matter_body(path.as_str(), ours_txt); - theirs_txt = strip_front_matter_body(path.as_str(), theirs_txt); - base_txt = strip_front_matter_body(path.as_str(), base_txt); - } - unresolved.push(GitPullConflictItemDto { - path: path.clone(), - is_binary, - ours: ours_txt, - theirs: theirs_txt, - base: base_txt, - document_id: None, - }); - continue; - } - - let res = *resolution.unwrap(); - let selected_bytes = match res.choice.as_str() { - "ours" => ours_bytes.clone(), - "theirs" => theirs_bytes.clone(), - "custom_text" => { - let content = res - .content - .as_ref() - .ok_or_else(|| anyhow!("custom_text content required"))?; - Some(content.as_bytes().to_vec()) - } - other => anyhow::bail!("unsupported resolution choice {other}"), - } - .unwrap_or_default(); - let hash = sha256_hex(&selected_bytes); - let is_text = std::str::from_utf8(&selected_bytes).is_ok(); - merged_snapshots.insert( - path.clone(), - FileSnapshot { - hash, - data: FileSnapshotData::Inline(selected_bytes), - is_text, - }, - ); - } - - if !unresolved.is_empty() { - return Ok(GitPullResultDto { - success: false, - message: "conflicts detected".to_string(), - files_changed: 0, - commit_hash: None, - conflicts: Some(unresolved), - base_commit: base_commit.clone(), - remote_commit: remote_commit.clone(), - }); - } - - // Build tree from merged snapshots without async work - let mut entry_map: BTreeMap> = BTreeMap::new(); - for (path, snap) in merged_snapshots.iter() { - let bytes = match &snap.data { - FileSnapshotData::Inline(b) => b.clone(), - FileSnapshotData::StoragePath(_) => { - anyhow::bail!("unexpected storage-backed snapshot during pull merge") - } - }; - entry_map.insert(path.clone(), bytes); - } - let tree_oid = build_tree_from_entries(&repo, &entry_map)?; - let tree = repo.find_tree(tree_oid)?; - let sig = signature_from_parts("RefMD", "refmd@example.com", chrono::Utc::now())?; - let base_parent = repo.find_commit(local_oid_val)?; - let remote_parent = repo.find_commit(remote_oid)?; - let parent_refs: [&git2::Commit; 2] = [&base_parent, &remote_parent]; - let commit_oid = repo.commit( - None, - &sig, - &sig, - "Merge remote changes", - &tree, - &parent_refs, - )?; - - let mut file_hash_index: HashMap = HashMap::new(); - for (path, snap) in merged_snapshots.iter() { - file_hash_index.insert(path.clone(), snap.hash.clone()); - } - - let mut pack_builder = repo.packbuilder()?; - pack_builder.insert_commit(commit_oid)?; - // Include both parents to avoid missing bases when applying packs later. 
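index_entry_stage and index_entry_path, used when filtering the merged index above, are small helpers not shown here. libgit2 keeps the merge stage in bits 12-13 of the entry flags (stage 0 is the merged result, 1-3 are ancestor/ours/theirs), so they are presumably close to:

// 0x3000 is libgit2's GIT_INDEX_ENTRY_STAGEMASK; these definitions are assumed.
fn index_entry_stage(entry: &git2::IndexEntry) -> u16 {
    (entry.flags & 0x3000) >> 12
}

fn index_entry_path(entry: &git2::IndexEntry) -> anyhow::Result<String> {
    Ok(std::str::from_utf8(&entry.path)?.to_string())
}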
- pack_builder.insert_commit(base_parent.id())?; - pack_builder.insert_commit(remote_parent.id())?; - let mut pack_buf = git2::Buf::new(); - pack_builder.write_buf(&mut pack_buf)?; - let pack_bytes = pack_buf.to_vec(); - - let commit_hex = encode_commit_id(commit_oid.as_bytes()); - let meta = CommitMeta { - commit_id: commit_oid.as_bytes().to_vec(), - // Keep workspace history linear: parent is previous workspace head. - parent_commit_id: base_commit.clone(), - message: Some("Merge remote changes".to_string()), - author_name: Some("RefMD".to_string()), - author_email: Some("refmd@example.com".to_string()), - committed_at: chrono::Utc::now(), - pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex), - file_hash_index, - }; - - (meta, pack_bytes, merged_snapshots, commit_hex) - }; - - // Persist remote parent if we created it above. - if let Some((remote_meta, remote_pack_bytes)) = remote_pack.take() { - self.git_storage - .store_pack(workspace_id, &remote_pack_bytes, &remote_meta) - .await?; - self.upsert_commit_record(workspace_id, &remote_meta) - .await?; - } - - let snapshot_keys = self - .store_commit_snapshots(workspace_id, &meta.commit_id, &merged_snapshots) - .await?; - - if let Err(err) = self - .git_storage - .store_pack(workspace_id, &pack_bytes, &meta) - .await - { - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - return Err(err); - } - - if let Err(err) = self - .git_storage - .set_latest_commit(workspace_id, Some(&meta)) - .await - { - let _ = self - .git_storage - .delete_pack(workspace_id, &meta.commit_id) - .await; - for key in snapshot_keys.iter().rev() { - let _ = self.git_storage.delete_blob(key).await; - } - return Err(err); - } - - let mut tx = self.pool.begin().await?; - sqlx::query( - r#"INSERT INTO git_commits ( - commit_id, - parent_commit_id, - workspace_id, - message, - author_name, - author_email, - committed_at, - pack_key, - file_hash_index - ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9)"#, - ) - .bind(meta.commit_id.clone()) - .bind(meta.parent_commit_id.clone()) - .bind(workspace_id) - .bind(meta.message.clone()) - .bind(meta.author_name.clone()) - .bind(meta.author_email.clone()) - .bind(meta.committed_at) - .bind(meta.pack_key.clone()) - .bind(Json(&meta.file_hash_index)) - .execute(&mut *tx) - .await?; - - sqlx::query("UPDATE git_repository_state SET updated_at = now() WHERE workspace_id = $1") - .bind(workspace_id) - .execute(&mut *tx) - .await?; - tx.commit().await?; - - let files_changed = self - .apply_state_to_workspace(workspace_id, &merged_snapshots, &previous_index) - .await?; - - // Create any missing documents/attachments from the merged state before syncing realtime. - self.materialize_documents_from_state(workspace_id, actor_id, &merged_snapshots) - .await?; - // Apply merged markdown back into realtime/doc storage immediately. - self.apply_merged_to_documents(workspace_id, &merged_snapshots) - .await?; - - self.clear_dirty(workspace_id).await.map_err(|err| { - error!(workspace_id = %workspace_id, error = %err, "git_pull_merge_clear_dirty_failed"); - err - })?; - - Ok(GitPullResultDto { - success: true, - message: "remote changes merged".to_string(), - files_changed, - commit_hash: Some(commit_hex), - conflicts: None, - base_commit, - remote_commit, - }) - } - - async fn persist_pack_chain( - &self, - workspace_id: Uuid, - until: Option<&[u8]>, - ) -> anyhow::Result)>> { - // Attempt to rebuild pack chain from stored snapshots if packs are missing or corrupted. 
- async fn rebuild_from_snapshots( - svc: &GitWorkspaceService, - workspace_id: Uuid, - until: Option<&[u8]>, - ) -> anyhow::Result)>> { - // Collect commit metas from oldest to newest - let mut chain: Vec = Vec::new(); - let mut cursor = match until { - Some(id) => svc.commit_meta_by_id(workspace_id, id).await?, - None => svc.latest_commit_meta(workspace_id).await?, - }; - while let Some(meta) = cursor { - chain.push(meta.clone()); - if let Some(parent) = meta.parent_commit_id.as_ref() { - cursor = svc.commit_meta_by_id(workspace_id, parent).await?; - } else { - break; - } - } - if chain.is_empty() { - return Ok(None); - } - chain.reverse(); - - // Preload snapshots async - let mut prepared: Vec<(CommitMeta, Vec<(String, Vec)>)> = Vec::new(); - for meta in chain.iter() { - let mut entries: Vec<(String, Vec)> = Vec::new(); - for path in meta.file_hash_index.keys() { - let Some(bytes) = svc - .load_file_snapshot(workspace_id, meta.commit_id.as_slice(), path) - .await? - else { - anyhow::bail!( - "missing snapshot blob for {} at commit {}", - path, - encode_commit_id(&meta.commit_id) - ); - }; - entries.push((path.clone(), bytes)); - } - prepared.push((meta.clone(), entries)); - } - - // Build packs synchronously to avoid Send issues with git2 types - let (temp_dir, pack_paths) = tokio::task::block_in_place(|| -> anyhow::Result<_> { - let temp_dir = tempfile::tempdir()?; - let repo = Repository::init_bare(temp_dir.path())?; - let mut built_commits: HashMap, git2::Oid> = HashMap::new(); - let mut pack_paths: Vec = Vec::new(); - - for (meta, entries) in prepared.into_iter() { - let mut builder = repo.treebuilder(None)?; - for (path, bytes) in entries.iter() { - let blob_oid = repo.blob(bytes)?; - builder.insert(path, blob_oid, FileMode::Blob.into())?; - } - let tree_oid = builder.write()?; - let tree = repo.find_tree(tree_oid)?; - - let sig = signature_from_parts( - meta.author_name.as_deref().unwrap_or("RefMD"), - meta.author_email.as_deref().unwrap_or("refmd@example.com"), - meta.committed_at, - )?; - let mut parents = Vec::new(); - if let Some(parent) = meta.parent_commit_id.as_ref() { - if let Some(existing) = built_commits.get(parent) { - parents.push(repo.find_commit(*existing)?); - } - } - let parent_refs: Vec<&Commit> = parents.iter().collect(); - let commit_oid = repo.commit( - None, - &sig, - &sig, - meta.message - .as_deref() - .unwrap_or("Recovered commit from snapshots"), - &tree, - &parent_refs, - )?; - if commit_oid.as_bytes() != meta.commit_id.as_slice() { - anyhow::bail!( - "reconstructed commit id mismatch for {}", - encode_commit_id(&meta.commit_id) - ); - } - built_commits.insert(meta.commit_id.clone(), commit_oid); - - let mut pack_builder = repo.packbuilder()?; - pack_builder.insert_commit(commit_oid)?; - for p in parents.iter() { - pack_builder.insert_commit(p.id())?; - } - let mut pack_buf = git2::Buf::new(); - pack_builder.write_buf(&mut pack_buf)?; - let pack_bytes = pack_buf.to_vec(); - - let pack_path = temp_dir - .path() - .join(format!("{:08}.pack", pack_paths.len())); - std::fs::write(&pack_path, &pack_bytes)?; - pack_paths.push(pack_path); - } - - Ok((temp_dir, pack_paths)) - })?; - - // Persist rebuilt packs and metas back to storage - for (idx, meta) in chain.iter().enumerate() { - let pack_bytes = std::fs::read(&pack_paths[idx])?; - svc.git_storage - .store_pack(workspace_id, &pack_bytes, meta) - .await?; - svc.upsert_commit_record(workspace_id, meta).await?; - let _ = svc - .git_storage - .set_latest_commit(workspace_id, Some(meta)) - .await; - } - - 
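The `block_in_place` call inside the helper above is load-bearing: git2's `Repository` and friends are not `Send`, so they must not be held across an `.await`. Running the whole git2 section synchronously on the current worker thread sidesteps that. The pattern in isolation (the path is hypothetical; this requires the multi-thread Tokio runtime, since `block_in_place` panics on a current-thread runtime):

```rust
use git2::Repository;

#[tokio::main(flavor = "multi_thread", worker_threads = 2)]
async fn main() -> anyhow::Result<()> {
    let head = tokio::task::block_in_place(|| -> anyhow::Result<String> {
        // The non-Send Repository is created and dropped without any .await
        // in between, so the surrounding future stays Send.
        let repo = Repository::open(".")?;
        Ok(repo.head()?.name().unwrap_or("detached").to_string())
    })?;
    println!("HEAD: {head}");
    Ok(())
}
```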
-            Ok(Some((temp_dir, pack_paths)))
-        }
-
-        let mut attempts = 0;
-        loop {
-            match self.git_storage.load_pack_chain(workspace_id, until).await {
-                Ok(mut stream) => {
-                    let temp_dir = tempfile::tempdir()?;
-                    let mut pack_paths = Vec::new();
-                    let mut index: usize = 0;
-                    while let Some(pack) = stream.next().await {
-                        let pack = pack?;
-                        let path = temp_dir.path().join(format!("{:08}.pack", index));
-                        tokio::fs::write(&path, &pack.bytes).await?;
-                        pack_paths.push(path);
-                        index += 1;
-                    }
-                    if pack_paths.is_empty() {
-                        return Ok(None);
-                    } else {
-                        return Ok(Some((temp_dir, pack_paths)));
-                    }
-                }
-                Err(err) => {
-                    let err_str = err.to_string();
-                    let is_missing_objects = err_str.to_lowercase().contains("missing")
-                        && err_str.to_lowercase().contains("object");
-                    if let Some(rebuilt) = rebuild_from_snapshots(self, workspace_id, until).await?
-                    {
-                        return Ok(Some(rebuilt));
-                    }
-                    if attempts == 0 {
-                        if let Some(commit_hex) = missing_metadata_commit(&err) {
-                            match self
-                                .repair_missing_commit_metadata(workspace_id, &commit_hex)
-                                .await
-                            {
-                                Ok(_) => {
-                                    attempts += 1;
-                                    continue;
-                                }
-                                Err(repair_err) => {
-                                    warn!(
-                                        workspace_id = %workspace_id,
-                                        commit = %commit_hex,
-                                        error = ?repair_err,
-                                        "git_commit_metadata_repair_failed"
-                                    );
-                                }
-                            }
-                        }
-                        // If pack is missing objects, fall back by resetting git storage pointer and DB history.
-                        if is_missing_objects {
-                            warn!(
-                                workspace_id = %workspace_id,
-                                error = %err,
-                                "git_pack_missing_objects_detected_resetting_history"
-                            );
-                            // Drop storage latest pointer and DB commits for this workspace.
-                            let _ = self.git_storage.set_latest_commit(workspace_id, None).await;
-                            let _ = sqlx::query("DELETE FROM git_commits WHERE workspace_id = $1")
-                                .bind(workspace_id)
-                                .execute(&self.pool)
-                                .await;
-                            return Ok(None);
-                        }
-                    }
-                    return Err(err);
-                }
-            }
-        }
-    }
-
-    async fn repair_missing_commit_metadata(
-        &self,
-        workspace_id: Uuid,
-        start_hex: &str,
-    ) -> anyhow::Result<()> {
-        let mut current_hex = start_hex.to_string();
-        let mut visited = HashSet::new();
-        loop {
-            if !visited.insert(current_hex.clone()) {
-                break;
-            }
-            let meta =
-                if let Some(meta) = self.commit_meta_by_hex(workspace_id, &current_hex).await? {
-                    meta
-                } else if let Some(meta) = self
-                    .reconstruct_commit_meta_from_pack(workspace_id, &current_hex)
-                    .await?
-                {
-                    meta
-                } else {
-                    anyhow::bail!(
-                        "commit {} not found in database or pack storage",
-                        current_hex
-                    );
-                };
-            self.git_storage
-                .restore_commit_meta(workspace_id, &meta)
-                .await?;
-            self.upsert_commit_record(workspace_id, &meta).await?;
-            if let Some(parent) = meta.parent_commit_id.as_ref() {
-                current_hex = encode_commit_id(parent);
-            } else {
-                break;
-            }
-        }
-        Ok(())
-    }
-
-    async fn upsert_commit_record(
-        &self,
-        workspace_id: Uuid,
-        meta: &CommitMeta,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"INSERT INTO git_commits (
-                commit_id,
-                parent_commit_id,
-                workspace_id,
-                message,
-                author_name,
-                author_email,
-                committed_at,
-                pack_key,
-                file_hash_index
-            ) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9)
-            ON CONFLICT (workspace_id, commit_id) DO UPDATE SET
-                parent_commit_id = EXCLUDED.parent_commit_id,
-                message = EXCLUDED.message,
-                author_name = EXCLUDED.author_name,
-                author_email = EXCLUDED.author_email,
-                committed_at = EXCLUDED.committed_at,
-                pack_key = EXCLUDED.pack_key,
-                file_hash_index = EXCLUDED.file_hash_index"#,
-        )
-        .bind(meta.commit_id.clone())
-        .bind(meta.parent_commit_id.clone())
-        .bind(workspace_id)
-        .bind(meta.message.clone())
-        .bind(meta.author_name.clone())
-        .bind(meta.author_email.clone())
-        .bind(meta.committed_at)
-        .bind(meta.pack_key.clone())
-        .bind(Json(&meta.file_hash_index))
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn reconstruct_commit_meta_from_pack(
-        &self,
-        workspace_id: Uuid,
-        commit_hex: &str,
-    ) -> anyhow::Result<Option<CommitMeta>> {
-        let commit_id = decode_commit_id(commit_hex)?;
-        let Some(pack_bytes) = self
-            .git_storage
-            .fetch_pack_for_commit(workspace_id, &commit_id)
-            .await?
-        else {
-            return Ok(None);
-        };
-        let temp_dir = tempfile::tempdir()?;
-        let repo = Repository::init_bare(temp_dir.path())?;
-        apply_pack_to_repo(&repo, &pack_bytes)?;
-        let oid = git2::Oid::from_bytes(&commit_id)?;
-        let commit = repo.find_commit(oid)?;
-        let committed_at = git_time_to_datetime(commit.time())?;
-        let message = commit
-            .message()
-            .map(|m| m.trim_end_matches('\n').to_string())
-            .filter(|m| !m.trim().is_empty());
-        let author = commit.author();
-        let author_name = author.name().map(|s| s.to_string());
-        let author_email = author.email().map(|s| s.to_string());
-        let parent_commit_id = if commit.parent_count() > 0 {
-            let parent = commit.parent_id(0)?;
-            Some(parent.as_bytes().to_vec())
-        } else {
-            None
-        };
-        let files = read_commit_files(&repo, commit_id.as_slice())?;
-        let mut file_hash_index: HashMap<String, String> = HashMap::new();
-        for (path, bytes) in files.into_iter() {
-            file_hash_index.insert(path, sha256_hex(&bytes));
-        }
-        let meta = CommitMeta {
-            commit_id,
-            parent_commit_id,
-            message,
-            author_name,
-            author_email,
-            committed_at,
-            pack_key: format!("git/packs/{}/{}.pack", workspace_id, commit_hex),
-            file_hash_index,
-        };
-        Ok(Some(meta))
-    }
-}
-
-fn row_to_commit_meta(row: sqlx::postgres::PgRow) -> anyhow::Result<CommitMeta> {
-    let commit_id: Vec<u8> = row.get("commit_id");
-    let parent_commit_id: Option<Vec<u8>> = row.try_get("parent_commit_id").ok();
-    let message: Option<String> = row.try_get("message").ok();
-    let author_name: Option<String> = row.try_get("author_name").ok();
-    let author_email: Option<String> = row.try_get("author_email").ok();
-    let committed_at: DateTime<Utc> = row.get("committed_at");
-    let pack_key: String = row.get("pack_key");
-    let file_hash_index: Json<HashMap<String, String>> = row.get("file_hash_index");
-
-    Ok(CommitMeta {
-        commit_id,
-        parent_commit_id,
-        message,
-        author_name,
-        author_email,
-        committed_at,
-        pack_key,
-        file_hash_index: file_hash_index.0,
-    })
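Worth noting about `upsert_commit_record` above: the `ON CONFLICT (workspace_id, commit_id) DO UPDATE` clause is what makes the metadata-repair loop safe to re-run; replaying a commit record is an update rather than a duplicate-key error. The shape of the statement, reduced to a hypothetical two-column table:

```rust
use sqlx::PgPool;

// Hypothetical standalone sketch of the same upsert shape.
async fn upsert_kv(pool: &PgPool, key: &str, value: &str) -> anyhow::Result<()> {
    sqlx::query(
        "INSERT INTO kv (k, v) VALUES ($1, $2)
         ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v",
    )
    .bind(key)
    .bind(value)
    .execute(pool)
    .await?;
    Ok(())
}
```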
-} - -fn apply_pack_to_repo(repo: &Repository, pack: &[u8]) -> anyhow::Result<()> { - let objects_dir = repo.path().join("objects").join("pack"); - fs::create_dir_all(&objects_dir)?; - let odb = repo.odb()?; - let mut indexer = Indexer::new(Some(&odb), objects_dir.as_path(), 0o644, true)?; - indexer.write_all(pack)?; - indexer.commit()?; - Ok(()) -} - -fn read_first_pack(repo_path: &Path) -> anyhow::Result>> { - let pack_dir = repo_path.join("objects").join("pack"); - if !pack_dir.exists() { - return Ok(None); - } - let mut entries: Vec<_> = std::fs::read_dir(&pack_dir)? - .filter_map(|e| e.ok()) - .filter(|e| { - e.path() - .extension() - .map(|ext| ext == "pack") - .unwrap_or(false) - }) - .collect(); - entries.sort_by_key(|e| e.file_name()); - if let Some(entry) = entries.first() { - let bytes = std::fs::read(entry.path())?; - return Ok(Some(bytes)); - } - Ok(None) -} - -fn find_front_matter_end(s: &str) -> Option<(usize, usize)> { - let bytes = s.as_bytes(); - let mut idx = 0; - while idx < bytes.len() { - if bytes[idx] == b'\n' { - let after_newline = &s[idx + 1..]; - if after_newline.starts_with("---") { - let mut body_start = idx + 1 + 3; - let mut remainder = &s[body_start..]; - // Skip trailing newlines after the closing delimiter to mirror ingest. - while remainder.starts_with("\r\n") || remainder.starts_with('\n') { - if remainder.starts_with("\r\n") { - body_start += 2; - remainder = &s[body_start..]; - } else { - body_start += 1; - remainder = &s[body_start..]; - } - } - return Some((idx, body_start)); - } - } - idx += 1; - } - None -} - -fn split_front_matter(input: &str) -> Option<(&str, &str)> { - let Some(after_open) = input - .strip_prefix("---\r\n") - .or_else(|| input.strip_prefix("---\n")) - else { - return None; - }; - if let Some((front_len, body_start)) = find_front_matter_end(after_open) { - let front = &after_open[..front_len]; - let body = &after_open[body_start..]; - return Some((front, body)); - } - None -} - -fn strip_front_matter_body(path: &str, text: Option) -> Option { - let Some(txt) = text else { - return None; - }; - let lower = path.to_ascii_lowercase(); - let is_markdown = lower.ends_with(".md") || lower.ends_with(".markdown"); - if !is_markdown { - return Some(txt); - } - if let Some((_, body)) = split_front_matter(txt.as_str()) { - return Some(body.to_string()); - } - Some(txt) -} - -fn extract_markdown_body(bytes: &[u8]) -> Option { - let text = std::str::from_utf8(bytes).ok()?; - let trimmed = text.trim_start_matches('\u{feff}'); - if let Some((_, body)) = split_front_matter(trimmed) { - return Some(body.to_string()); - } - Some(trimmed.to_string()) -} - -fn missing_metadata_commit(err: &anyhow::Error) -> Option { - let needle = "metadata not found for commit "; - for cause in err.chain() { - let msg = cause.to_string(); - if let Some(idx) = msg.find(needle) { - let start = idx + needle.len(); - let rest = &msg[start..]; - let commit: String = rest - .chars() - .take_while(|ch| ch.is_ascii_hexdigit()) - .collect(); - if !commit.is_empty() { - return Some(commit); - } - } - } - None -} - -fn apply_pack_files(repo: &Repository, pack_paths: &[PathBuf]) -> anyhow::Result<()> { - for path in pack_paths { - let bytes = fs::read(path)?; - apply_pack_to_repo(repo, &bytes)?; - } - Ok(()) -} - -fn collect_conflicts( - repo: &Repository, - index: &git2::Index, -) -> anyhow::Result> { - let mut out = Vec::new(); - let mut conflicts = index.conflicts()?; - while let Some(conflict) = conflicts.next() { - let conflict = conflict?; - let path = conflict - .our - 
.as_ref() - .or(conflict.their.as_ref()) - .or(conflict.ancestor.as_ref()) - .and_then(|e| std::str::from_utf8(&e.path).ok()) - .unwrap_or("") - .to_string(); - - let to_bytes = |entry: Option<&git2::IndexEntry>| -> anyhow::Result>> { - if let Some(e) = entry { - let blob = repo.find_blob(e.id)?; - Ok(Some(blob.content().to_vec())) - } else { - Ok(None) - } - }; - - let ours_bytes = to_bytes(conflict.our.as_ref())?; - let theirs_bytes = to_bytes(conflict.their.as_ref())?; - let base_bytes = to_bytes(conflict.ancestor.as_ref())?; - - let (mut ours, ours_bin) = as_text_or_binary(path.as_str(), ours_bytes.as_ref()); - let (mut theirs, theirs_bin) = as_text_or_binary(path.as_str(), theirs_bytes.as_ref()); - let (mut base, base_bin) = as_text_or_binary(path.as_str(), base_bytes.as_ref()); - let is_binary = ours_bin || theirs_bin || base_bin; - if !is_binary { - ours = strip_front_matter_body(path.as_str(), ours); - theirs = strip_front_matter_body(path.as_str(), theirs); - base = strip_front_matter_body(path.as_str(), base); - } - - out.push(GitPullConflictItemDto { - path, - is_binary, - ours, - theirs, - base, - document_id: None, - }); - } - Ok(out) -} - -fn index_entry_path(entry: &git2::IndexEntry) -> anyhow::Result { - let raw = &entry.path; - if raw.is_empty() { - anyhow::bail!("empty index entry path"); - } - if let Ok(cstr) = std::ffi::CStr::from_bytes_with_nul(raw) { - Ok(cstr - .to_str() - .unwrap_or_default() - .trim_end_matches('\0') - .to_string()) - } else { - Ok(String::from_utf8_lossy(raw) - .trim_end_matches('\0') - .to_string()) - } -} - -fn index_entry_stage(entry: &git2::IndexEntry) -> i32 { - ((entry.flags as u32 >> 12) & 0b11) as i32 -} - -fn as_text_or_binary(path: &str, data: Option<&Vec>) -> (Option, bool) { - let Some(bytes) = data else { - return (None, false); - }; - match std::str::from_utf8(bytes) { - Ok(s) => (Some(s.to_string()), false), - Err(_) => { - let lower = path.to_ascii_lowercase(); - let looks_text = lower.ends_with(".md") - || lower.ends_with(".markdown") - || lower.ends_with(".txt") - || lower.ends_with(".json") - || lower.ends_with(".yaml") - || lower.ends_with(".yml") - || lower.ends_with(".toml") - || lower.ends_with(".ini"); - if looks_text { - let lossy = String::from_utf8_lossy(bytes).to_string(); - return (Some(lossy), false); - } - (None, true) - } - } -} - -fn extract_host(url: &str) -> Option { - let s = url.trim(); - let s = s - .strip_prefix("https://") - .or_else(|| s.strip_prefix("http://")) - .unwrap_or(s); - let mut parts = s.split('/'); - let host_port = parts.next().unwrap_or(""); - let host = host_port.split(':').next().unwrap_or(""); - if host.is_empty() { - None - } else { - Some(host.to_string()) - } -} - -fn default_token_username_for(host: Option<&str>) -> &'static str { - match host { - Some(h) if h.contains("github") => "x-access-token", - Some(h) if h.contains("gitlab") => "oauth2", - Some(h) if h.contains("dev.azure.com") || h.contains("visualstudio.com") => "pat", - _ => "git", - } -} - -fn build_remote_callbacks(cfg: &UserGitCfg) -> RemoteCallbacks<'static> { - let auth_type = cfg.auth_type.clone().unwrap_or_default(); - let auth_data = cfg.auth_data.clone(); - let host_hint = extract_host(&cfg.repository_url); - let mut callbacks = RemoteCallbacks::new(); - callbacks.credentials( - move |_url, username_from_url, _allowed| match auth_type.as_str() { - "token" => { - if let Some(token) = auth_data - .as_ref() - .and_then(|v| v.get("token")) - .and_then(|v| v.as_str()) - { - let user = username_from_url - 
.unwrap_or(default_token_username_for(host_hint.as_deref())); - Cred::userpass_plaintext(user, token) - } else { - Cred::default() - } - } - "ssh" => { - if let Some(key) = auth_data - .as_ref() - .and_then(|v| v.get("private_key")) - .and_then(|v| v.as_str()) - { - let user = username_from_url.unwrap_or("git"); - let passphrase = auth_data - .as_ref() - .and_then(|v| v.get("passphrase")) - .and_then(|v| v.as_str()) - .filter(|s| !s.is_empty()); - let trimmed = key.trim(); - if trimmed.starts_with("v1:") { - return Err(GitError::from_str( - "failed to decrypt stored SSH key; check ENCRYPTION_KEY and re-save credentials", - )); - } - if trimmed.contains("BEGIN OPENSSH PRIVATE KEY") { - return Err(GitError::from_str( - "OpenSSH private key format is not supported; provide PEM (BEGIN RSA/EC PRIVATE KEY)", - )); - } - let needs_passphrase = trimmed.contains("ENCRYPTED"); - if needs_passphrase && passphrase.is_none() { - return Err(GitError::from_str( - "SSH private key is encrypted; passphrase is required", - )); - } - Cred::ssh_key_from_memory(user, None, trimmed, passphrase) - } else { - Cred::default() - } - } - _ => Cred::default(), - }, - ); - callbacks.certificate_check(|_, _| Ok(CertificateCheckStatus::CertificateOk)); - callbacks -} - -fn prepare_remote<'repo>( - repo: &'repo Repository, - cfg: &UserGitCfg, -) -> anyhow::Result> { - let mut remote = match repo.find_remote("origin") { - Ok(remote) => remote, - Err(_) => repo.remote("origin", &cfg.repository_url)?, - }; - if remote.url() != Some(cfg.repository_url.as_str()) { - repo.remote_set_url("origin", &cfg.repository_url)?; - remote = repo.find_remote("origin")?; - } - Ok(remote) -} - -fn fetch_remote_head( - repo: &Repository, - cfg: &UserGitCfg, - branch: &str, -) -> anyhow::Result> { - let mut remote = prepare_remote(repo, cfg)?; - let callbacks = build_remote_callbacks(cfg); - let mut fetch_options = FetchOptions::new(); - fetch_options.remote_callbacks(callbacks); - let refspec = format!("refs/heads/{branch}:refs/remotes/origin/{branch}"); - remote - .fetch(&[&refspec], Some(&mut fetch_options), None) - .map_err(map_git_http_error)?; - let reference_name = format!("refs/remotes/origin/{branch}"); - match repo.find_reference(&reference_name) { - Ok(reference) => Ok(reference.target()), - Err(err) if err.code() == git2::ErrorCode::NotFound => Ok(None), - Err(err) => Err(err.into()), - } -} - -#[allow(dead_code)] - -fn read_commit_files( - repo: &Repository, - commit_id: &[u8], -) -> anyhow::Result>> { - let oid = git2::Oid::from_bytes(commit_id)?; - let commit = repo.find_commit(oid)?; - let tree = commit.tree()?; - let mut files = HashMap::new(); - tree.walk(TreeWalkMode::PreOrder, |root, entry| { - if entry.kind() == Some(ObjectType::Blob) { - if let Some(name) = entry.name() { - if let Ok(blob) = repo.find_blob(entry.id()) { - let key = format!("{}{}", root, name); - files.insert(key, blob.content().to_vec()); - } - } - } - TreeWalkResult::Ok - })?; - Ok(files) -} - -fn perform_push( - repo: &Repository, - cfg: &UserGitCfg, - branch: &str, - commit_oid: git2::Oid, - force: bool, -) -> anyhow::Result { - let ref_name = format!("refs/heads/{}", branch); - repo.reference(&ref_name, commit_oid, true, "update branch for sync")?; - - let mut remote = prepare_remote(repo, cfg)?; - let callbacks = build_remote_callbacks(cfg); - let mut push_options = PushOptions::new(); - push_options.remote_callbacks(callbacks); - let refspec = if force { - format!("+refs/heads/{0}:refs/heads/{0}", branch) - } else { - 
format!("refs/heads/{0}:refs/heads/{0}", branch) - }; - remote - .push(&[&refspec], Some(&mut push_options)) - .map_err(map_git_http_error)?; - Ok(true) -} - -fn map_git_http_error(err: git2::Error) -> anyhow::Error { - if err.class() == ErrorClass::Http { - let msg = err.to_string().to_lowercase(); - if msg.contains("status code: 401") - || msg.contains("status code: 407") - || msg.contains("redirect") - { - // Avoid leaking raw libgit2 error strings to the user; normalize to a short tag. - return anyhow!("git_http_auth_redirect"); - } - if msg.contains("status code: 403") || msg.contains("status code: 404") { - return anyhow!("git_http_not_found"); - } - } - err.into() -} - -fn build_tree_from_entries( - repo: &Repository, - entries: &BTreeMap>, -) -> anyhow::Result { - let mut root = DirNode::default(); - for (path, data) in entries.iter() { - let parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect(); - if parts.is_empty() { - continue; - } - insert_into_dir(&mut root, &parts, data.clone()); - } - write_dir(repo, &root) -} - -fn signature_from_parts( - name: &str, - email: &str, - at: DateTime, -) -> anyhow::Result> { - let git_time = Time::new(at.timestamp(), 0); - Signature::new(name, email, &git_time).map_err(anyhow::Error::from) -} - -fn git_time_to_datetime(time: Time) -> anyhow::Result> { - DateTime::::from_timestamp(time.seconds(), 0) - .ok_or_else(|| anyhow!("invalid git timestamp")) -} - -#[derive(Default)] -struct DirNode { - entries: BTreeMap, -} - -enum DirEntry { - File(Vec), - Oid(git2::Oid), - Dir(Box), -} - -fn insert_into_dir(dir: &mut DirNode, parts: &[&str], data: Vec) { - use std::collections::btree_map::Entry; - - if parts.is_empty() { - return; - } - - if parts.len() == 1 { - dir.entries - .insert(parts[0].to_string(), DirEntry::File(data)); - return; - } - - match dir.entries.entry(parts[0].to_string()) { - Entry::Occupied(mut occ) => { - let next = occ.get_mut(); - match next { - DirEntry::Dir(child) => insert_into_dir(child, &parts[1..], data), - DirEntry::File(_) => { - let mut new_dir = DirNode::default(); - insert_into_dir(&mut new_dir, &parts[1..], data); - *next = DirEntry::Dir(Box::new(new_dir)); - } - DirEntry::Oid(_) => { - let mut new_dir = DirNode::default(); - insert_into_dir(&mut new_dir, &parts[1..], data); - *next = DirEntry::Dir(Box::new(new_dir)); - } - } - } - Entry::Vacant(vac) => { - if parts.len() == 1 { - vac.insert(DirEntry::File(data)); - } else { - let mut new_dir = DirNode::default(); - insert_into_dir(&mut new_dir, &parts[1..], data); - vac.insert(DirEntry::Dir(Box::new(new_dir))); - } - } - } -} - -fn write_dir(repo: &Repository, dir: &DirNode) -> anyhow::Result { - let mut builder = repo.treebuilder(None)?; - for (name, entry) in dir.entries.iter() { - match entry { - DirEntry::File(content) => { - let oid = repo.blob(content)?; - builder.insert(name, oid, FileMode::Blob.into())?; - } - DirEntry::Oid(oid) => { - builder.insert(name, *oid, FileMode::Blob.into())?; - } - DirEntry::Dir(child) => { - let oid = write_dir(repo, child)?; - builder.insert(name, oid, FileMode::Tree.into())?; - } - } - } - Ok(builder.write()?) 
-}
-
-enum FileSnapshotData {
-    Inline(Vec<u8>),
-    StoragePath(String),
-}
-
-struct FileSnapshot {
-    hash: String,
-    data: FileSnapshotData,
-    is_text: bool,
-}
-
-struct FileDeltaSummary {
-    added: Vec<String>,
-    modified: Vec<String>,
-    deleted: Vec<String>,
-}
-
-struct DirtyRow {
-    path: String,
-    is_text: bool,
-    op: String,
-    content_hash: Option<String>,
-}
-
-struct DirtyUpsert {
-    is_text: bool,
-    content_hash: Option<String>,
-}
-
-fn repo_relative_path(path: &str) -> anyhow::Result<String> {
-    let trimmed = path.trim_start_matches('/');
-    let mut parts = trimmed.splitn(2, '/');
-    let leading = parts.next().unwrap_or("");
-    if let Some(rest) = parts.next() {
-        Ok(rest.replace('\\', "/"))
-    } else if !leading.is_empty() {
-        Ok(leading.replace('\\', "/"))
-    } else {
-        Err(anyhow!("invalid storage path for repository: {path}"))
-    }
-}
-
-fn normalize_repo_path(path: String) -> String {
-    let trimmed = path.trim_start_matches('/');
-    if trimmed.is_empty() {
-        String::new()
-    } else {
-        trimmed
-            .replace('\\', "/")
-            .trim_start_matches("./")
-            .trim_start_matches('/')
-            .to_string()
-    }
-}
-
-fn blob_key(workspace_id: Uuid, commit_id: &[u8], path: &str) -> BlobKey {
-    let encoded_path = urlencoding::encode(path);
-    let commit_hex = encode_commit_id(commit_id);
-    BlobKey {
-        path: format!("{}/{}/{}", workspace_id, commit_hex, encoded_path),
-    }
-}
-
-enum FileSource {
-    Bytes(Vec<u8>),
-    Oid(git2::Oid),
-}
-
-fn insert_source_into_dir(
-    dir: &mut DirNode,
-    parts: &[&str],
-    source: &FileSource,
-) -> anyhow::Result<()> {
-    use std::collections::btree_map::Entry;
-    if parts.is_empty() {
-        return Ok(());
-    }
-    if parts.len() == 1 {
-        match source {
-            FileSource::Bytes(data) => {
-                dir.entries
-                    .insert(parts[0].to_string(), DirEntry::File(data.clone()));
-            }
-            FileSource::Oid(oid) => {
-                dir.entries
-                    .insert(parts[0].to_string(), DirEntry::Oid(*oid));
-            }
-        }
-        Ok(())
-    } else {
-        match dir.entries.entry(parts[0].to_string()) {
-            Entry::Occupied(mut occ) => match occ.get_mut() {
-                DirEntry::Dir(child) => insert_source_into_dir(child, &parts[1..], source),
-                DirEntry::File(_) | DirEntry::Oid(_) => {
-                    let mut new_dir = DirNode::default();
-                    insert_source_into_dir(&mut new_dir, &parts[1..], source)?;
-                    *occ.get_mut() = DirEntry::Dir(Box::new(new_dir));
-                    Ok(())
-                }
-            },
-            Entry::Vacant(vac) => {
-                let mut new_dir = DirNode::default();
-                insert_source_into_dir(&mut new_dir, &parts[1..], source)?;
-                vac.insert(DirEntry::Dir(Box::new(new_dir)));
-                Ok(())
-            }
-        }
-    }
-}
-
-fn read_commit_blob_oids(
-    repo: &Repository,
-    commit_id: &[u8],
-) -> anyhow::Result<HashMap<String, git2::Oid>> {
-    let oid = git2::Oid::from_bytes(commit_id)?;
-    let commit = repo.find_commit(oid)?;
-    let tree = commit.tree()?;
-    let mut blobs = HashMap::new();
-    tree.walk(TreeWalkMode::PreOrder, |root, entry| {
-        if entry.kind() == Some(ObjectType::Blob) {
-            if let Some(name) = entry.name() {
-                let key = format!("{}{}", root, name);
-                blobs.insert(key, entry.id());
-            }
-        }
-        TreeWalkResult::Ok
-    })?;
-    Ok(blobs)
-}
-
-fn build_tree_from_sources(
-    repo: &Repository,
-    entries: &BTreeMap<String, FileSource>,
-) -> anyhow::Result<git2::Oid> {
-    // We'll reconstruct a DirNode and then write it, but we need to preserve existing blob OIDs for FileSource::Oid.
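The point of `FileSource::Oid`, which the body below implements: unchanged files can be re-attached to the new tree by object id alone, so their contents never have to be loaded from storage. A sketch under the same types (paths invented):

```rust
use std::collections::BTreeMap;
use git2::{Oid, Repository};

fn demo(repo: &Repository, unchanged_blob: Oid) -> anyhow::Result<Oid> {
    let mut sources: BTreeMap<String, FileSource> = BTreeMap::new();
    // Large, untouched file: reuse the existing blob oid, no bytes read.
    sources.insert("assets/video.bin".into(), FileSource::Oid(unchanged_blob));
    // Edited file: carry the new bytes inline.
    sources.insert("notes.md".into(), FileSource::Bytes(b"edited".to_vec()));
    build_tree_from_sources(repo, &sources)
}
```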
- let mut root = DirNode::default(); - for (path, src) in entries.iter() { - let parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect(); - if parts.is_empty() { - continue; - } - insert_source_into_dir(&mut root, &parts, src)?; - } - write_dir(repo, &root) -} - -// write_dir(repo, &DirNode) now supports DirEntry::Oid, so no extra variant needed diff --git a/api/src/infrastructure/mod.rs b/api/src/infrastructure/mod.rs deleted file mode 100644 index 7d1f64ec..00000000 --- a/api/src/infrastructure/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub mod auth; -pub mod crypto; -pub mod db; -pub mod documents; -pub mod git; -pub mod health; -pub mod plugins; -pub mod realtime; -pub mod storage; diff --git a/api/src/infrastructure/plugins/filesystem_store.rs b/api/src/infrastructure/plugins/filesystem_store.rs deleted file mode 100644 index bb4ed3cd..00000000 --- a/api/src/infrastructure/plugins/filesystem_store.rs +++ /dev/null @@ -1,860 +0,0 @@ -use std::collections::HashMap; -use std::io::Read; -use std::path::{Component, Path, PathBuf}; -use std::sync::{Arc, Mutex}; -use std::time::{Duration, SystemTime}; - -use anyhow::{Context, bail}; -use async_trait::async_trait; -use chrono::Utc; -use extism::{Manifest, Plugin, PluginBuilder, Wasm}; -use once_cell::sync::Lazy; -use regex::Regex; -use semver::Version; -use serde_json::{Map as JsonMap, Value as JsonValue, json}; -use tokio::{sync::RwLock, task}; -use uuid::Uuid; - -use crate::application::dto::plugins::ExecResult; -use crate::application::ports::plugin_asset_store::{ - PluginAssetPayload, PluginAssetStore, PluginAssetStoreScope, -}; -use crate::application::ports::plugin_installer::{ - InstalledPlugin, PluginInstallError, PluginInstaller, -}; -use crate::application::ports::plugin_runtime::PluginRuntime; - -static PLUGIN_ID_RE: Lazy = - Lazy::new(|| Regex::new(r"^[A-Za-z0-9_-]+$").expect("valid regex")); -static PLUGIN_VERSION_RE: Lazy = - Lazy::new(|| Regex::new(r"^[A-Za-z0-9._-]+$").expect("valid regex")); - -pub struct FilesystemPluginStore { - root: PathBuf, - plugin_cache: Arc>>, - limits: PluginExecutionLimits, -} - -struct CachedPlugin { - modified: SystemTime, - plugin: Arc>, -} - -#[derive(Clone, Copy)] -pub struct PluginExecutionLimits { - pub timeout: Option, - pub memory_max_pages: Option, - pub fuel_limit: Option, -} - -impl PluginExecutionLimits { - pub const fn new( - timeout: Option, - memory_max_pages: Option, - fuel_limit: Option, - ) -> Self { - Self { - timeout, - memory_max_pages, - fuel_limit, - } - } -} - -impl Default for PluginExecutionLimits { - fn default() -> Self { - Self { - timeout: Some(Duration::from_secs(10)), - memory_max_pages: Some(4096), // ~256 MiB - fuel_limit: Some(50_000_000), - } - } -} - -#[derive(Clone, Copy)] -enum InvocationKind { - Exec, - Render, -} - -impl InvocationKind { - fn as_str(&self) -> &'static str { - match self { - InvocationKind::Exec => "exec", - InvocationKind::Render => "render", - } - } -} - -impl FilesystemPluginStore { - pub(crate) fn is_valid_plugin_id(plugin_id: &str) -> bool { - !plugin_id.is_empty() && PLUGIN_ID_RE.is_match(plugin_id) - } - - pub(crate) fn ensure_valid_plugin_id(plugin_id: &str) -> anyhow::Result<()> { - if Self::is_valid_plugin_id(plugin_id) { - Ok(()) - } else { - bail!("invalid plugin id"); - } - } - - pub fn new(configured_dir: &str, limits: PluginExecutionLimits) -> anyhow::Result { - let root = Self::resolve_root(configured_dir)?; - Ok(Self { - root, - plugin_cache: Arc::new(RwLock::new(HashMap::new())), - limits, - }) - } - - pub fn 
root(&self) -> &Path { - &self.root - } - - pub fn global_root(&self) -> PathBuf { - self.root.join("global") - } - - pub fn user_root(&self, user_id: &Uuid) -> PathBuf { - self.root.join(user_id.to_string()) - } - - pub fn user_plugin_manifest_path( - &self, - user_id: &Uuid, - plugin_id: &str, - version: &str, - ) -> PathBuf { - self.user_root(user_id) - .join(plugin_id) - .join(version) - .join("plugin.json") - } - - pub fn global_plugin_manifest_path(&self, plugin_id: &str, version: &str) -> PathBuf { - self.global_root() - .join(plugin_id) - .join(version) - .join("plugin.json") - } - - fn resolve_root(configured_dir: &str) -> anyhow::Result { - let configured = configured_dir.trim(); - if !configured.is_empty() { - let path = PathBuf::from(configured); - if !path.exists() { - std::fs::create_dir_all(&path)?; - } - return path.canonicalize().or_else(|_| Ok(path)); - } - let candidates = [PathBuf::from("./plugins"), PathBuf::from("../plugins")]; - for candidate in &candidates { - if candidate.exists() { - return candidate.canonicalize().or_else(|_| Ok(candidate.clone())); - } - } - let fallback = PathBuf::from("./plugins"); - std::fs::create_dir_all(&fallback)?; - match fallback.canonicalize() { - Ok(p) => Ok(p), - Err(_) => Ok(fallback), - } - } - - pub fn latest_version_dir(&self, base: &Path) -> anyhow::Result> { - if !base.exists() { - return Ok(None); - } - let mut best: Option<(PathBuf, String, Option)> = None; - for entry in std::fs::read_dir(base)? { - let entry = entry?; - if !entry.file_type()?.is_dir() { - continue; - } - let candidate_name = entry.file_name().to_string_lossy().into_owned(); - let candidate_semver = Version::parse(&candidate_name).ok(); - match &best { - Some((_path, current_name, current_semver)) => { - let is_newer = match (&candidate_semver, current_semver) { - (Some(candidate), Some(current)) => candidate > current, - (Some(_), None) => true, - (None, Some(_)) => false, - (None, None) => candidate_name.as_str() > current_name.as_str(), - }; - if is_newer { - best = Some((entry.path(), candidate_name, candidate_semver)); - } - } - None => best = Some((entry.path(), candidate_name, candidate_semver)), - } - } - Ok(best.map(|(path, _, _)| path)) - } - - fn locate_plugin_dir( - &self, - user_id: Option, - plugin: &str, - ) -> anyhow::Result> { - if !Self::is_valid_plugin_id(plugin) { - return Ok(None); - } - if let Some(uid) = user_id { - let base = self.user_root(&uid).join(plugin); - if let Some(dir) = self.latest_version_dir(&base)? 
{ - return Ok(Some(dir)); - } - } - let base = self.global_root().join(plugin); - self.latest_version_dir(&base) - } - - async fn resolve_backend_wasm_path(&self, plugin_dir: &Path) -> anyhow::Result { - let manifest_path = plugin_dir.join("plugin.json"); - let manifest_str = tokio::fs::read_to_string(&manifest_path) - .await - .with_context(|| format!("read plugin manifest at {}", manifest_path.display()))?; - let manifest: JsonValue = serde_json::from_str(&manifest_str) - .with_context(|| format!("parse plugin manifest at {}", manifest_path.display()))?; - - let wasm_rel = manifest - .get("backend") - .and_then(|b| b.get("wasm")) - .and_then(|w| w.as_str()) - .unwrap_or("backend/plugin.wasm"); - let sanitized = Self::sanitize_relative_path(wasm_rel)?; - Ok(plugin_dir.join(sanitized)) - } - - fn extract_permissions(manifest: &JsonValue) -> Vec { - manifest - .get("permissions") - .and_then(|value| value.as_array()) - .map(|items| { - items - .iter() - .filter_map(|item| item.as_str().map(|s| s.to_string())) - .collect::>() - }) - .unwrap_or_else(Vec::new) - } - - async fn load_plugin_instance(&self, plugin_dir: &Path) -> anyhow::Result>> { - let wasm_path = self.resolve_backend_wasm_path(plugin_dir).await?; - let metadata = tokio::fs::metadata(&wasm_path) - .await - .with_context(|| format!("read metadata for {}", wasm_path.display()))?; - let modified = metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH); - - { - let cache = self.plugin_cache.read().await; - if let Some(entry) = cache.get(&wasm_path) { - if entry.modified == modified { - return Ok(entry.plugin.clone()); - } - } - } - - let wasm_bytes = tokio::fs::read(&wasm_path) - .await - .with_context(|| format!("read wasm module at {}", wasm_path.display()))?; - let wasm_key = wasm_path.clone(); - let limits = self.limits; - let plugin = task::spawn_blocking(move || -> anyhow::Result { - let mut manifest = Manifest::new([Wasm::data(wasm_bytes)]); - if let Some(timeout) = limits.timeout { - manifest = manifest.with_timeout(timeout); - } - if let Some(memory_max) = limits.memory_max_pages { - manifest = manifest.with_memory_max(memory_max); - } - let builder = PluginBuilder::new(manifest).with_wasi(true); - let builder = if let Some(fuel_limit) = limits.fuel_limit { - builder.with_fuel_limit(fuel_limit) - } else { - builder - }; - builder.build().context("create plugin") - }) - .await - .context("join extism initialization task")??; - - let plugin_arc = Arc::new(Mutex::new(plugin)); - let mut cache = self.plugin_cache.write().await; - cache.insert( - wasm_key, - CachedPlugin { - modified, - plugin: plugin_arc.clone(), - }, - ); - Ok(plugin_arc) - } - - async fn invoke_plugin( - &self, - plugin_dir: &Path, - function: &str, - input: Vec, - ) -> anyhow::Result> { - let plugin = self.load_plugin_instance(plugin_dir).await?; - let function = function.to_string(); - let output = task::spawn_blocking(move || -> anyhow::Result> { - let mut guard = plugin - .lock() - .map_err(|_| anyhow::anyhow!("extism plugin mutex poisoned"))?; - let bytes: &[u8] = guard - .call(&function, &input) - .map_err(|err| anyhow::anyhow!(format!("extism call error: {err}")))?; - Ok(bytes.to_vec()) - }) - .await - .context("join extism call task")??; - Ok(output) - } - - fn sanitize_relative_path(path: &str) -> anyhow::Result { - let trimmed = path.trim(); - let without_root = trimmed.trim_start_matches('/'); - if without_root.is_empty() { - anyhow::bail!("invalid backend wasm path"); - } - if without_root - .split('/') - .any(|segment| segment.is_empty() || 
segment == "." || segment == "..") - { - anyhow::bail!("invalid backend wasm path segment"); - } - Ok(without_root.to_string()) - } - - fn build_invocation_context( - user_id: Option, - plugin: &str, - invocation: &str, - doc_id: Option, - kind: InvocationKind, - ) -> JsonValue { - let timestamp = Utc::now().to_rfc3339(); - let mut ctx = JsonMap::new(); - ctx.insert("plugin".to_string(), json!({ "id": plugin })); - ctx.insert("invocation".to_string(), json!(invocation)); - ctx.insert("timestamp".to_string(), json!(timestamp)); - ctx.insert( - "invocation_meta".to_string(), - json!({ - "name": invocation, - "kind": kind.as_str(), - "timestamp": timestamp, - }), - ); - if let Some(uid) = user_id { - ctx.insert("user".to_string(), json!({ "id": uid })); - ctx.insert("user_id".to_string(), json!(uid)); - } - if let Some(doc) = doc_id { - ctx.insert("doc".to_string(), json!({ "id": doc })); - ctx.insert("doc_id".to_string(), json!(doc)); - } - ctx.insert("kind".to_string(), json!(kind.as_str())); - JsonValue::Object(ctx) - } - - fn extract_doc_id(value: &JsonValue) -> Option { - match value { - JsonValue::Object(map) => { - let direct_keys = ["docId", "doc_id", "doc", "document"]; - for key in direct_keys { - if let Some(candidate) = map.get(key) { - if let Some(id) = Self::value_to_uuid(candidate) { - return Some(id); - } - } - } - - let nested_keys = ["options", "payload", "context", "meta"]; // fallback search - for key in nested_keys { - if let Some(nested) = map.get(key) { - if let Some(id) = Self::extract_doc_id(nested) { - return Some(id); - } - } - } - None - } - JsonValue::String(s) => Uuid::parse_str(s).ok(), - JsonValue::Array(items) => { - for item in items { - if let Some(id) = Self::extract_doc_id(item) { - return Some(id); - } - } - None - } - _ => None, - } - } - - fn value_to_uuid(value: &JsonValue) -> Option { - match value { - JsonValue::String(s) => Uuid::parse_str(s).ok(), - JsonValue::Object(obj) => obj - .get("id") - .and_then(|id| id.as_str()) - .and_then(|s| Uuid::parse_str(s).ok()), - _ => None, - } - } - - fn validate_manifest( - manifest: &serde_json::Value, - ) -> Result<(String, String), PluginInstallError> { - let id = manifest - .get("id") - .and_then(|v| v.as_str()) - .ok_or_else(|| PluginInstallError::InvalidPackage(anyhow::anyhow!("missing id")))? - .to_string(); - let version = manifest - .get("version") - .and_then(|v| v.as_str()) - .ok_or_else(|| PluginInstallError::InvalidPackage(anyhow::anyhow!("missing version")))? 
- .to_string(); - - if !PLUGIN_ID_RE.is_match(&id) { - return Err(PluginInstallError::InvalidPackage(anyhow::anyhow!( - "invalid plugin id" - ))); - } - if !PLUGIN_VERSION_RE.is_match(&version) { - return Err(PluginInstallError::InvalidPackage(anyhow::anyhow!( - "invalid plugin version" - ))); - } - Ok((id, version)) - } - - fn extract_archive(archive: &[u8], dest_root: &Path) -> Result<(), PluginInstallError> { - let reader = std::io::Cursor::new(archive); - let mut archive = zip::ZipArchive::new(reader) - .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?; - - let dest_root = dest_root - .canonicalize() - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - - for i in 0..archive.len() { - let mut file = archive - .by_index(i) - .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?; - let Some(rel_path) = file.enclosed_name().map(|p| p.to_path_buf()) else { - continue; - }; - - if let Some(mode) = file.unix_mode() { - if (mode & 0o170000) == 0o120000 { - continue; - } - } - - let outpath = dest_root.join(&rel_path); - if !outpath.starts_with(&dest_root) { - continue; - } - - if file.is_dir() { - std::fs::create_dir_all(&outpath) - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - } else { - if let Some(parent) = outpath.parent() { - std::fs::create_dir_all(parent) - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - } - let mut outfile = std::fs::File::create(&outpath) - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - std::io::copy(&mut file, &mut outfile) - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - } - } - - Ok(()) - } - - fn read_manifest_from_archive( - archive_vec: &[u8], - ) -> Result<(serde_json::Value, InstalledPlugin), PluginInstallError> { - let reader = std::io::Cursor::new(archive_vec); - let mut zip = zip::ZipArchive::new(reader) - .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?; - - let mut manifest_json: Option = None; - for i in 0..zip.len() { - let mut file = zip - .by_index(i) - .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?; - if file.name().ends_with("plugin.json") { - let mut contents = String::new(); - file.read_to_string(&mut contents) - .map_err(|e| PluginInstallError::InvalidPackage(anyhow::anyhow!(e)))?; - manifest_json = serde_json::from_str(&contents).ok(); - break; - } - } - - let manifest = manifest_json.ok_or_else(|| { - PluginInstallError::InvalidPackage(anyhow::anyhow!("plugin.json not found")) - })?; - let (id, version) = Self::validate_manifest(&manifest)?; - Ok((manifest, InstalledPlugin { id, version })) - } - - pub fn load_manifest(&self, manifest_path: &Path) -> Option { - std::fs::read_to_string(manifest_path) - .ok() - .and_then(|s| serde_json::from_str(&s).ok()) - } - - pub fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> anyhow::Result<()> { - Self::ensure_valid_plugin_id(plugin_id)?; - let root = self.user_root(user_id); - let path = root.join(plugin_id); - if !path.starts_with(&root) { - bail!("invalid plugin path"); - } - if path.exists() { - std::fs::remove_dir_all(&path)?; - } - Ok(()) - } -} - -#[async_trait] -impl PluginInstaller for FilesystemPluginStore { - async fn install_for_user( - &self, - user_id: Uuid, - archive: &[u8], - ) -> Result { - let archive_vec = archive.to_vec(); - let (_manifest, installed) = Self::read_manifest_from_archive(&archive_vec)?; - - let dest_root = self - .user_root(&user_id) - .join(&installed.id) - .join(&installed.version); 
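`extract_archive` above layers three defenses against hostile archives: `enclosed_name()` rejects absolute and `..` entry paths, entries whose unix mode marks a symlink are skipped, and the joined output path is re-checked against the canonicalized destination. Condensed into two hypothetical helpers:

```rust
use std::path::{Path, PathBuf};

// 0o170000 masks the file-type bits of a unix mode; 0o120000 is S_IFLNK.
fn is_symlink_mode(unix_mode: u32) -> bool {
    (unix_mode & 0o170000) == 0o120000
}

// Assumes `entry_rel` was already vetted (e.g. by zip's enclosed_name, which
// strips absolute paths and `..`); the starts_with check is a final guard.
fn safe_target(canonical_dest: &Path, entry_rel: &Path) -> Option<PathBuf> {
    let out = canonical_dest.join(entry_rel);
    out.starts_with(canonical_dest).then_some(out)
}
```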
- - match tokio::fs::metadata(&dest_root).await { - Ok(_) => { - tokio::fs::remove_dir_all(&dest_root) - .await - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - } - Err(err) if err.kind() == std::io::ErrorKind::NotFound => {} - Err(err) => return Err(PluginInstallError::Storage(anyhow::anyhow!(err))), - } - if let Some(parent) = dest_root.parent() { - tokio::fs::create_dir_all(parent) - .await - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - } - tokio::fs::create_dir_all(&dest_root) - .await - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))?; - - let dest_for_extract = dest_root.clone(); - let archive_for_extract = archive_vec; - tokio::task::spawn_blocking(move || { - FilesystemPluginStore::extract_archive(&archive_for_extract, &dest_for_extract) - }) - .await - .map_err(|e| PluginInstallError::Storage(anyhow::anyhow!(e)))??; - - Ok(installed) - } -} - -#[async_trait] -impl PluginAssetStore for FilesystemPluginStore { - async fn fetch_asset( - &self, - scope: PluginAssetStoreScope<'_>, - plugin_id: &str, - version: &str, - relative_path: &str, - ) -> anyhow::Result { - Self::ensure_valid_plugin_id(plugin_id)?; - if version.is_empty() - || version.len() > 128 - || version.contains("..") - || version.contains(['/', '\\']) - { - bail!("invalid plugin version"); - } - - let base_root = match scope { - PluginAssetStoreScope::Global => self.global_root(), - PluginAssetStoreScope::User { owner_id } => self.user_root(owner_id), - }; - - let mut sanitized = PathBuf::new(); - for component in Path::new(relative_path).components() { - match component { - Component::Normal(part) => sanitized.push(part), - Component::CurDir => continue, - _ => bail!("invalid asset path"), - } - } - if sanitized.as_os_str().is_empty() { - bail!("invalid asset path"); - } - - let plugin_dir = base_root.join(plugin_id).join(version); - let full_path = plugin_dir.join(&sanitized); - if !full_path.starts_with(&plugin_dir) { - bail!("invalid asset scope"); - } - - let bytes = tokio::fs::read(&full_path).await?; - let content_type = mime_guess::from_path(&full_path) - .first_raw() - .unwrap_or("application/octet-stream") - .to_string(); - Ok(PluginAssetPayload { - bytes, - content_type, - }) - } - - async fn remove_user_plugin_dir(&self, user_id: &Uuid, plugin_id: &str) -> anyhow::Result<()> { - FilesystemPluginStore::remove_user_plugin_dir(self, user_id, plugin_id) - } - - async fn list_latest_global_manifests( - &self, - ) -> anyhow::Result> { - use std::io::ErrorKind; - let mut items = Vec::new(); - let root = self.global_root(); - let mut entries = match tokio::fs::read_dir(&root).await { - Ok(iter) => iter, - Err(err) if err.kind() == ErrorKind::NotFound => return Ok(items), - Err(err) => return Err(err.into()), - }; - - while let Some(entry) = entries.next_entry().await? 
{ - if !entry.file_type().await?.is_dir() { - continue; - } - - let plugin_id = entry.file_name().to_string_lossy().to_string(); - let base = entry.path(); - let best = match self.latest_version_dir(&base) { - Ok(Some(path)) => path, - Ok(None) => continue, - Err(err) => { - tracing::warn!( - error = ?err, - plugin_id = plugin_id.as_str(), - path = ?base, - "resolve_global_plugin_version_failed" - ); - continue; - } - }; - - let version = best - .file_name() - .and_then(|v| v.to_str()) - .unwrap_or("0.0.0") - .to_string(); - let manifest_path = best.join("plugin.json"); - let contents = match tokio::fs::read_to_string(&manifest_path).await { - Ok(contents) => contents, - Err(err) if err.kind() == ErrorKind::NotFound => continue, - Err(err) => { - tracing::warn!( - error = ?err, - plugin_id = plugin_id.as_str(), - version = version.as_str(), - path = ?manifest_path, - "read_global_plugin_manifest_failed" - ); - continue; - } - }; - - match serde_json::from_str::(&contents) { - Ok(json) => items.push((plugin_id.clone(), version.clone(), json)), - Err(err) => tracing::warn!( - error = ?err, - plugin_id = plugin_id.as_str(), - version = version.as_str(), - path = ?manifest_path, - "parse_global_plugin_manifest_failed" - ), - } - } - - Ok(items) - } - - async fn load_user_manifest( - &self, - user_id: &Uuid, - plugin_id: &str, - version: &str, - ) -> anyhow::Result> { - use std::io::ErrorKind; - let manifest_path = self.user_plugin_manifest_path(user_id, plugin_id, version); - match tokio::fs::read_to_string(&manifest_path).await { - Ok(contents) => { - let json = serde_json::from_str::(&contents)?; - Ok(Some(json)) - } - Err(err) if err.kind() == ErrorKind::NotFound => Ok(None), - Err(err) => Err(err.into()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::TempDir; - - #[test] - fn prefers_semver_when_available() { - let temp = TempDir::new().unwrap(); - let root = temp.path().join("plugins_test"); - std::fs::create_dir_all(root.as_path()).unwrap(); - - let store = - FilesystemPluginStore::new(root.to_str().unwrap(), PluginExecutionLimits::default()) - .unwrap(); - - let base = store.root().join("marp"); - std::fs::create_dir_all(base.join("1.9.0")).unwrap(); - std::fs::create_dir_all(base.join("1.10.0")).unwrap(); - - let latest = store.latest_version_dir(&base).unwrap().unwrap(); - assert_eq!(latest.file_name().unwrap(), "1.10.0"); - } - - #[test] - fn falls_back_to_lexical_for_non_semver() { - let temp = TempDir::new().unwrap(); - let root = temp.path().join("plugins_test_non_semver"); - std::fs::create_dir_all(root.as_path()).unwrap(); - - let store = - FilesystemPluginStore::new(root.to_str().unwrap(), PluginExecutionLimits::default()) - .unwrap(); - - let base = store.root().join("example"); - std::fs::create_dir_all(base.join("beta")).unwrap(); - std::fs::create_dir_all(base.join("alpha")).unwrap(); - - let latest = store.latest_version_dir(&base).unwrap().unwrap(); - assert_eq!(latest.file_name().unwrap(), "beta"); - } -} - -#[async_trait] -impl PluginRuntime for FilesystemPluginStore { - async fn execute( - &self, - user_id: Option, - plugin: &str, - action: &str, - payload: &serde_json::Value, - ) -> anyhow::Result> { - let plugin_dir = self.locate_plugin_dir(user_id, plugin)?; - - let Some(plugin_dir) = plugin_dir else { - return Ok(None); - }; - - let doc_hint = Self::extract_doc_id(payload); - let ctx = - Self::build_invocation_context(user_id, plugin, action, doc_hint, InvocationKind::Exec); - let input = json!({ - "action": action, - "payload": payload, - 
"ctx": ctx - }); - - let out = self - .invoke_plugin(&plugin_dir, "exec", serde_json::to_vec(&input)?) - .await?; - - if out.is_empty() { - return Ok(None); - } - - let res: ExecResult = serde_json::from_slice(&out)?; - Ok(Some(res)) - } - - async fn render_placeholder( - &self, - user_id: Option, - plugin: &str, - function: &str, - request: &serde_json::Value, - ) -> anyhow::Result> { - let plugin_dir = self.locate_plugin_dir(user_id, plugin)?; - let Some(plugin_dir) = plugin_dir else { - return Ok(None); - }; - - let doc_hint = Self::extract_doc_id(request); - - let ctx = Self::build_invocation_context( - user_id, - plugin, - function, - doc_hint, - InvocationKind::Render, - ); - - let envelope = match request.clone() { - JsonValue::Object(mut map) => { - map.insert("context".to_string(), ctx); - JsonValue::Object(map) - } - other => json!({ - "payload": other, - "context": ctx - }), - }; - - let out = self - .invoke_plugin(&plugin_dir, function, serde_json::to_vec(&envelope)?) - .await?; - if out.is_empty() { - return Ok(None); - } - let value = serde_json::from_slice(&out)?; - Ok(Some(value)) - } - - async fn permissions( - &self, - user_id: Option, - plugin: &str, - ) -> anyhow::Result>> { - let plugin_dir = self.locate_plugin_dir(user_id, plugin)?; - let Some(plugin_dir) = plugin_dir else { - return Ok(None); - }; - - let manifest_path = plugin_dir.join("plugin.json"); - let manifest_str = tokio::fs::read_to_string(&manifest_path) - .await - .with_context(|| format!("read plugin manifest at {}", manifest_path.display()))?; - let manifest: JsonValue = serde_json::from_str(&manifest_str) - .with_context(|| format!("parse plugin manifest at {}", manifest_path.display()))?; - - Ok(Some(Self::extract_permissions(&manifest))) - } -} diff --git a/api/src/infrastructure/plugins/package_fetcher_reqwest.rs b/api/src/infrastructure/plugins/package_fetcher_reqwest.rs deleted file mode 100644 index 731e2dc2..00000000 --- a/api/src/infrastructure/plugins/package_fetcher_reqwest.rs +++ /dev/null @@ -1,37 +0,0 @@ -use async_trait::async_trait; - -use crate::application::ports::plugin_package_fetcher::PluginPackageFetcher; - -pub struct ReqwestPluginPackageFetcher { - client: reqwest::Client, -} - -impl ReqwestPluginPackageFetcher { - pub fn new() -> Self { - Self { - client: reqwest::Client::new(), - } - } -} - -#[async_trait] -impl PluginPackageFetcher for ReqwestPluginPackageFetcher { - async fn fetch(&self, url: &str, token: Option<&str>) -> anyhow::Result> { - let mut req = self.client.get(url); - if let Some(t) = token { - req = req.bearer_auth(t); - } - let resp = req - .send() - .await - .map_err(|e| anyhow::anyhow!("request failed: {e}"))?; - if !resp.status().is_success() { - anyhow::bail!("upstream returned status {}", resp.status()); - } - let bytes = resp - .bytes() - .await - .map_err(|e| anyhow::anyhow!("failed to read body: {e}"))?; - Ok(bytes.to_vec()) - } -} diff --git a/api/src/infrastructure/realtime/doc_persistence.rs b/api/src/infrastructure/realtime/doc_persistence.rs deleted file mode 100644 index 846cbc42..00000000 --- a/api/src/infrastructure/realtime/doc_persistence.rs +++ /dev/null @@ -1,131 +0,0 @@ -use async_trait::async_trait; -use sqlx::Row; -use uuid::Uuid; - -use crate::application::ports::realtime_persistence_port::{ - DocPersistencePort, DocumentMissingError, -}; -use crate::infrastructure::db::PgPool; - -#[derive(Clone)] -pub struct SqlxDocPersistenceAdapter { - pool: PgPool, -} - -impl SqlxDocPersistenceAdapter { - pub fn new(pool: PgPool) -> Self { - Self { pool 
} - } -} - -#[async_trait] -impl DocPersistencePort for SqlxDocPersistenceAdapter { - async fn append_update_with_seq( - &self, - doc_id: &Uuid, - seq: i64, - update: &[u8], - ) -> anyhow::Result<()> { - sqlx::query("INSERT INTO document_updates (document_id, seq, update) VALUES ($1, $2, $3)") - .bind(doc_id) - .bind(seq) - .bind(update) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn latest_update_seq(&self, doc_id: &Uuid) -> anyhow::Result> { - let row = - sqlx::query("SELECT MAX(seq) AS max_seq FROM document_updates WHERE document_id = $1") - .bind(doc_id) - .fetch_optional(&self.pool) - .await?; - Ok(row.and_then(|row| row.try_get("max_seq").ok())) - } - - async fn persist_snapshot( - &self, - doc_id: &Uuid, - version: i64, - snapshot: &[u8], - ) -> anyhow::Result<()> { - let result = sqlx::query( - "INSERT INTO document_snapshots (document_id, version, snapshot) VALUES ($1, $2, $3) - ON CONFLICT (document_id, version) DO UPDATE SET snapshot = EXCLUDED.snapshot", - ) - .bind(doc_id) - .bind(version as i32) - .bind(snapshot) - .execute(&self.pool) - .await; - - match result { - Ok(_) => Ok(()), - Err(sqlx::Error::Database(db_err)) - if matches!( - db_err.constraint(), - Some("document_snapshots_document_id_fkey") - ) => - { - Err(DocumentMissingError { - document_id: *doc_id, - } - .into()) - } - Err(err) => Err(err.into()), - } - } - - async fn latest_snapshot_entry(&self, doc_id: &Uuid) -> anyhow::Result)>> { - let row = sqlx::query( - "SELECT version, snapshot FROM document_snapshots WHERE document_id = $1 - ORDER BY version DESC LIMIT 1", - ) - .bind(doc_id) - .fetch_optional(&self.pool) - .await?; - Ok(row.map(|row| { - let version = row.get::("version") as i64; - let snapshot = row.get("snapshot"); - (version, snapshot) - })) - } - - async fn latest_snapshot_version(&self, doc_id: &Uuid) -> anyhow::Result> { - Ok(self - .latest_snapshot_entry(doc_id) - .await? 
- .map(|(version, _)| version)) - } - - async fn prune_snapshots(&self, doc_id: &Uuid, keep_latest: i64) -> anyhow::Result<()> { - sqlx::query( - "DELETE FROM document_snapshots WHERE document_id = $1 AND version NOT IN ( - SELECT version FROM document_snapshots WHERE document_id = $1 ORDER BY version DESC LIMIT $2 - )", - ) - .bind(doc_id) - .bind(keep_latest) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn prune_updates_before(&self, doc_id: &Uuid, seq_inclusive: i64) -> anyhow::Result<()> { - sqlx::query("DELETE FROM document_updates WHERE document_id = $1 AND seq <= $2") - .bind(doc_id) - .bind(seq_inclusive) - .execute(&self.pool) - .await?; - Ok(()) - } - - async fn clear_updates(&self, doc_id: &Uuid) -> anyhow::Result<()> { - sqlx::query("DELETE FROM document_updates WHERE document_id = $1") - .bind(doc_id) - .execute(&self.pool) - .await?; - Ok(()) - } -} diff --git a/api/src/infrastructure/realtime/doc_state_reader.rs b/api/src/infrastructure/realtime/doc_state_reader.rs deleted file mode 100644 index 005817c2..00000000 --- a/api/src/infrastructure/realtime/doc_state_reader.rs +++ /dev/null @@ -1,83 +0,0 @@ -use anyhow::Context; -use async_trait::async_trait; -use futures_util::TryStreamExt; -use sqlx::Row; -use uuid::Uuid; - -use crate::application::ports::realtime_hydration_port::{ - DocSnapshot, DocStateReader, DocUpdate, DocumentRecord, -}; -use crate::infrastructure::db::PgPool; - -#[derive(Clone)] -pub struct SqlxDocStateReader { - pool: PgPool, -} - -impl SqlxDocStateReader { - pub fn new(pool: PgPool) -> Self { - Self { pool } - } -} - -#[async_trait] -impl DocStateReader for SqlxDocStateReader { - async fn latest_snapshot(&self, doc_id: &Uuid) -> anyhow::Result> { - let row = sqlx::query( - "SELECT version, snapshot FROM document_snapshots WHERE document_id = $1 ORDER BY version DESC LIMIT 1", - ) - .bind(doc_id) - .fetch_optional(&self.pool) - .await?; - - if let Some(row) = row { - let version: i32 = row.get("version"); - let snapshot = row - .try_get::, _>("snapshot") - .context("doc_snapshot_missing")?; - Ok(Some(DocSnapshot { - version: version as i64, - snapshot, - })) - } else { - Ok(None) - } - } - - async fn updates_since(&self, doc_id: &Uuid, from_seq: i64) -> anyhow::Result> { - let mut rows = sqlx::query( - "SELECT seq, update FROM document_updates WHERE document_id = $1 AND seq > $2 ORDER BY seq ASC", - ) - .bind(doc_id) - .bind(from_seq) - .fetch(&self.pool); - - let mut result = Vec::new(); - while let Some(row) = rows.try_next().await? 
{ - let seq: i64 = row.get("seq"); - let update = row - .try_get::, _>("update") - .context("doc_update_missing")?; - result.push(DocUpdate { seq, update }); - } - Ok(result) - } - - async fn document_record(&self, doc_id: &Uuid) -> anyhow::Result> { - let row = sqlx::query( - "SELECT type, path, desired_path, title, owner_id, workspace_id FROM documents WHERE id = $1", - ) - .bind(doc_id) - .fetch_optional(&self.pool) - .await?; - - Ok(row.map(|row| DocumentRecord { - doc_type: row.get("type"), - path: row.try_get("path").ok(), - desired_path: row.try_get("desired_path").ok(), - title: row.get("title"), - owner_id: row.try_get("owner_id").ok(), - workspace_id: row.get("workspace_id"), - })) - } -} diff --git a/api/src/infrastructure/realtime/local_engine.rs b/api/src/infrastructure/realtime/local_engine.rs deleted file mode 100644 index 6d647520..00000000 --- a/api/src/infrastructure/realtime/local_engine.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::application::ports::realtime_port::RealtimeEngine; -use crate::application::ports::realtime_types::{DynRealtimeSink, DynRealtimeStream}; -use crate::application::services::realtime::snapshot::doc_from_snapshot_bytes; - -pub struct LocalRealtimeEngine { - pub hub: crate::infrastructure::realtime::Hub, -} - -#[async_trait::async_trait] -impl RealtimeEngine for LocalRealtimeEngine { - async fn subscribe( - &self, - doc_id: &str, - sink: DynRealtimeSink, - stream: DynRealtimeStream, - can_edit: bool, - ) -> anyhow::Result<()> { - self.hub.subscribe(doc_id, sink, stream, can_edit).await - } - - async fn get_content(&self, doc_id: &str) -> anyhow::Result> { - self.hub.get_content(doc_id).await - } - - async fn force_persist(&self, doc_id: &str) -> anyhow::Result<()> { - self.hub.force_save_to_fs(doc_id).await - } - - async fn apply_snapshot(&self, doc_id: &str, snapshot: &[u8]) -> anyhow::Result<()> { - let doc = doc_from_snapshot_bytes(snapshot)?; - self.hub.apply_snapshot(doc_id, &doc).await - } - - async fn set_document_editable(&self, doc_id: &str, editable: bool) -> anyhow::Result<()> { - self.hub.set_document_editable(doc_id, editable).await - } -} diff --git a/api/src/infrastructure/storage/gitignore_port_impl.rs b/api/src/infrastructure/storage/gitignore_port_impl.rs deleted file mode 100644 index efb93902..00000000 --- a/api/src/infrastructure/storage/gitignore_port_impl.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::application::ports::gitignore_port::GitignorePort; - -pub struct FsGitignorePort; - -#[async_trait::async_trait] -impl GitignorePort for FsGitignorePort { - async fn ensure_gitignore(&self, dir: &str) -> anyhow::Result { - use tokio::io::AsyncWriteExt; - let path = std::path::Path::new(dir).join(".gitignore"); - if let Some(parent) = path.parent() { - tokio::fs::create_dir_all(parent).await?; - } - let defaults = vec![ - "# RefMD auto-generated .gitignore", - "*.md.tmp", - ".DS_Store", - "Thumbs.db", - ".env", - ".env.local", - ]; - let mut created_or_updated = false; - if tokio::fs::try_exists(&path).await.unwrap_or(false) { - let existing = tokio::fs::read_to_string(&path).await.unwrap_or_default(); - let mut lines: std::collections::BTreeSet = - existing.lines().map(|s| s.to_string()).collect(); - let mut changed = false; - for d in &defaults { - if !lines.contains(&d.to_string()) { - lines.insert(d.to_string()); - changed = true; - } - } - if changed { - let mut buf = String::new(); - for l in lines { - buf.push_str(&l); - buf.push('\n'); - } - let mut f = tokio::fs::File::create(&path).await?; - 
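The `.gitignore` merging in this file, condensed: a `BTreeSet` gives de-duplication and a stable sorted line order, so repeated runs converge to the same bytes and the rewrite is idempotent. A self-contained version of the union step (the default patterns are examples):

```rust
use std::collections::BTreeSet;

fn merge_patterns(existing: &str, defaults: &[&str]) -> (String, bool) {
    let mut lines: BTreeSet<String> = existing.lines().map(str::to_string).collect();
    let before = lines.len();
    lines.extend(defaults.iter().map(|d| d.to_string()));
    let changed = lines.len() != before;
    // Rebuild the file body; callers rewrite only when `changed` is true.
    let mut out = String::new();
    for line in &lines {
        out.push_str(line);
        out.push('\n');
    }
    (out, changed)
}
```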
-                f.write_all(buf.as_bytes()).await?;
-                created_or_updated = true;
-            }
-        } else {
-            let mut f = tokio::fs::File::create(&path).await?;
-            for d in &defaults {
-                f.write_all(d.as_bytes()).await?;
-                f.write_all(b"\n").await?;
-            }
-            created_or_updated = true;
-        }
-        Ok(created_or_updated)
-    }
-
-    async fn upsert_gitignore_patterns(
-        &self,
-        dir: &str,
-        patterns: &[String],
-    ) -> anyhow::Result<usize> {
-        use tokio::io::AsyncWriteExt;
-        let path = std::path::Path::new(dir).join(".gitignore");
-        if let Some(parent) = path.parent() {
-            tokio::fs::create_dir_all(parent).await?;
-        }
-        let mut set: std::collections::BTreeSet<String> =
-            if tokio::fs::try_exists(&path).await.unwrap_or(false) {
-                tokio::fs::read_to_string(&path)
-                    .await
-                    .unwrap_or_default()
-                    .lines()
-                    .map(|s| s.to_string())
-                    .collect()
-            } else {
-                Default::default()
-            };
-        let before = set.len();
-        for p in patterns {
-            if !p.trim().is_empty() {
-                set.insert(p.trim().to_string());
-            }
-        }
-        if set.len() != before {
-            let mut buf = String::new();
-            for l in &set {
-                buf.push_str(l);
-                buf.push('\n');
-            }
-            let mut f = tokio::fs::File::create(&path).await?;
-            f.write_all(buf.as_bytes()).await?;
-            return Ok(set.len() - before);
-        }
-        Ok(0)
-    }
-
-    async fn read_gitignore_patterns(&self, dir: &str) -> anyhow::Result<Vec<String>> {
-        let path = std::path::Path::new(dir).join(".gitignore");
-        let content = if tokio::fs::try_exists(&path).await.unwrap_or(false) {
-            tokio::fs::read_to_string(&path).await.unwrap_or_default()
-        } else {
-            String::new()
-        };
-        let patterns: Vec<String> = content
-            .lines()
-            .map(|s| s.trim().to_string())
-            .filter(|s| !s.is_empty() && !s.starts_with('#'))
-            .collect();
-        Ok(patterns)
-    }
-}
diff --git a/api/src/infrastructure/storage/job_queue.rs b/api/src/infrastructure/storage/job_queue.rs
deleted file mode 100644
index b95e0867..00000000
--- a/api/src/infrastructure/storage/job_queue.rs
+++ /dev/null
@@ -1,336 +0,0 @@
-use async_trait::async_trait;
-use chrono::{DateTime, Utc};
-use sqlx::{Postgres, Row, Transaction};
-use uuid::Uuid;
-
-use crate::{
-    application::ports::storage_projection_queue::{
-        StorageProjectionJob, StorageProjectionJobKind, StorageProjectionQueue,
-    },
-    infrastructure::db::PgPool,
-};
-
-pub struct PgStorageProjectionQueue {
-    pool: PgPool,
-}
-
-impl PgStorageProjectionQueue {
-    pub fn new(pool: PgPool) -> Self {
-        Self { pool }
-    }
-
-    fn kind_to_str(kind: StorageProjectionJobKind) -> &'static str {
-        match kind {
-            StorageProjectionJobKind::DocSync => "doc_sync",
-            StorageProjectionJobKind::FolderSync => "folder_sync",
-            StorageProjectionJobKind::DeleteDoc => "delete_doc",
-            StorageProjectionJobKind::DeleteFolder => "delete_folder",
-        }
-    }
-
-    fn str_to_kind(raw: &str) -> anyhow::Result<StorageProjectionJobKind> {
-        match raw {
-            "doc_sync" => Ok(StorageProjectionJobKind::DocSync),
-            "folder_sync" => Ok(StorageProjectionJobKind::FolderSync),
-            "delete_doc" => Ok(StorageProjectionJobKind::DeleteDoc),
-            "delete_folder" => Ok(StorageProjectionJobKind::DeleteFolder),
-            _ => anyhow::bail!("unsupported_storage_projection_job_type {raw}"),
-        }
-    }
-}
-
-#[async_trait]
-impl StorageProjectionQueue for PgStorageProjectionQueue {
-    async fn enqueue_doc_job(
-        &self,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        kind: StorageProjectionJobKind,
-        reason: Option<&str>,
-    ) -> anyhow::Result<()> {
-        match kind {
-            StorageProjectionJobKind::DocSync | StorageProjectionJobKind::DeleteDoc => {}
-            other => anyhow::bail!("job_kind {other:?} requires a folder_id"),
-        }
-
-        let job_type = Self::kind_to_str(kind);
-        sqlx::query(
-            r#"
-            INSERT INTO storage_projection_jobs (workspace_id, job_type, doc_id, reason, attempts, locked_at, last_error)
-            VALUES ($1, $2, $3, $4, 0, NULL, NULL)
-            ON CONFLICT (job_type, doc_id) WHERE doc_id IS NOT NULL
-            DO UPDATE SET reason = EXCLUDED.reason,
-                locked_at = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.locked_at
-                END,
-                attempts = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN 0
-                    ELSE storage_projection_jobs.attempts
-                END,
-                last_error = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.last_error
-                END,
-                workspace_id = EXCLUDED.workspace_id,
-                pending_retry = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN false
-                    ELSE true
-                END,
-                updated_at = now()
-            "#,
-        )
-        .bind(workspace_id)
-        .bind(job_type)
-        .bind(doc_id)
-        .bind(reason)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-
-    async fn enqueue_doc_job_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        doc_id: Uuid,
-        kind: StorageProjectionJobKind,
-        reason: Option<&str>,
-    ) -> anyhow::Result<()> {
-        match kind {
-            StorageProjectionJobKind::DocSync | StorageProjectionJobKind::DeleteDoc => {}
-            other => anyhow::bail!("job_kind {other:?} requires a folder_id"),
-        }
-
-        let job_type = Self::kind_to_str(kind);
-        sqlx::query(
-            r#"
-            INSERT INTO storage_projection_jobs (workspace_id, job_type, doc_id, reason, attempts, locked_at, last_error)
-            VALUES ($1, $2, $3, $4, 0, NULL, NULL)
-            ON CONFLICT (job_type, doc_id) WHERE doc_id IS NOT NULL
-            DO UPDATE SET reason = EXCLUDED.reason,
-                locked_at = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.locked_at
-                END,
-                attempts = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN 0
-                    ELSE storage_projection_jobs.attempts
-                END,
-                last_error = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.last_error
-                END,
-                workspace_id = EXCLUDED.workspace_id,
-                pending_retry = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN false
-                    ELSE true
-                END,
-                updated_at = now()
-            "#,
-        )
-        .bind(workspace_id)
-        .bind(job_type)
-        .bind(doc_id)
-        .bind(reason)
-        .execute(tx.as_mut())
-        .await?;
-        Ok(())
-    }
-
-    async fn enqueue_folder_job(
-        &self,
-        workspace_id: Uuid,
-        folder_id: Uuid,
-        kind: StorageProjectionJobKind,
-        reason: Option<&str>,
-    ) -> anyhow::Result<()> {
-        match kind {
-            StorageProjectionJobKind::FolderSync | StorageProjectionJobKind::DeleteFolder => {}
-            other => anyhow::bail!("job_kind {other:?} requires a doc_id"),
-        }
-
-        let job_type = Self::kind_to_str(kind);
-        sqlx::query(
-            r#"
-            INSERT INTO storage_projection_jobs (workspace_id, job_type, folder_id, reason, attempts, locked_at, last_error)
-            VALUES ($1, $2, $3, $4, 0, NULL, NULL)
-            ON CONFLICT (job_type, folder_id) WHERE folder_id IS NOT NULL
-            DO UPDATE SET reason = EXCLUDED.reason,
-                locked_at = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.locked_at
-                END,
-                attempts = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN 0
-                    ELSE storage_projection_jobs.attempts
-                END,
-                last_error = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.last_error
-                END,
-                workspace_id = EXCLUDED.workspace_id,
-                pending_retry = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN false
-                    ELSE true
-                END,
-                updated_at = now()
-            "#,
-        )
-        .bind(workspace_id)
-        .bind(job_type)
-        .bind(folder_id)
-        .bind(reason)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
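// [Editor's note] The ON CONFLICT arm above (shared by all four enqueue variants)
// keeps at most one queued row per (job_type, target id). If the existing row is
// unlocked, its attempts/last_error are reset in place; if a worker currently holds
// it, only pending_retry flips to true, and complete_job (further down) then re-arms
// the row instead of deleting it.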
-
-    async fn enqueue_folder_job_tx(
-        &self,
-        tx: &mut Transaction<'_, Postgres>,
-        workspace_id: Uuid,
-        folder_id: Uuid,
-        kind: StorageProjectionJobKind,
-        reason: Option<&str>,
-    ) -> anyhow::Result<()> {
-        match kind {
-            StorageProjectionJobKind::FolderSync | StorageProjectionJobKind::DeleteFolder => {}
-            other => anyhow::bail!("job_kind {other:?} requires a doc_id"),
-        }
-
-        let job_type = Self::kind_to_str(kind);
-        sqlx::query(
-            r#"
-            INSERT INTO storage_projection_jobs (workspace_id, job_type, folder_id, reason, attempts, locked_at, last_error)
-            VALUES ($1, $2, $3, $4, 0, NULL, NULL)
-            ON CONFLICT (job_type, folder_id) WHERE folder_id IS NOT NULL
-            DO UPDATE SET reason = EXCLUDED.reason,
-                locked_at = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.locked_at
-                END,
-                attempts = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN 0
-                    ELSE storage_projection_jobs.attempts
-                END,
-                last_error = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN NULL
-                    ELSE storage_projection_jobs.last_error
-                END,
-                workspace_id = EXCLUDED.workspace_id,
-                pending_retry = CASE
-                    WHEN storage_projection_jobs.locked_at IS NULL THEN false
-                    ELSE true
-                END,
-                updated_at = now()
-            "#,
-        )
-        .bind(workspace_id)
-        .bind(job_type)
-        .bind(folder_id)
-        .bind(reason)
-        .execute(tx.as_mut())
-        .await?;
-        Ok(())
-    }
-
-    async fn fetch_next_job(
-        &self,
-        lock_timeout_secs: i64,
-    ) -> anyhow::Result<Option<StorageProjectionJob>> {
-        let row = sqlx::query(
-            r#"
-            WITH next_job AS (
-                SELECT id FROM storage_projection_jobs
-                WHERE locked_at IS NULL
-                   OR locked_at < now() - ($1 * interval '1 second')
-                ORDER BY created_at
-                LIMIT 1
-                FOR UPDATE SKIP LOCKED
-            )
-            UPDATE storage_projection_jobs j
-            SET locked_at = now(),
-                attempts = attempts + 1,
-                updated_at = now()
-            WHERE j.id IN (SELECT id FROM next_job)
-            RETURNING j.id, j.workspace_id, j.job_type, j.doc_id, j.folder_id, j.reason, j.attempts, j.locked_at
-            "#,
-        )
-        .bind(lock_timeout_secs.max(1))
-        .fetch_optional(&self.pool)
-        .await?;
-
-        let Some(row) = row else {
-            return Ok(None);
-        };
-
-        let job_type: String = row.get("job_type");
-        let kind = Self::str_to_kind(&job_type)?;
-
-        Ok(Some(StorageProjectionJob {
-            id: row.get("id"),
-            workspace_id: row.get("workspace_id"),
-            job_type: kind,
-            doc_id: row.try_get::<Option<Uuid>, _>("doc_id").unwrap_or(None),
-            folder_id: row.try_get::<Option<Uuid>, _>("folder_id").unwrap_or(None),
-            reason: row.try_get::<Option<String>, _>("reason").unwrap_or(None),
-            attempts: row.try_get("attempts").unwrap_or_default(),
-            locked_at: row.get::<DateTime<Utc>, _>("locked_at"),
-        }))
-    }
-
-    async fn complete_job(&self, job_id: i64, locked_at: DateTime<Utc>) -> anyhow::Result<()> {
-        let mut tx = self.pool.begin().await?;
-        let updated = sqlx::query(
-            r#"
-            UPDATE storage_projection_jobs
-            SET locked_at = NULL,
-                attempts = 0,
-                last_error = NULL,
-                pending_retry = false,
-                updated_at = now()
-            WHERE id = $1 AND locked_at = $2 AND pending_retry = true
-            "#,
-        )
-        .bind(job_id)
-        .bind(locked_at)
-        .execute(&mut *tx)
-        .await?;
-        if updated.rows_affected() == 0 {
-            sqlx::query(
-                "DELETE FROM storage_projection_jobs WHERE id = $1 AND locked_at = $2 AND pending_retry = false",
-            )
-            .bind(job_id)
-            .bind(locked_at)
-            .execute(&mut *tx)
-            .await?;
-        }
-        tx.commit().await?;
-        Ok(())
-    }
-
-    async fn fail_job(
-        &self,
-        job_id: i64,
-        locked_at: DateTime<Utc>,
-        error: &str,
-    ) -> anyhow::Result<()> {
-        sqlx::query(
-            r#"
-            UPDATE storage_projection_jobs
-            SET last_error = $2,
-                locked_at = NULL,
-                pending_retry = false,
-                updated_at = now()
-            WHERE id = $1 AND locked_at = $3
-            "#,
-        )
-        .bind(job_id)
-        .bind(error)
-        .bind(locked_at)
-        .execute(&self.pool)
-        .await?;
-        Ok(())
-    }
-}
diff --git a/api/src/infrastructure/storage/storage_port_impl.rs b/api/src/infrastructure/storage/storage_port_impl.rs
deleted file mode 100644
index 6f773201..00000000
--- a/api/src/infrastructure/storage/storage_port_impl.rs
+++ /dev/null
@@ -1,272 +0,0 @@
-use std::path::{Path, PathBuf};
-use uuid::Uuid;
-
-use crate::application::ports::storage_port::{
-    StorageProjectionPort, StorageResolverPort, StoredAttachment,
-};
-use crate::application::utils::hash::sha256_hex;
-
-pub struct FsStoragePort {
-    pub pool: crate::infrastructure::db::PgPool,
-    pub uploads_root: PathBuf,
-}
-
-#[async_trait::async_trait]
-impl StorageProjectionPort for FsStoragePort {
-    async fn move_folder_subtree(&self, folder_id: Uuid) -> anyhow::Result<u64> {
-        crate::infrastructure::storage::move_folder_subtree(
-            &self.pool,
-            self.uploads_root.as_path(),
-            folder_id,
-        )
-        .await
-    }
-
-    async fn delete_doc_physical(&self, doc_id: Uuid) -> anyhow::Result<()> {
-        crate::infrastructure::storage::delete_doc_physical(
-            &self.pool,
-            self.uploads_root.as_path(),
-            doc_id,
-        )
-        .await
-    }
-
-    async fn delete_folder_physical(&self, folder_id: Uuid) -> anyhow::Result<u64> {
-        crate::infrastructure::storage::delete_folder_physical(
-            &self.pool,
-            self.uploads_root.as_path(),
-            folder_id,
-        )
-        .await
-    }
-
-    async fn sync_doc_paths(&self, doc_id: Uuid) -> anyhow::Result<()> {
-        crate::infrastructure::storage::move_doc_paths(
-            &self.pool,
-            self.uploads_root.as_path(),
-            doc_id,
-        )
-        .await
-    }
-
-    async fn delete_relative_path(&self, rel: &str) -> anyhow::Result<()> {
-        use std::io::ErrorKind;
-
-        let abs = self.absolute_from_relative(rel);
-        if tokio::fs::try_exists(&abs).await.unwrap_or(false) {
-            match tokio::fs::metadata(&abs).await {
-                Ok(meta) => {
-                    if meta.is_dir() {
-                        tokio::fs::remove_dir_all(&abs).await?;
-                    } else {
-                        tokio::fs::remove_file(&abs).await?;
-                    }
-                }
-                Err(err) if err.kind() == ErrorKind::NotFound => {}
-                Err(err) => return Err(err.into()),
-            }
-            crate::infrastructure::storage::mark_dirty_delete_relative(&self.pool, rel).await?;
-        }
-        Ok(())
-    }
-}
-
-#[async_trait::async_trait]
-impl StorageResolverPort for FsStoragePort {
-    async fn build_doc_dir(&self, doc_id: Uuid) -> anyhow::Result<PathBuf> {
-        crate::infrastructure::storage::build_doc_dir(
-            &self.pool,
-            self.uploads_root.as_path(),
-            doc_id,
-        )
-        .await
-    }
-
-    async fn build_doc_file_path(&self, doc_id: Uuid) -> anyhow::Result<PathBuf> {
-        crate::infrastructure::storage::build_doc_file_path(
-            &self.pool,
-            self.uploads_root.as_path(),
-            doc_id,
-        )
-        .await
-    }
-
-    fn relative_from_uploads(&self, abs: &Path) -> String {
-        crate::infrastructure::storage::relative_from_uploads(self.uploads_root.as_path(), abs)
-    }
-
-    fn user_repo_dir(&self, user_id: Uuid) -> String {
-        let path = self.uploads_root.join(user_id.to_string());
-        path.to_string_lossy().to_string()
-    }
-
-    fn absolute_from_relative(&self, rel: &str) -> PathBuf {
-        self.uploads_root.join(rel)
-    }
-
-    async fn resolve_upload_path(&self, doc_id: Uuid, rest_path: &str) -> anyhow::Result<PathBuf> {
-        use std::path::Component;
-        use tokio::fs;
-
-        // Build base directory for the document (guaranteed to live under uploads dir).
-        let doc_dir = crate::infrastructure::storage::build_doc_dir(
-            &self.pool,
-            self.uploads_root.as_path(),
-            doc_id,
-        )
-        .await?;
-        let uploads_root = self.uploads_root.as_path();
-
-        if !doc_dir.starts_with(uploads_root) {
-            anyhow::bail!("forbidden");
-        }
-
-        // Normalise the rest path and reject any traversal attempts.
-        let mut relative = PathBuf::new();
-        for component in Path::new(rest_path).components() {
-            match component {
-                Component::Normal(part) => relative.push(part),
-                Component::CurDir => continue,
-                _ => anyhow::bail!("forbidden"),
-            }
-        }
-
-        if relative.as_os_str().is_empty() {
-            anyhow::bail!("forbidden");
-        }
-
-        let full_path = doc_dir.join(relative);
-        if !full_path.starts_with(uploads_root) {
-            anyhow::bail!("forbidden");
-        }
-
-        if !fs::try_exists(&full_path).await.unwrap_or(false) {
-            anyhow::bail!("not_found");
-        }
-
-        Ok(full_path)
-    }
-
-    async fn read_bytes(&self, abs_path: &Path) -> anyhow::Result<Vec<u8>> {
-        let data = tokio::fs::read(abs_path).await?;
-        Ok(data)
-    }
-
-    async fn exists(&self, abs_path: &Path) -> anyhow::Result<bool> {
-        Ok(tokio::fs::try_exists(abs_path).await.unwrap_or(false))
-    }
-
-    async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> anyhow::Result<()> {
-        if let Some(parent) = abs_path.parent() {
-            tokio::fs::create_dir_all(parent).await?;
-        }
-        // Short-circuit when content is unchanged to avoid unnecessary dirty tracking.
-        let new_hash = sha256_hex(data);
-        if tokio::fs::try_exists(abs_path).await.unwrap_or(false) {
-            match tokio::fs::read(abs_path).await {
-                Ok(existing) => {
-                    let old_hex = sha256_hex(&existing);
-                    if old_hex == new_hash {
-                        // No-op write; do not mark dirty.
-                        return Ok(());
-                    }
-                }
-                Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
-                Err(err) => return Err(err.into()),
-            }
-        }
-        tokio::fs::write(abs_path, data).await?;
-        // Mark dirty (best-effort)
-        let is_text = abs_path
-            .extension()
-            .and_then(|e| e.to_str())
-            .map(|e| e.eq_ignore_ascii_case("md"))
-            .unwrap_or(false);
-        let _ = crate::infrastructure::storage::mark_dirty_upsert_abs_path(
-            &self.pool,
-            self.uploads_root.as_path(),
-            abs_path,
-            is_text,
-            Some(&new_hash),
-        )
-        .await;
-        Ok(())
-    }
-
-    async fn store_doc_attachment(
-        &self,
-        doc_id: Uuid,
-        original_filename: Option<&str>,
-        bytes: &[u8],
-    ) -> anyhow::Result<StoredAttachment> {
-        use tokio::fs;
-
-        let base_dir = crate::infrastructure::storage::build_doc_dir(
-            &self.pool,
-            self.uploads_root.as_path(),
-            doc_id,
-        )
-        .await?;
-        let attachments_dir = base_dir.join("attachments");
-        let _ = fs::create_dir_all(&attachments_dir).await;
-
-        let original = original_filename.unwrap_or("file.bin");
-        let mut safe = crate::infrastructure::storage::sanitize_title(original);
-
-        let ts = chrono::Utc::now().format("%Y%m%d-%H%M%S");
-        let (stem, ext) = {
-            let p = Path::new(&safe);
-            let stem = p
-                .file_stem()
-                .and_then(|s| s.to_str())
-                .filter(|s| !s.is_empty())
-                .unwrap_or("file")
-                .to_string();
-            let ext = p
-                .extension()
-                .and_then(|s| s.to_str())
-                .unwrap_or("")
-                .to_string();
-            (stem, ext)
-        };
-
-        safe = if ext.is_empty() {
-            format!("{}_{}", stem, ts)
-        } else {
-            format!("{}_{}.{}", stem, ts, ext)
-        };
-
-        let mut candidate = attachments_dir.join(&safe);
-        let mut counter = 1;
-        while fs::try_exists(&candidate).await.unwrap_or(false) {
-            let p = Path::new(&safe);
-            let stem = p.file_stem().and_then(|s| s.to_str()).unwrap_or("file");
-            let ext = p.extension().and_then(|s| s.to_str()).unwrap_or("");
-            let new_name = if ext.is_empty() {
-                format!("{}-{}", stem, counter)
-            } else {
-                format!("{}-{}.{}", stem, counter, ext)
-            };
-            candidate = attachments_dir.join(&new_name);
-            safe = new_name;
-            counter += 1;
-        }
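// [Editor's note] Naming scheme in the loop above: the file first gets a timestamp
// suffix (stem_YYYYMMDD-HHMMSS.ext); on collision a "-<counter>" is appended before
// the extension until an unused name is found, e.g. photo_20240101-120000.png ->
// photo_20240101-120000-1.png (example values illustrative).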
format!("{}-{}.{}", stem, counter, ext) - }; - candidate = attachments_dir.join(&new_name); - safe = new_name; - counter += 1; - } - - fs::write(&candidate, bytes).await?; - let relative = crate::infrastructure::storage::relative_from_uploads( - self.uploads_root.as_path(), - &candidate, - ) - .replace('\\', "/"); - let size = bytes.len() as i64; - - let content_hash = sha256_hex(bytes); - - Ok(StoredAttachment { - filename: safe, - relative_path: relative, - size, - content_hash, - }) - } -} diff --git a/api/src/infrastructure/storage/worker.rs b/api/src/infrastructure/storage/worker.rs deleted file mode 100644 index f02ed6e9..00000000 --- a/api/src/infrastructure/storage/worker.rs +++ /dev/null @@ -1,1041 +0,0 @@ -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Error; -use serde_json::json; -use sqlx::Error as SqlxError; -use tracing::{Instrument, error, info, info_span, warn}; -use uuid::Uuid; - -use crate::application::ports::doc_event_log::DocEventLog; -use crate::application::ports::storage_port::{StorageProjectionPort, StorageResolverPort}; -use crate::application::ports::storage_projection_queue::{ - StorageDeleteJobMetadata, StorageJobReason, StorageProjectionJob, StorageProjectionJobKind, - StorageProjectionQueue, -}; -use crate::application::services::metrics::MetricsRegistry; -use crate::application::services::realtime::snapshot::MarkdownExportProvider; -use crate::application::services::storage_projection_cache::RecentProjectionCache; -use crate::application::services::workspaces::WorkspacePermissionResolver; -use crate::application::services::workspaces::permission_snapshot::permission_set_from_snapshot; -use crate::domain::workspaces::permissions::{ - PERM_DOC_DELETE, PERM_FILE_DELETE, PERM_FOLDER_DELETE, PermissionSet, -}; -use crate::infrastructure::storage::suppress_git_dirty; - -pub struct StorageProjectionWorker { - jobs: Arc, - storage: Arc, - resolver: Arc, - markdown: Arc, - events: Arc, - recent_exports: Arc, - lock_timeout_secs: i64, - idle_backoff: Duration, - max_attempts: i32, - metrics: Arc, - permission_resolver: Arc, -} - -impl StorageProjectionWorker { - pub fn new( - jobs: Arc, - storage: Arc, - resolver: Arc, - markdown: Arc, - events: Arc, - metrics: Arc, - permission_resolver: Arc, - recent_exports: Arc, - ) -> Self { - Self { - jobs, - storage, - resolver, - markdown, - events, - recent_exports, - lock_timeout_secs: 30, - idle_backoff: Duration::from_millis(500), - max_attempts: 5, - metrics, - permission_resolver, - } - } - - pub fn with_lock_timeout(mut self, secs: i64) -> Self { - self.lock_timeout_secs = secs; - self - } - - pub fn with_idle_backoff(mut self, backoff: Duration) -> Self { - self.idle_backoff = backoff; - self - } - - pub fn with_max_attempts(mut self, attempts: i32) -> Self { - self.max_attempts = attempts.max(1); - self - } - - pub async fn run(self: Arc) { - loop { - match self.jobs.fetch_next_job(self.lock_timeout_secs).await { - Ok(Some(job)) => { - if let Err(err) = self.process_job(job).await { - error!(error = ?err, "storage_projection_job_failed"); - } - continue; - } - Ok(None) => { - tokio::time::sleep(self.idle_backoff).await; - } - Err(err) => { - error!(error = ?err, "storage_projection_job_fetch_failed"); - tokio::time::sleep(self.idle_backoff).await; - } - } - } - } - - async fn process_job(self: &Arc, job: StorageProjectionJob) -> anyhow::Result<()> { - let span = info_span!( - "storage_projection_job", - job_id = job.id, - job_type = ?job.job_type, - doc_id = job.doc_id.map(|id| 
-            doc_id = job.doc_id.map(|id| id.to_string()),
-            folder_id = job.folder_id.map(|id| id.to_string())
-        );
-
-        async move {
-            let delete_metadata = parse_delete_job_metadata(job.reason.as_ref());
-            let result = suppress_git_dirty(async {
-                match job.job_type {
-                    StorageProjectionJobKind::DocSync => {
-                        let doc_id = job
-                            .doc_id
-                            .ok_or_else(|| anyhow::anyhow!("doc_id_required"))?;
-                        let res = self.handle_doc_sync(doc_id).await;
-                        if res.is_ok() {
-                            self.emit_projection_event(doc_id, &job, "succeeded", None)
-                                .await;
-                        }
-                        res
-                    }
-                    StorageProjectionJobKind::FolderSync => {
-                        self.handle_folder_sync(
-                            job.folder_id
-                                .ok_or_else(|| anyhow::anyhow!("folder_id_required"))?,
-                        )
-                        .await
-                    }
-                    StorageProjectionJobKind::DeleteDoc => {
-                        let doc_id = job
-                            .doc_id
-                            .ok_or_else(|| anyhow::anyhow!("doc_id_required"))?;
-                        let res = self
-                            .handle_delete_doc(doc_id, delete_metadata.as_ref())
-                            .await;
-                        if res.is_ok() {
-                            self.emit_projection_event(doc_id, &job, "succeeded", None)
-                                .await;
-                        }
-                        res
-                    }
-                    StorageProjectionJobKind::DeleteFolder => {
-                        self.handle_delete_folder(
-                            job.folder_id
-                                .ok_or_else(|| anyhow::anyhow!("folder_id_required"))?,
-                            delete_metadata.as_ref(),
-                        )
-                        .await
-                    }
-                }
-            })
-            .await;
-
-            match result {
-                Ok(()) => {
-                    self.jobs.complete_job(job.id, job.locked_at).await?;
-                    self.metrics.inc_storage_projection_success();
-                    info!("storage_projection_job_succeeded");
-                }
-                Err(err) if missing_target(&err) => {
-                    warn!(
-                        error = ?err,
-                        "storage_projection_job_missing_target_skip"
-                    );
-                    self.jobs.complete_job(job.id, job.locked_at).await?;
-                    self.metrics.inc_storage_projection_success();
-                    if let Some(doc_id) = job.doc_id {
-                        self.emit_projection_event(
-                            doc_id,
-                            &job,
-                            "skipped",
-                            Some(&format!("{err:#}")),
-                        )
-                        .await;
-                    }
-                }
-                Err(err) => {
-                    let msg = format!("{err:#}");
-                    if job.attempts >= self.max_attempts {
-                        self.jobs.complete_job(job.id, job.locked_at).await?;
-                        self.metrics.inc_storage_projection_failure();
-                        warn!(
-                            error = ?err,
-                            attempts = job.attempts,
-                            "storage_projection_job_gave_up"
-                        );
-                        if let Some(doc_id) = job.doc_id {
-                            self.emit_projection_event(
-                                doc_id,
-                                &job,
-                                "failed",
-                                Some("max_attempts_exceeded"),
-                            )
-                            .await;
-                        }
-                    } else {
-                        self.jobs.fail_job(job.id, job.locked_at, &msg).await?;
-                        self.metrics.inc_storage_projection_retry();
-                        warn!(error = ?err, "storage_projection_job_failed_once");
-                        if let Some(doc_id) = job.doc_id {
-                            self.emit_projection_event(doc_id, &job, "failed", Some(&msg))
-                                .await;
-                        }
-                    }
-                }
-            }
-
-            Ok(())
-        }
-        .instrument(span)
-        .await
-    }
-
-    async fn handle_doc_sync(&self, doc_id: Uuid) -> anyhow::Result<()> {
-        self.storage.sync_doc_paths(doc_id).await?;
-        self.persist_markdown(doc_id).await
-    }
-
-    async fn handle_folder_sync(&self, folder_id: Uuid) -> anyhow::Result<()> {
-        self.storage.move_folder_subtree(folder_id).await?;
-        Ok(())
-    }
-
-    async fn handle_delete_doc(
-        &self,
-        doc_id: Uuid,
-        metadata: Option<&StorageDeleteJobMetadata>,
-    ) -> anyhow::Result<()> {
-        self.storage.delete_doc_physical(doc_id).await?;
-        if let Some(meta) = metadata {
-            self.delete_doc_by_metadata(meta).await?;
-        }
-        Ok(())
-    }
-
-    async fn handle_delete_folder(
-        &self,
-        folder_id: Uuid,
-        metadata: Option<&StorageDeleteJobMetadata>,
-    ) -> anyhow::Result<()> {
-        self.storage.delete_folder_physical(folder_id).await?;
-        if let Some(meta) = metadata {
-            self.delete_folder_by_metadata(meta).await?;
-        }
-        Ok(())
-    }
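// [Editor's note] The two delete handlers below re-check a permission snapshot that
// was captured when the job was enqueued; permission_set_from_metadata (defined
// further down) can also re-resolve the actor's permissions and, failing that,
// falls back to a fixed minimal set.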
-
-    async fn delete_doc_by_metadata(
-        &self,
-        metadata: &StorageDeleteJobMetadata,
-    ) -> anyhow::Result<()> {
-        let permissions = self.permission_set_from_metadata(metadata).await?;
-        if metadata.doc_type == "folder" {
-            if !permissions.allows(PERM_FOLDER_DELETE) {
-                warn!(
-                    workspace_id = %metadata.workspace_id,
-                    "storage_projection_folder_delete_permission_denied"
-                );
-            }
-            return Ok(());
-        }
-        if !permissions.allows(PERM_DOC_DELETE) {
-            warn!(
-                workspace_id = %metadata.workspace_id,
-                "storage_projection_doc_delete_permission_denied"
-            );
-            return Ok(());
-        }
-        let Some(repo_path) = metadata.repo_path.as_deref() else {
-            return Ok(());
-        };
-        let doc_relative = workspace_repo_relative(metadata.workspace_id, repo_path);
-        self.storage.delete_relative_path(&doc_relative).await?;
-        if let Some(paths) = metadata.attachment_paths.as_ref() {
-            let can_delete_attachments = permissions.allows(PERM_FILE_DELETE);
-            for rel in paths {
-                if !can_delete_attachments {
-                    warn!(
-                        workspace_id = %metadata.workspace_id,
-                        attachment_path = rel.as_str(),
-                        "storage_projection_attachment_delete_permission_denied"
-                    );
-                    break;
-                }
-                if let Err(err) = self.storage.delete_relative_path(rel).await {
-                    warn!(
-                        workspace_id = %metadata.workspace_id,
-                        attachment_path = rel.as_str(),
-                        error = ?err,
-                        "storage_attachment_delete_failed"
-                    );
-                }
-            }
-        }
-        Ok(())
-    }
-
-    async fn delete_folder_by_metadata(
-        &self,
-        metadata: &StorageDeleteJobMetadata,
-    ) -> anyhow::Result<()> {
-        let Some(repo_path) = metadata.repo_path.as_deref() else {
-            return Ok(());
-        };
-        let permissions = self.permission_set_from_metadata(metadata).await?;
-        if !permissions.allows(PERM_FOLDER_DELETE) {
-            warn!(
-                workspace_id = %metadata.workspace_id,
-                "storage_projection_folder_delete_permission_denied"
-            );
-            return Ok(());
-        }
-        let folder_relative = workspace_repo_relative(metadata.workspace_id, repo_path);
-        self.storage.delete_relative_path(&folder_relative).await?;
-        Ok(())
-    }
-}
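// [Editor's note] In persist_markdown below, recording the (workspace, repo_path,
// content_hash) triple presumably lets the ingest side recognise and skip files the
// projection itself just wrote, so exports do not echo back as inbound changes;
// that reading is inferred from RecentProjectionCache's name and is not confirmed
// elsewhere in this diff.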
-
-impl StorageProjectionWorker {
-    async fn persist_markdown(&self, doc_id: Uuid) -> anyhow::Result<()> {
-        if let Some(export) = self.markdown.export_markdown_for_doc(&doc_id).await? {
-            let path = self.resolver.build_doc_file_path(doc_id).await?;
-            self.resolver
-                .write_bytes(path.as_path(), &export.bytes)
-                .await?;
-            if let Some(repo_path) = export.repo_path.as_deref() {
-                self.recent_exports
-                    .record(export.workspace_id, repo_path, &export.content_hash);
-            }
-        }
-        Ok(())
-    }
-}
-
-impl StorageProjectionWorker {
-    async fn emit_projection_event(
-        &self,
-        doc_id: Uuid,
-        job: &StorageProjectionJob,
-        status: &str,
-        error: Option<&str>,
-    ) {
-        let Some(event_type) = projection_event_type(job.job_type) else {
-            return;
-        };
-        let payload = json!({
-            "job_id": job.id,
-            "job_type": job_type_label(job.job_type),
-            "status": status,
-            "reason": job.reason,
-            "attempts": job.attempts,
-            "error": error,
-        });
-        if let Err(err) = self
-            .events
-            .append(job.workspace_id, doc_id, event_type, Some(payload))
-            .await
-        {
-            warn!(
-                error = ?err,
-                doc_id = %doc_id,
-                event_type,
-                "storage_projection_event_emit_failed"
-            );
-        }
-    }
-}
-
-fn parse_delete_job_metadata(reason: Option<&String>) -> Option<StorageDeleteJobMetadata> {
-    reason.and_then(|raw| {
-        serde_json::from_str::<StorageJobReason<StorageDeleteJobMetadata>>(raw)
-            .ok()
-            .and_then(|wrapper| wrapper.metadata)
-    })
-}
-
-fn workspace_repo_relative(workspace_id: Uuid, repo_path: &str) -> String {
-    let mut full = PathBuf::from(workspace_id.to_string());
-    full.push(repo_path.trim_start_matches('/'));
-    normalize_relative_path(full)
-}
-
-const FALLBACK_DELETE_PERMISSIONS: &[&str] =
-    &[PERM_DOC_DELETE, PERM_FOLDER_DELETE, PERM_FILE_DELETE];
-
-impl StorageProjectionWorker {
-    async fn permission_set_from_metadata(
-        &self,
-        metadata: &StorageDeleteJobMetadata,
-    ) -> anyhow::Result<PermissionSet> {
-        let set = permission_set_from_snapshot(&metadata.permission_snapshot);
-        if !set.is_empty() {
-            return Ok(set);
-        }
-        if let Some(actor_id) = metadata.actor_id {
-            match self
-                .permission_resolver
-                .load_permission_set(metadata.workspace_id, actor_id)
-                .await
-            {
-                Ok(Some(resolved)) => {
-                    info!(
-                        workspace_id = %metadata.workspace_id,
-                        actor_id = %actor_id,
-                        "storage_projection_permissions_rehydrated"
-                    );
-                    return Ok(resolved);
-                }
-                Ok(None) => {
-                    warn!(
-                        workspace_id = %metadata.workspace_id,
-                        actor_id = %actor_id,
-                        "storage_projection_actor_missing_for_permissions"
-                    );
-                }
-                Err(err) => {
-                    warn!(
-                        error = ?err,
-                        workspace_id = %metadata.workspace_id,
-                        actor_id = %actor_id,
-                        "storage_projection_permission_resolve_failed"
-                    );
-                }
-            }
-        } else {
-            warn!(
-                workspace_id = %metadata.workspace_id,
-                "storage_projection_permission_snapshot_missing_no_actor"
-            );
-        }
-        Ok(PermissionSet::from_slice(FALLBACK_DELETE_PERMISSIONS))
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use async_trait::async_trait;
-    use sqlx::{Postgres, Transaction};
-    use std::path::{Path, PathBuf};
-    use std::sync::Mutex;
-    use std::sync::atomic::{AtomicBool, Ordering};
-
-    use crate::application::ports::storage_port::StoredAttachment;
-    use crate::application::services::errors::ServiceError;
-    use crate::application::services::realtime::snapshot::{
-        MarkdownExport, MarkdownExportProvider,
-    };
-
-    struct AllowAllPermissions;
-
-    #[async_trait]
-    impl WorkspacePermissionResolver for AllowAllPermissions {
-        async fn load_permission_set(
-            &self,
-            _workspace_id: Uuid,
-            _user_id: Uuid,
-        ) -> Result<Option<PermissionSet>, ServiceError> {
-            Ok(Some(PermissionSet::all()))
-        }
-    }
-
-    struct RecordingPermissionResolver {
-        called: AtomicBool,
-    }
-
-    impl RecordingPermissionResolver {
-        fn new() -> Self {
-            Self {
-                called: AtomicBool::new(false),
-            }
-        }
-
-        fn was_called(&self) -> bool {
-            self.called.load(Ordering::SeqCst)
-        }
-    }
-
-    #[async_trait]
-    impl WorkspacePermissionResolver for RecordingPermissionResolver {
-        async fn load_permission_set(
-            &self,
-            _workspace_id: Uuid,
-            _user_id: Uuid,
-        ) -> Result<Option<PermissionSet>, ServiceError> {
-            self.called.store(true, Ordering::SeqCst);
-            Ok(Some(PermissionSet::from_slice(&[
-                PERM_DOC_DELETE,
-                PERM_FOLDER_DELETE,
-            ])))
-        }
-    }
-
-    struct NonePermissionResolver;
-
-    #[async_trait]
-    impl WorkspacePermissionResolver for NonePermissionResolver {
-        async fn load_permission_set(
-            &self,
-            _workspace_id: Uuid,
-            _user_id: Uuid,
-        ) -> Result<Option<PermissionSet>, ServiceError> {
-            Ok(None)
-        }
-    }
-
-    #[tokio::test]
-    async fn doc_sync_invokes_storage_and_completes_job() {
-        let queue = Arc::new(MockQueue::default());
-        let storage = Arc::new(RecordingStoragePort::default());
-        let resolver_impl = Arc::new(MockResolver::default());
-        let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
-        let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
-        let events = Arc::new(RecordingDocEventLog::default());
-        let metrics = Arc::new(MetricsRegistry::default());
-        let permission_resolver: Arc<dyn WorkspacePermissionResolver> =
-            Arc::new(AllowAllPermissions);
-        let worker = Arc::new(StorageProjectionWorker::new(
-            queue.clone(),
-            storage.clone(),
-            resolver.clone(),
-            markdown.clone(),
-            events.clone(),
-            metrics.clone(),
-            permission_resolver.clone(),
-            Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
-        ));
-        let job = StorageProjectionJob {
-            id: 1,
-            workspace_id: Uuid::new_v4(),
-            job_type: StorageProjectionJobKind::DocSync,
-            doc_id: Some(Uuid::new_v4()),
-            folder_id: None,
-            reason: None,
-            attempts: 0,
-            locked_at: chrono::Utc::now(),
-        };
-        worker.process_job(job).await.unwrap();
-        assert_eq!(queue.completed(), vec![1]);
-        assert_eq!(storage.calls(), vec!["sync_doc_paths".to_string()]);
-        assert_eq!(events.events().len(), 1);
-        assert_eq!(events.events()[0].2, "storage.projection.doc_sync");
-        assert_eq!(resolver_impl.writes().len(), 1);
-        assert_eq!(metrics.snapshot().storage_projection_success, 1);
-    }
-
-    #[tokio::test]
-    async fn failing_doc_sync_marks_job_failed() {
-        let queue = Arc::new(MockQueue::default());
-        let storage = Arc::new(RecordingStoragePort::default());
-        storage.fail_next_sync();
-        let resolver_impl = Arc::new(MockResolver::default());
-        let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
-        let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
-        let events = Arc::new(RecordingDocEventLog::default());
-        let metrics = Arc::new(MetricsRegistry::default());
-        let permission_resolver: Arc<dyn WorkspacePermissionResolver> =
-            Arc::new(AllowAllPermissions);
-        let worker = Arc::new(StorageProjectionWorker::new(
-            queue.clone(),
-            storage,
-            resolver,
-            markdown,
-            events,
-            metrics.clone(),
-            permission_resolver.clone(),
-            Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
-        ));
-        let job = StorageProjectionJob {
-            id: 2,
-            workspace_id: Uuid::new_v4(),
-            job_type: StorageProjectionJobKind::DocSync,
-            doc_id: Some(Uuid::new_v4()),
-            folder_id: None,
-            reason: None,
-            attempts: 0,
-            locked_at: chrono::Utc::now(),
-        };
-        worker.process_job(job).await.unwrap();
-        assert!(queue.completed().is_empty());
-        assert_eq!(queue.failed().len(), 1);
-        assert_eq!(queue.failed()[0].0, 2);
-        assert_eq!(metrics.snapshot().storage_projection_retry, 1);
-    }
-
-    #[tokio::test]
-    async fn delete_doc_metadata_removes_only_listed_attachments() {
-        let queue = Arc::new(MockQueue::default());
-        let storage = Arc::new(RecordingStoragePort::default());
-        let resolver_impl = Arc::new(MockResolver::default());
-        let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
-        let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
-        let events = Arc::new(RecordingDocEventLog::default());
-        let metrics = Arc::new(MetricsRegistry::default());
-        let permission_resolver: Arc<dyn WorkspacePermissionResolver> =
-            Arc::new(AllowAllPermissions);
-        let worker = Arc::new(StorageProjectionWorker::new(
-            queue,
-            storage.clone(),
-            resolver,
-            markdown,
-            events,
-            metrics,
-            permission_resolver.clone(),
-            Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
-        ));
-        let owner = Uuid::new_v4();
-        let metadata = StorageDeleteJobMetadata {
-            workspace_id: owner,
-            repo_path: Some("docs/foo.md".into()),
-            doc_type: "doc".into(),
-            attachment_paths: Some(vec![
-                format!("{}/docs/attachments/image.png", owner),
-                format!("{}/docs/attachments/asset.bin", owner),
-            ]),
-            permission_snapshot: PermissionSet::all().to_vec(),
-            actor_id: None,
-        };
-        worker.delete_doc_by_metadata(&metadata).await.unwrap();
-        assert_eq!(
-            storage.calls(),
-            vec![
-                format!("delete_relative_path:{}/docs/foo.md", owner),
-                format!("delete_relative_path:{}/docs/attachments/image.png", owner),
-                format!("delete_relative_path:{}/docs/attachments/asset.bin", owner)
-            ]
-        );
-    }
-
-    #[tokio::test]
-    async fn empty_snapshot_uses_resolver_permissions_when_available() {
-        let queue = Arc::new(MockQueue::default());
-        let storage = Arc::new(RecordingStoragePort::default());
-        let resolver_impl = Arc::new(MockResolver::default());
-        let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
-        let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
-        let events = Arc::new(RecordingDocEventLog::default());
-        let metrics = Arc::new(MetricsRegistry::default());
-        let resolver_stub = Arc::new(RecordingPermissionResolver::new());
-        let permission_resolver: Arc<dyn WorkspacePermissionResolver> = resolver_stub.clone();
-        let worker = Arc::new(StorageProjectionWorker::new(
-            queue,
-            storage,
-            resolver,
-            markdown,
-            events,
-            metrics,
-            permission_resolver,
-            Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
-        ));
-        let metadata = StorageDeleteJobMetadata {
-            workspace_id: Uuid::new_v4(),
-            repo_path: Some("docs/foo.md".into()),
-            doc_type: "doc".into(),
-            attachment_paths: None,
-            permission_snapshot: Vec::new(),
-            actor_id: Some(Uuid::new_v4()),
-        };
-        let set = worker
-            .permission_set_from_metadata(&metadata)
-            .await
-            .unwrap();
-        assert!(resolver_stub.was_called());
-        assert!(set.allows(PERM_DOC_DELETE));
-    }
-
-    #[tokio::test]
-    async fn empty_snapshot_without_actor_falls_back_to_minimum_permissions() {
-        let queue = Arc::new(MockQueue::default());
-        let storage = Arc::new(RecordingStoragePort::default());
-        let resolver_impl = Arc::new(MockResolver::default());
-        let resolver: Arc<dyn StorageResolverPort> = resolver_impl.clone();
-        let markdown: Arc<dyn MarkdownExportProvider> = Arc::new(MockMarkdownExporter::new());
-        let events = Arc::new(RecordingDocEventLog::default());
-        let metrics = Arc::new(MetricsRegistry::default());
-        let permission_resolver: Arc<dyn WorkspacePermissionResolver> =
-            Arc::new(NonePermissionResolver);
-        let worker = Arc::new(StorageProjectionWorker::new(
-            queue,
-            storage,
-            resolver,
-            markdown,
-            events,
-            metrics,
-            permission_resolver,
-            Arc::new(RecentProjectionCache::new(Duration::from_secs(5))),
-        ));
-        let metadata = StorageDeleteJobMetadata {
-            workspace_id: Uuid::new_v4(),
-            repo_path: Some("docs/foo.md".into()),
-            doc_type: "doc".into(),
-            attachment_paths: None,
-            permission_snapshot: Vec::new(),
-            actor_id: None,
-        };
-        let set = worker
-            .permission_set_from_metadata(&metadata)
-            .await
-            .unwrap();
-        assert!(set.allows(PERM_DOC_DELETE));
-        assert!(set.allows(PERM_FOLDER_DELETE));
-        assert!(set.allows(PERM_FILE_DELETE));
-    }
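// [Editor's note] The three asserts above pin the fallback contract: with no
// snapshot and no actor, permission_set_from_metadata returns exactly
// FALLBACK_DELETE_PERMISSIONS (doc, folder and file delete).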
-
-    #[derive(Default)]
-    struct MockQueue {
-        completed: Mutex<Vec<i64>>,
-        failed: Mutex<Vec<(i64, String)>>,
-    }
-
-    impl MockQueue {
-        fn completed(&self) -> Vec<i64> {
-            self.completed.lock().unwrap().clone()
-        }
-
-        fn failed(&self) -> Vec<(i64, String)> {
-            self.failed.lock().unwrap().clone()
-        }
-    }
-
-    #[async_trait]
-    impl StorageProjectionQueue for MockQueue {
-        async fn enqueue_doc_job(
-            &self,
-            _workspace_id: Uuid,
-            _doc_id: Uuid,
-            _kind: StorageProjectionJobKind,
-            _reason: Option<&str>,
-        ) -> anyhow::Result<()> {
-            unimplemented!()
-        }
-
-        async fn enqueue_doc_job_tx(
-            &self,
-            _tx: &mut Transaction<'_, Postgres>,
-            _workspace_id: Uuid,
-            _doc_id: Uuid,
-            _kind: StorageProjectionJobKind,
-            _reason: Option<&str>,
-        ) -> anyhow::Result<()> {
-            unimplemented!()
-        }
-
-        async fn enqueue_folder_job(
-            &self,
-            _workspace_id: Uuid,
-            _folder_id: Uuid,
-            _kind: StorageProjectionJobKind,
-            _reason: Option<&str>,
-        ) -> anyhow::Result<()> {
-            unimplemented!()
-        }
-
-        async fn enqueue_folder_job_tx(
-            &self,
-            _tx: &mut Transaction<'_, Postgres>,
-            _workspace_id: Uuid,
-            _folder_id: Uuid,
-            _kind: StorageProjectionJobKind,
-            _reason: Option<&str>,
-        ) -> anyhow::Result<()> {
-            unimplemented!()
-        }
-
-        async fn fetch_next_job(
-            &self,
-            _lock_timeout_secs: i64,
-        ) -> anyhow::Result<Option<StorageProjectionJob>> {
-            Ok(None)
-        }
-
-        async fn complete_job(
-            &self,
-            job_id: i64,
-            _locked_at: chrono::DateTime<chrono::Utc>,
-        ) -> anyhow::Result<()> {
-            self.completed.lock().unwrap().push(job_id);
-            Ok(())
-        }
-
-        async fn fail_job(
-            &self,
-            job_id: i64,
-            _locked_at: chrono::DateTime<chrono::Utc>,
-            error: &str,
-        ) -> anyhow::Result<()> {
-            self.failed
-                .lock()
-                .unwrap()
-                .push((job_id, error.to_string()));
-            Ok(())
-        }
-    }
-
-    #[derive(Default)]
-    struct RecordingStoragePort {
-        calls: Mutex<Vec<String>>,
-        fail_sync: AtomicBool,
-    }
-
-    impl RecordingStoragePort {
-        fn calls(&self) -> Vec<String> {
-            self.calls.lock().unwrap().clone()
-        }
-
-        fn fail_next_sync(&self) {
-            self.fail_sync.store(true, Ordering::SeqCst);
-        }
-    }
-
-    #[async_trait]
-    impl StorageProjectionPort for RecordingStoragePort {
-        async fn move_folder_subtree(&self, folder_id: Uuid) -> anyhow::Result<u64> {
-            let _ = folder_id;
-            self.calls
-                .lock()
-                .unwrap()
-                .push("move_folder_subtree".to_string());
-            Ok(0)
-        }
-
-        async fn delete_doc_physical(&self, doc_id: Uuid) -> anyhow::Result<()> {
-            let _ = doc_id;
-            self.calls
-                .lock()
-                .unwrap()
-                .push("delete_doc_physical".to_string());
-            Ok(())
-        }
-
-        async fn delete_folder_physical(&self, folder_id: Uuid) -> anyhow::Result<u64> {
-            let _ = folder_id;
-            self.calls
-                .lock()
-                .unwrap()
-                .push("delete_folder_physical".to_string());
-            Ok(0)
-        }
-
-        async fn sync_doc_paths(&self, _doc_id: Uuid) -> anyhow::Result<()> {
-            self.calls
-                .lock()
-                .unwrap()
-                .push("sync_doc_paths".to_string());
-            if self.fail_sync.swap(false, Ordering::SeqCst) {
-                anyhow::bail!("sync_failed");
-            }
-            Ok(())
-        }
-
-        async fn delete_relative_path(&self, rel: &str) -> anyhow::Result<()> {
-            self.calls
-                .lock()
-                .unwrap()
-                .push(format!("delete_relative_path:{rel}"));
-            Ok(())
-        }
-    }
-
-    #[derive(Default)]
-    struct MockResolver {
-        writes: Mutex<Vec<(Uuid, Vec<u8>)>>,
-    }
-
-    impl MockResolver {
-        fn writes(&self) -> Vec<(Uuid, Vec<u8>)> {
-            self.writes.lock().unwrap().clone()
-        }
-    }
-
-    #[async_trait]
-    impl StorageResolverPort for MockResolver {
-        async fn build_doc_dir(&self, _doc_id: Uuid) -> anyhow::Result<PathBuf> {
-            Ok(PathBuf::from("mock"))
-        }
-
-        async fn build_doc_file_path(&self, doc_id: Uuid) -> anyhow::Result<PathBuf> {
Ok(PathBuf::from(format!("mock/{doc_id}.md"))) - } - - fn relative_from_uploads(&self, _abs: &Path) -> String { - "mock".into() - } - - fn user_repo_dir(&self, _user_id: Uuid) -> String { - "mock".into() - } - - fn absolute_from_relative(&self, rel: &str) -> PathBuf { - PathBuf::from(rel) - } - - async fn resolve_upload_path( - &self, - _doc_id: Uuid, - _rest_path: &str, - ) -> anyhow::Result { - unimplemented!() - } - - async fn read_bytes(&self, _abs_path: &Path) -> anyhow::Result> { - unimplemented!() - } - - async fn exists(&self, _abs_path: &Path) -> anyhow::Result { - Ok(true) - } - - async fn write_bytes(&self, abs_path: &Path, data: &[u8]) -> anyhow::Result<()> { - let doc_id = abs_path - .file_stem() - .and_then(|s| s.to_str()) - .and_then(|raw| Uuid::parse_str(raw).ok()) - .unwrap_or_else(Uuid::nil); - self.writes.lock().unwrap().push((doc_id, data.to_vec())); - Ok(()) - } - - async fn store_doc_attachment( - &self, - _doc_id: Uuid, - _original_filename: Option<&str>, - _bytes: &[u8], - ) -> anyhow::Result { - unimplemented!() - } - } - - struct MockMarkdownExporter { - bytes: Vec, - } - - impl MockMarkdownExporter { - fn new() -> Self { - Self { - bytes: b"mock markdown".to_vec(), - } - } - } - - #[async_trait] - impl MarkdownExportProvider for MockMarkdownExporter { - async fn export_markdown_for_doc( - &self, - _doc_id: &Uuid, - ) -> anyhow::Result> { - Ok(Some(MarkdownExport { - bytes: self.bytes.clone(), - repo_path: Some("docs/mock.md".into()), - owner_id: Some(Uuid::new_v4()), - workspace_id: Uuid::new_v4(), - content_hash: "hash".into(), - })) - } - } - - #[derive(Default)] - struct RecordingDocEventLog { - events: Mutex)>>, - } - - impl RecordingDocEventLog { - fn events(&self) -> Vec<(Uuid, Uuid, String, Option)> { - self.events.lock().unwrap().clone() - } - } - - #[async_trait] - impl DocEventLog for RecordingDocEventLog { - async fn append( - &self, - workspace_id: Uuid, - doc_id: Uuid, - event_type: &str, - payload: Option, - ) -> anyhow::Result<()> { - self.events.lock().unwrap().push(( - workspace_id, - doc_id, - event_type.to_string(), - payload, - )); - Ok(()) - } - - async fn append_tx( - &self, - _tx: &mut Transaction<'_, Postgres>, - workspace_id: Uuid, - doc_id: Uuid, - event_type: &str, - payload: Option, - ) -> anyhow::Result<()> { - self.events.lock().unwrap().push(( - workspace_id, - doc_id, - event_type.to_string(), - payload, - )); - Ok(()) - } - } -} - -fn normalize_relative_path(path: PathBuf) -> String { - path.to_string_lossy().replace('\\', "/") -} - -fn missing_target(err: &Error) -> bool { - let needle = "document not found"; - err.chain().any(|cause| { - if let Some(sqlx_err) = cause.downcast_ref::() { - matches!(sqlx_err, SqlxError::RowNotFound) - } else if let Some(io_err) = cause.downcast_ref::() { - io_err.kind() == std::io::ErrorKind::NotFound - } else { - cause.to_string().to_lowercase().contains(needle) - } - }) -} - -fn job_type_label(kind: StorageProjectionJobKind) -> &'static str { - match kind { - StorageProjectionJobKind::DocSync => "doc_sync", - StorageProjectionJobKind::FolderSync => "folder_sync", - StorageProjectionJobKind::DeleteDoc => "delete_doc", - StorageProjectionJobKind::DeleteFolder => "delete_folder", - } -} - -fn projection_event_type(kind: StorageProjectionJobKind) -> Option<&'static str> { - match kind { - StorageProjectionJobKind::DocSync => Some("storage.projection.doc_sync"), - StorageProjectionJobKind::DeleteDoc => Some("storage.projection.doc_delete"), - _ => None, - } -} diff --git a/api/src/lib.rs 
diff --git a/api/src/lib.rs b/api/src/lib.rs
deleted file mode 100644
index ee41d7d1..00000000
--- a/api/src/lib.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-// Module layout (Clean Architecture style)
-// - bootstrap: configuration and startup
-// - infrastructure: DB/filesystem/crypto/realtime adapters
-// - presentation: HTTP/WS handlers and routing
-// - application: cross-cutting policies and domain services
-// - domain: core models
-
-pub mod application;
-pub mod bootstrap;
-pub mod domain;
-pub mod infrastructure;
-pub mod presentation;
diff --git a/api/src/main.rs b/api/src/main.rs
deleted file mode 100644
index e5b1e539..00000000
--- a/api/src/main.rs
+++ /dev/null
@@ -1,1258 +0,0 @@
-use std::net::SocketAddr;
-use std::sync::Arc;
-
-use anyhow::Context;
-use axum::extract::DefaultBodyLimit;
-use axum::extract::MatchedPath;
-use axum::{Router, middleware, routing::get};
-use chrono::Utc;
-use dotenvy::dotenv;
-use http::HeaderValue;
-use tokio::task::JoinHandle;
-use tokio::time::{Duration, sleep};
-use tower_http::cors::{AllowOrigin, CorsLayer};
-use tower_http::trace::TraceLayer;
-use tracing::{debug, error, info, warn};
-
-use api::application::ports::doc_event_log::DocEventLog;
-use api::application::ports::git_rebuild_job_queue::GitRebuildJobQueue;
-use api::application::ports::plugin_asset_store::PluginAssetStore;
-use api::application::ports::plugin_event_publisher::PluginEventPublisher;
-use api::application::ports::plugin_event_subscriber::PluginEventSubscriber;
-use api::application::ports::plugin_installation_repository::PluginInstallationRepository;
-use api::application::ports::plugin_installer::PluginInstaller;
-use api::application::ports::plugin_package_fetcher::PluginPackageFetcher;
-use api::application::ports::plugin_runtime::PluginRuntime;
-use api::application::ports::storage_ingest_queue::StorageIngestQueue;
-use api::application::ports::storage_port::{StorageProjectionPort, StorageResolverPort};
-use api::application::ports::storage_projection_queue::StorageProjectionQueue;
-use api::application::ports::storage_reconcile_backend::StorageReconcileBackend;
-use api::application::ports::storage_reconcile_jobs::StorageReconcileJobs;
-use api::application::ports::user_session_repository::UserSessionRepository;
-use api::application::services::api_tokens::ApiTokenService;
-use api::application::services::auth::account::AccountService;
-use api::application::services::auth::external::{ExternalAuthRegistry, ExternalAuthVerifier};
-use api::application::services::auth::service::AuthService;
-use api::application::services::auth::token_validation::TokenValidationService;
-use api::application::services::auth::user_sessions::UserSessionService;
-use api::application::services::authorization::AuthorizationService;
-use api::application::services::doc_events::{
-    DocEventSubscriber, FanoutDocEventSubscriber, LoggingDocEventSubscriber,
-};
-use api::application::services::documents::DocumentService;
-use api::application::services::files::FileService;
-use api::application::services::git::GitService;
-use api::application::services::git_rebuild::GitRebuildService;
-use api::application::services::git_rebuild_scheduler::GitRebuildScheduler;
-use api::application::services::health::HealthService;
-use api::application::services::markdown_render::MarkdownRenderService;
-use api::application::services::metrics::MetricsRegistry;
-use api::application::services::plugins::asset_signer::AssetSigner;
-use api::application::services::plugins::data::PluginDataService;
-use api::application::services::plugins::execution::PluginExecutionService;
-use api::application::services::plugins::management::PluginManagementService;
-use api::application::services::plugins::permissions::PluginPermissionService;
-use api::application::services::public::PublicService;
-use api::application::services::realtime::snapshot::{MarkdownExportProvider, SnapshotService};
-use api::application::services::shares::ShareService;
-use api::application::services::storage_ingest::StorageIngestService;
-use api::application::services::storage_reconcile::StorageReconcileService;
-use api::application::services::storage_reconcile_scheduler::StorageReconcileScheduler;
-use api::application::services::tags::TagService;
-use api::application::services::user_shortcuts::UserShortcutService;
-use api::application::services::workspaces::{WorkspacePermissionResolver, WorkspaceService};
-use api::bootstrap::config::{Config, StorageBackend};
-use api::infrastructure::auth::github::GithubOAuthProvider;
-use api::infrastructure::auth::google::GoogleIdentityProvider;
-use api::infrastructure::auth::oidc::OidcIdentityProvider;
-use api::infrastructure::db::advisory_lock::AdvisoryLock;
-use api::infrastructure::documents::doc_event_log::PgDocEventLog;
-use api::infrastructure::documents::event_poller::DocEventPoller;
-use api::infrastructure::documents::exporter::DefaultDocumentExporter;
-use api::infrastructure::documents::git_dirty_subscriber::GitDirtyDocEventSubscriber;
-use api::infrastructure::git::PgGitRebuildJobQueue;
-use api::infrastructure::plugins::filesystem_store::PluginExecutionLimits;
-use api::infrastructure::storage::{
-    FsIngestWatcher, FsReconcileBackend, PgStorageIngestQueue, PgStorageProjectionQueue,
-    PgStorageReconcileJobs, S3ReconcileBackend, StorageConsistencyMonitor, StorageIngestWorker,
-    StorageProjectionWorker,
-};
-use api::presentation::context::{AppContext, AppServices, PresentationConfig};
-use utoipa::OpenApi;
-use utoipa_swagger_ui::SwaggerUi;
-
-const SESSION_CLEANUP_INTERVAL_SECS: u64 = 15 * 60;
-const SESSION_CLEANUP_BATCH_SIZE: i64 = 500;
-
-#[derive(OpenApi)]
-#[openapi(
-    paths(
-        api::presentation::http::auth::register,
-        api::presentation::http::auth::login,
-        api::presentation::http::auth::oauth_state,
-        api::presentation::http::auth::oauth_login,
-        api::presentation::http::auth::list_oauth_providers,
-        api::presentation::http::auth::refresh_session,
-        api::presentation::http::auth::logout,
-        api::presentation::http::auth::me,
-        api::presentation::http::auth::list_sessions,
-        api::presentation::http::auth::revoke_session,
-        api::presentation::http::api_tokens::list_api_tokens,
-        api::presentation::http::api_tokens::create_api_token,
-        api::presentation::http::api_tokens::revoke_api_token,
-        api::presentation::http::shortcuts::get_user_shortcuts,
-        api::presentation::http::shortcuts::update_user_shortcuts,
-        api::presentation::http::tags::list_tags,
-        api::presentation::ws::axum_ws_entry,
-        api::presentation::http::documents::list_documents,
-        api::presentation::http::documents::create_document,
-        api::presentation::http::documents::get_document,
-        api::presentation::http::documents::update_document,
-        api::presentation::http::documents::duplicate_document,
-        api::presentation::http::documents::delete_document,
-        api::presentation::http::documents::get_document_content,
-        api::presentation::http::documents::download_document,
-        api::presentation::http::documents::list_document_snapshots,
-        api::presentation::http::documents::get_document_snapshot_diff,
-        api::presentation::http::documents::restore_document_snapshot,
-        api::presentation::http::documents::download_document_snapshot,
-        api::presentation::http::documents::search_documents,
-        api::presentation::http::documents::get_backlinks,
-        api::presentation::http::documents::get_outgoing_links,
-        api::presentation::http::files::upload_file,
-        api::presentation::http::files::get_file,
-        api::presentation::http::files::get_file_by_name,
-        api::presentation::http::shares::create_share,
-        api::presentation::http::shares::delete_share,
-        api::presentation::http::shares::list_document_shares,
-        api::presentation::http::shares::validate_share_token,
-        api::presentation::http::shares::browse_share,
-        api::presentation::http::shares::list_active_shares,
-        api::presentation::http::shares::create_share_mount,
-        api::presentation::http::shares::list_share_mounts,
-        api::presentation::http::shares::delete_share_mount,
-        api::presentation::http::shares::list_applicable_shares,
-        api::presentation::http::shares::materialize_folder_share,
-        api::presentation::http::public::publish_document,
-        api::presentation::http::public::unpublish_document,
-        api::presentation::http::public::get_publish_status,
-        api::presentation::http::public::list_workspace_public_documents,
-        api::presentation::http::public::get_public_by_workspace_and_id,
-        api::presentation::http::public::get_public_content_by_workspace_and_id,
-        api::presentation::http::git::get_config,
-        api::presentation::http::git::create_or_update_config,
-        api::presentation::http::git::delete_config,
-        api::presentation::http::git::get_status,
-        api::presentation::http::git::get_changes,
-        api::presentation::http::git::get_history,
-        api::presentation::http::git::get_working_diff,
-        api::presentation::http::git::get_commit_diff,
-        api::presentation::http::git::sync_now,
-        api::presentation::http::git::import_repository,
-        api::presentation::http::git::start_pull_session,
-        api::presentation::http::git::get_pull_session,
-        api::presentation::http::git::resolve_pull_session,
-        api::presentation::http::git::finalize_pull_session,
-        api::presentation::http::git::init_repository,
-        api::presentation::http::git::deinit_repository,
-        api::presentation::http::git::ignore_document,
-        api::presentation::http::git::ignore_folder,
-        api::presentation::http::git::get_gitignore_patterns,
-        api::presentation::http::git::add_gitignore_patterns,
-        api::presentation::http::git::check_path_ignored,
-        api::presentation::http::storage_ingest::enqueue_ingest_events,
-        api::presentation::http::markdown::render_markdown,
-        api::presentation::http::markdown::render_markdown_many,
-        api::presentation::http::workspaces::list_workspaces,
-        api::presentation::http::workspaces::create_workspace,
-        api::presentation::http::workspaces::switch_workspace,
-        api::presentation::http::workspaces::list_members,
-        api::presentation::http::workspaces::update_member_role,
-        api::presentation::http::workspaces::get_workspace_permissions,
-        api::presentation::http::workspaces::list_roles,
-        api::presentation::http::workspaces::create_role,
-        api::presentation::http::workspaces::update_role,
-        api::presentation::http::workspaces::delete_role,
-        api::presentation::http::workspaces::list_invitations,
-        api::presentation::http::workspaces::create_invitation,
-        api::presentation::http::workspaces::accept_invitation,
-        api::presentation::http::workspaces::download_workspace_archive,
-        api::presentation::http::plugins::get_manifest,
-        api::presentation::http::plugins::exec_action,
-        api::presentation::http::plugins::list_records,
-        api::presentation::http::plugins::create_record,
-        api::presentation::http::plugins::update_record,
-        api::presentation::http::plugins::delete_record,
-        api::presentation::http::plugins::get_kv_value,
-        api::presentation::http::plugins::put_kv_value,
-        api::presentation::http::plugins::install_from_url,
-        api::presentation::http::plugins::uninstall,
-        api::presentation::http::plugins::sse_updates,
-        api::presentation::http::health::health,
-    ),
-    components(schemas(
-        api::presentation::http::auth::RegisterRequest,
-        api::presentation::http::auth::LoginRequest,
-        api::presentation::http::auth::LoginResponse,
-        api::presentation::http::auth::OAuthLoginRequest,
-        api::presentation::http::auth::OAuthStateResponse,
-        api::presentation::http::auth::UserResponse,
-        api::presentation::http::auth::WorkspaceMembershipResponse,
-        api::presentation::http::api_tokens::ApiTokenItem,
-        api::presentation::http::api_tokens::ApiTokenCreateRequest,
-        api::presentation::http::api_tokens::ApiTokenCreateResponse,
-        api::presentation::http::tags::TagItem,
-        api::presentation::http::documents::Document,
-        api::presentation::http::documents::DocumentListResponse,
-        api::presentation::http::documents::CreateDocumentRequest,
-        api::presentation::http::documents::UpdateDocumentRequest,
-        api::presentation::http::documents::DuplicateDocumentRequest,
-        api::presentation::http::documents::BacklinkInfo,
-        api::presentation::http::documents::BacklinksResponse,
-        api::presentation::http::documents::OutgoingLink,
-        api::presentation::http::documents::OutgoingLinksResponse,
-        api::presentation::http::documents::SearchResult,
-        api::presentation::http::files::UploadFileResponse,
-        api::presentation::http::files::UploadFileMultipart,
-        api::presentation::http::shares::CreateShareRequest,
-        api::presentation::http::shares::CreateShareResponse,
-        api::presentation::http::shares::CreateShareMountRequest,
-        api::presentation::http::shares::ShareItem,
-        api::presentation::http::shares::ShareDocumentResponse,
-        api::presentation::http::shares::ShareBrowseTreeItem,
-        api::presentation::http::shares::ShareBrowseResponse,
-        api::presentation::http::shares::ApplicableShareItem,
-        api::presentation::http::shares::ActiveShareItem,
-        api::presentation::http::shares::ShareMountItem,
-        api::presentation::http::shares::MaterializeResponse,
-        api::presentation::http::public::PublishResponse,
-        api::presentation::http::public::PublicDocumentSummary,
-        api::presentation::http::git::GitConfigResponse,
-        api::presentation::http::git::CreateGitConfigRequest,
-        api::presentation::http::git::UpdateGitConfigRequest,
-        api::presentation::http::git::GitStatus,
-        api::presentation::http::git::GitSyncRequest,
-        api::presentation::http::git::GitSyncResponse,
-        api::presentation::http::git::GitChangeItem,
-        api::presentation::http::git::GitChangesResponse,
-        api::presentation::http::git::GitCommitItem,
-        api::presentation::http::git::GitHistoryResponse,
-        api::presentation::http::git::AddPatternsRequest,
-        api::presentation::http::git::CheckIgnoredRequest,
-        api::application::dto::diff::TextDiffLineType,
-        api::application::dto::diff::TextDiffLine,
-        api::application::dto::diff::TextDiffResult,
-        api::presentation::http::markdown::RenderOptionsPayload,
-        api::presentation::http::markdown::PlaceholderItemPayload,
-        api::presentation::http::workspaces::WorkspaceResponse,
-        api::presentation::http::workspaces::CreateWorkspaceRequest,
-        api::presentation::http::workspaces::WorkspaceMemberResponse,
api::presentation::http::workspaces::UpdateMemberRoleRequest, - api::presentation::http::workspaces::WorkspaceRoleResponse, - api::presentation::http::workspaces::PermissionOverridePayload, - api::presentation::http::workspaces::CreateWorkspaceRoleRequest, - api::presentation::http::workspaces::UpdateWorkspaceRoleRequest, - api::presentation::http::workspaces::SwitchWorkspaceResponse, - api::presentation::http::workspaces::WorkspacePermissionsResponse, - api::presentation::http::workspaces::WorkspaceInvitationResponse, - api::presentation::http::workspaces::CreateWorkspaceInvitationRequest, - api::presentation::http::workspaces::CreateWorkspaceRequest, - api::presentation::http::markdown::RenderResponseBody, - api::presentation::http::markdown::RenderRequest, - api::presentation::http::markdown::RenderManyRequest, - api::presentation::http::markdown::RenderManyResponse, - api::presentation::http::plugins::ManifestItem, - api::presentation::http::plugins::RecordsResponse, - api::presentation::http::plugins::CreateRecordBody, - api::presentation::http::plugins::UpdateRecordBody, - api::presentation::http::plugins::KvValueResponse, - api::presentation::http::plugins::KvValueBody, - api::presentation::http::plugins::ExecBody, - api::presentation::http::plugins::ExecResultResponse, - api::presentation::http::plugins::InstallFromUrlBody, - api::presentation::http::plugins::InstallResponse, - api::presentation::http::plugins::UninstallBody, - api::presentation::http::health::HealthResp, - api::presentation::http::shortcuts::UserShortcutResponse, - api::presentation::http::shortcuts::UpdateUserShortcutRequest, - )), - tags( - (name = "Auth", description = "Authentication"), - (name = "Documents", description = "Documents management"), - (name = "Files", description = "File management"), - (name = "Sharing", description = "Document sharing"), - (name = "Public Documents", description = "Public pages"), - (name = "Git", description = "Git integration"), - (name = "Markdown", description = "Markdown rendering"), - (name = "Plugins", description = "Plugins management & data APIs"), - (name = "Health", description = "System health checks"), - ) - )] -struct ApiDoc; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - dotenv().ok(); - - tracing_subscriber::fmt() - .with_env_filter( - std::env::var("RUST_LOG") - .unwrap_or_else(|_| "api=debug,warp=info,axum=info,tower_http=info".into()), - ) - .init(); - - let cfg = Config::from_env()?; - info!(?cfg, "Starting RefMD backend"); - - // Database - let pool = api::infrastructure::db::connect_pool(&cfg.database_url).await?; - api::infrastructure::db::migrate(&pool).await?; - - let asset_signer = Arc::new(AssetSigner::new(&cfg.plugin_asset_sign_key)); - - let uploads_root = std::path::PathBuf::from(&cfg.storage_root); - let (storage_resolver, storage_projection, reconcile_backend, reconcile_ingest_known_paths): ( - Arc, - Arc, - Arc, - bool, - ) = match cfg.storage_backend { - StorageBackend::Filesystem => { - let port = Arc::new(api::infrastructure::storage::port_impl::FsStoragePort { - pool: pool.clone(), - uploads_root: uploads_root.clone(), - }); - let backend = FsReconcileBackend::new(uploads_root.clone()); - (port.clone(), port, backend, false) - } - StorageBackend::S3 => { - let s3_settings = api::infrastructure::storage::s3::S3StorageConfig { - uploads_root: uploads_root.clone(), - bucket: cfg - .s3_bucket - .clone() - .context("S3_BUCKET must be configured when using S3 storage backend")?, - region: cfg.s3_region.clone(), - endpoint: 
cfg.s3_endpoint.clone(), - access_key: cfg.s3_access_key.clone(), - secret_key: cfg.s3_secret_key.clone(), - use_path_style: cfg.s3_use_path_style, - }; - let port = Arc::new( - api::infrastructure::storage::s3::S3StoragePort::new(pool.clone(), &s3_settings) - .await?, - ); - let backend = S3ReconcileBackend::new(&s3_settings).await?; - (port.clone(), port, backend, true) - } - }; - - let storage_job_queue: Arc = - Arc::new(PgStorageProjectionQueue::new(pool.clone())); - let storage_ingest_queue: Arc = - Arc::new(PgStorageIngestQueue::new(pool.clone())); - - if cfg.storage_monitor_enabled { - let monitor = Arc::new(StorageConsistencyMonitor::new( - pool.clone(), - storage_resolver.clone(), - storage_job_queue.clone(), - storage_ingest_queue.clone(), - Duration::from_secs(cfg.storage_monitor_interval_secs), - cfg.storage_monitor_batch_size, - )); - tracing::info!( - interval_secs = cfg.storage_monitor_interval_secs, - batch_size = cfg.storage_monitor_batch_size, - "storage_consistency_monitor_enabled" - ); - tokio::spawn(monitor.run()); - } else { - tracing::info!("storage_consistency_monitor_disabled"); - } - - let snapshot_archive_repo: Arc< - dyn api::application::ports::document_snapshot_archive_repository::DocumentSnapshotArchiveRepository, - > = Arc::new( - api::infrastructure::db::repositories::document_snapshot_archive_repository_sqlx::SqlxDocumentSnapshotArchiveRepository::new( - pool.clone(), - ), - ); - let document_repo = Arc::new( - api::infrastructure::db::repositories::document_repository_sqlx::SqlxDocumentRepository::new( - pool.clone(), - ), - ); - let doc_event_log: Arc = Arc::new(PgDocEventLog::new(pool.clone())); - let metrics = Arc::new(MetricsRegistry::default()); - let storage_reconcile_jobs: Arc = - Arc::new(PgStorageReconcileJobs::new(pool.clone())); - let git_rebuild_jobs: Arc = - Arc::new(PgGitRebuildJobQueue::new(pool.clone())); - let logging_subscriber: Arc = LoggingDocEventSubscriber::new(); - let git_dirty_subscriber: Arc = - GitDirtyDocEventSubscriber::new(pool.clone()); - let doc_event_subscriber: Arc = - FanoutDocEventSubscriber::new(vec![logging_subscriber.clone(), git_dirty_subscriber]); - if matches!(cfg.storage_backend, StorageBackend::Filesystem) { - let watcher = Arc::new(FsIngestWatcher::new( - uploads_root.clone(), - storage_ingest_queue.clone(), - "fs_watcher", - )); - tokio::spawn(async move { - watcher.run().await; - }); - } - { - let poller = Arc::new(DocEventPoller::new( - pool.clone(), - doc_event_subscriber.clone(), - Duration::from_millis(500), - 200, - "doc_event_poller", - )); - tokio::spawn(async move { - poller.run().await; - }); - } - let shares_repo_impl = Arc::new( - api::infrastructure::db::repositories::shares_repository_sqlx::SqlxSharesRepository::new( - pool.clone(), - ), - ); - let share_service = Arc::new(ShareService::new(shares_repo_impl.clone())); - let access_repo = Arc::new( - api::infrastructure::db::repositories::access_repository_sqlx::SqlxAccessRepository::new( - pool.clone(), - ), - ); - let authorization_service = Arc::new(AuthorizationService::new( - access_repo.clone(), - shares_repo_impl.clone(), - )); - let files_repo = Arc::new( - api::infrastructure::db::repositories::files_repository_sqlx::SqlxFilesRepository::new( - pool.clone(), - ), - ); - let public_repo = Arc::new( - api::infrastructure::db::repositories::public_repository_sqlx::SqlxPublicRepository::new( - pool.clone(), - ), - ); - let user_repo = Arc::new( - api::infrastructure::db::repositories::user_repository_sqlx::SqlxUserRepository::new( - 
pool.clone(), - ), - ); - let workspace_repo = Arc::new( - api::infrastructure::db::repositories::workspace_repository_sqlx::SqlxWorkspaceRepository::new( - pool.clone(), - ), - ); - let workspace_service = Arc::new(WorkspaceService::new(workspace_repo.clone())); - let workspace_permissions: Arc = workspace_service.clone(); - { - let reconcile_service = Arc::new(StorageReconcileService::new( - storage_reconcile_jobs.clone(), - document_repo.clone(), - files_repo.clone(), - storage_ingest_queue.clone(), - storage_job_queue.clone(), - reconcile_backend.clone(), - reconcile_ingest_known_paths, - )); - tokio::spawn({ - let svc = reconcile_service.clone(); - async move { - svc.run().await; - } - }); - let scheduler = StorageReconcileScheduler::new( - storage_reconcile_jobs.clone(), - workspace_repo.clone(), - Duration::from_secs(60 * 60), - ); - tokio::spawn(async move { - scheduler.run().await; - }); - } - let tag_repo = Arc::new( - api::infrastructure::db::repositories::tag_repository_sqlx::SqlxTagRepository::new( - pool.clone(), - ), - ); - let tag_service = Arc::new(TagService::new(tag_repo.clone())); - let api_token_repo = Arc::new( - api::infrastructure::db::repositories::api_token_repository_sqlx::SqlxApiTokenRepository::new( - pool.clone(), - ), - ); - let api_token_service = Arc::new(ApiTokenService::new(api_token_repo.clone())); - let token_validation_service = Arc::new(TokenValidationService::new(api_token_repo.clone())); - let cookie_secure = cfg - .frontend_url - .as_deref() - .map(|u| u.starts_with("https://")) - .unwrap_or(false); - let auth_service = Arc::new(AuthService::new( - cfg.jwt_secret_pem.clone(), - token_validation_service.clone(), - cfg.jwt_expires_secs as usize, - )); - let user_session_repo = Arc::new( - api::infrastructure::db::repositories::user_session_repository_sqlx::SqlxUserSessionRepository::new( - pool.clone(), - ), - ); - let session_service = Arc::new(UserSessionService::new( - user_session_repo.clone(), - auth_service.clone(), - cfg.session_refresh_ttl_secs, - cfg.session_refresh_remember_ttl_secs, - )); - - { - let repo = user_session_repo.clone(); - tokio::spawn(async move { - let mut ticker = - tokio::time::interval(Duration::from_secs(SESSION_CLEANUP_INTERVAL_SECS)); - loop { - ticker.tick().await; - let cutoff = Utc::now(); - let mut total_removed: u64 = 0; - loop { - match repo - .delete_expired(cutoff, SESSION_CLEANUP_BATCH_SIZE) - .await - { - Ok(removed) => { - if removed == 0 { - break; - } - total_removed += removed; - if removed < SESSION_CLEANUP_BATCH_SIZE as u64 { - break; - } - } - Err(err) => { - warn!(error = ?err, "user_session_cleanup_failed"); - break; - } - } - } - if total_removed > 0 { - debug!(removed = total_removed, "user_session_cleanup_deleted"); - } - } - }); - } - let user_shortcuts = Arc::new( - api::infrastructure::db::repositories::user_shortcut_repository_sqlx::SqlxUserShortcutRepository::new( - pool.clone(), - ), - ); - let user_shortcut_service = - Arc::new(UserShortcutService::new(user_shortcuts.clone(), 32 * 1024)); - let git_repo = Arc::new( - api::infrastructure::db::repositories::git_repository_sqlx::SqlxGitRepository::new( - pool.clone(), - cfg.encryption_key.clone(), - ), - ); - let git_pull_sessions = Arc::new( - api::infrastructure::db::repositories::git_pull_session_repository_sqlx::GitPullSessionRepositorySqlx::new( - pool.clone(), - ), - ); - let auto_archive_interval = Duration::from_secs(cfg.snapshot_archive_interval_secs); - let mut local_hub: Option = None; - let (realtime_engine, snapshot_service_arc): 
( - Arc, - Arc, - ) = if cfg.cluster_mode { - tracing::info!("cluster_mode_enabled"); - let redis_settings = api::infrastructure::realtime::RedisRealtimeConfig { - redis_url: cfg - .redis_url - .clone() - .context("REDIS_URL must be set when CLUSTER_MODE=1")?, - stream_prefix: cfg.redis_stream_prefix.clone(), - stream_max_len: cfg.redis_stream_max_len, - task_debounce_ms: cfg.redis_task_debounce_ms, - min_message_lifetime_ms: cfg.redis_min_message_lifetime_ms, - awareness_ttl_ms: cfg.redis_awareness_ttl_ms, - snapshot_archive_interval_secs: cfg.snapshot_archive_interval_secs, - spawn_persistence_worker: true, - }; - let engine = Arc::new( - api::infrastructure::realtime::RedisRealtimeEngine::from_config( - redis_settings, - pool.clone(), - storage_resolver.clone(), - storage_job_queue.clone(), - )?, - ); - let snapshot_service = engine.snapshot_service(); - let engine_trait: Arc = - engine.clone(); - (engine_trait, snapshot_service) - } else { - tracing::info!("cluster_mode_disabled_using_local_hub"); - let doc_state_reader: Arc< - dyn api::application::ports::realtime_hydration_port::DocStateReader, - > = Arc::new(api::infrastructure::realtime::SqlxDocStateReader::new( - pool.clone(), - )); - let backlog_reader: Arc< - dyn api::application::ports::realtime_hydration_port::RealtimeBacklogReader, - > = Arc::new(api::infrastructure::realtime::NoopBacklogReader::default()); - let doc_persistence: Arc< - dyn api::application::ports::realtime_persistence_port::DocPersistencePort, - > = Arc::new(api::infrastructure::realtime::SqlxDocPersistenceAdapter::new(pool.clone())); - let linkgraph_repo: Arc = - Arc::new( - api::infrastructure::db::repositories::linkgraph_repository_sqlx::SqlxLinkGraphRepository::new( - pool.clone(), - ), - ); - let tagging_repo: Arc = - Arc::new( - api::infrastructure::db::repositories::tagging_repository_sqlx::SqlxTaggingRepository::new( - pool.clone(), - ), - ); - let hydration_service = Arc::new( - api::application::services::realtime::doc_hydration::DocHydrationService::new( - doc_state_reader.clone(), - backlog_reader, - storage_resolver.clone(), - ), - ); - let snapshot_service = Arc::new( - api::application::services::realtime::snapshot::SnapshotService::new( - doc_state_reader.clone(), - doc_persistence.clone(), - linkgraph_repo, - tagging_repo, - snapshot_archive_repo.clone(), - storage_job_queue.clone(), - ), - ); - let hub = api::infrastructure::realtime::Hub::new( - hydration_service, - snapshot_service.clone(), - doc_persistence, - auto_archive_interval, - ); - let engine = - Arc::new(api::infrastructure::realtime::LocalRealtimeEngine { hub: hub.clone() }); - let engine_trait: Arc = - engine.clone(); - local_hub = Some(hub); - (engine_trait, snapshot_service) - }; - - let recent_projection_cache = Arc::new( - api::application::services::storage_projection_cache::RecentProjectionCache::new( - Duration::from_secs(5), - ), - ); - - { - let markdown_exporter: Arc = snapshot_service_arc.clone(); - let worker = Arc::new(StorageProjectionWorker::new( - storage_job_queue.clone(), - storage_projection.clone(), - storage_resolver.clone(), - markdown_exporter, - doc_event_log.clone(), - metrics.clone(), - workspace_permissions.clone(), - recent_projection_cache.clone(), - )); - tokio::spawn(async move { - worker.run().await; - }); - } - - let git_storage_config = match cfg.storage_backend { - StorageBackend::Filesystem => { - api::infrastructure::git::storage::GitStorageDriverConfig::Filesystem { - root: uploads_root.clone(), - } - } - StorageBackend::S3 => { - let 
s3_settings = api::infrastructure::git::storage::S3GitStorageConfig { - storage_root_prefix: cfg.storage_root.clone(), - bucket: cfg - .s3_bucket - .clone() - .context("S3_BUCKET must be configured when using S3 storage backend")?, - region: cfg.s3_region.clone(), - endpoint: cfg.s3_endpoint.clone(), - access_key: cfg.s3_access_key.clone(), - secret_key: cfg.s3_secret_key.clone(), - use_path_style: cfg.s3_use_path_style, - }; - api::infrastructure::git::storage::GitStorageDriverConfig::S3(s3_settings) - } - }; - let git_storage = - api::infrastructure::git::storage::build_git_storage(git_storage_config).await?; - let gitignore_port = Arc::new(api::infrastructure::storage::gitignore::FsGitignorePort); - let git_workspace = Arc::new( - api::infrastructure::git::workspace::GitWorkspaceService::new( - pool.clone(), - git_storage.clone(), - storage_resolver.clone(), - snapshot_service_arc.clone(), - realtime_engine.clone(), - document_repo.clone(), - )?, - ); - let git_service = Arc::new(GitService::new( - git_repo.clone(), - storage_resolver.clone(), - files_repo.clone(), - document_repo.clone(), - gitignore_port.clone(), - git_workspace.clone(), - git_pull_sessions.clone(), - )); - if cfg.git_rebuild_enabled { - let rebuild_service = Arc::new(GitRebuildService::new( - git_rebuild_jobs.clone(), - git_workspace.clone(), - git_repo.clone(), - metrics.clone(), - workspace_permissions.clone(), - )); - tokio::spawn({ - let svc = rebuild_service.clone(); - async move { - svc.run().await; - } - }); - let rebuild_scheduler = GitRebuildScheduler::new( - git_rebuild_jobs.clone(), - git_repo.clone(), - git_workspace.clone(), - Duration::from_secs(cfg.git_rebuild_interval_secs), - ); - tokio::spawn(async move { - rebuild_scheduler.run().await; - }); - } else { - tracing::info!("git_rebuild_scheduler_disabled"); - } - let plugin_repo = Arc::new( - api::infrastructure::db::repositories::plugin_repository_sqlx::SqlxPluginRepository::new( - pool.clone(), - ), - ); - let plugin_data_service = Arc::new(PluginDataService::new(plugin_repo.clone())); - let plugin_installations = Arc::new( - api::infrastructure::db::repositories::plugin_installation_repository_sqlx::SqlxPluginInstallationRepository::new( - pool.clone(), - ), - ); - let plugin_limits = { - let timeout = if cfg.plugin_timeout_secs == 0 { - None - } else { - Some(std::time::Duration::from_secs(cfg.plugin_timeout_secs)) - }; - let memory_pages_raw = cfg.plugin_memory_max_mb.saturating_mul(16); - let memory_max_pages = if memory_pages_raw == 0 { - None - } else { - Some(memory_pages_raw.min(u32::MAX as u64) as u32) - }; - let fuel_limit = cfg - .plugin_fuel_limit - .and_then(|limit| if limit == 0 { None } else { Some(limit) }); - PluginExecutionLimits::new(timeout, memory_max_pages, fuel_limit) - }; - let mut s3_plugin_store: Option< - Arc, - > = None; - let (plugin_runtime, plugin_installer, plugin_assets): ( - Arc, - Arc, - Arc, - ) = match cfg.storage_backend { - StorageBackend::Filesystem => { - let store = Arc::new( - api::infrastructure::plugins::filesystem_store::FilesystemPluginStore::new( - &cfg.plugin_dir, - plugin_limits, - )?, - ); - let runtime: Arc = store.clone(); - let installer: Arc = store.clone(); - let assets: Arc = store.clone(); - (runtime, installer, assets) - } - StorageBackend::S3 => { - let s3_store_cfg = api::infrastructure::plugins::s3_store::S3PluginStoreConfig { - plugin_dir: cfg.plugin_dir.clone(), - bucket: cfg - .s3_bucket - .clone() - .context("S3_BUCKET must be configured when using S3 storage backend")?, - region: 
cfg.s3_region.clone(), - endpoint: cfg.s3_endpoint.clone(), - access_key: cfg.s3_access_key.clone(), - secret_key: cfg.s3_secret_key.clone(), - use_path_style: cfg.s3_use_path_style, - }; - let store = Arc::new( - api::infrastructure::plugins::s3_store::S3BackedPluginStore::new( - &s3_store_cfg, - plugin_limits, - ) - .await?, - ); - s3_plugin_store = Some(store.clone()); - let runtime: Arc = store.clone(); - let installer: Arc = store.clone(); - let assets: Arc = store.clone(); - (runtime, installer, assets) - } - }; - let plugin_permission_service = Arc::new(PluginPermissionService::new(plugin_runtime.clone())); - let plugin_fetcher: Arc = Arc::new( - api::infrastructure::plugins::package_fetcher_reqwest::ReqwestPluginPackageFetcher::new(), - ); - let plugin_execution_service = Arc::new(PluginExecutionService::new( - plugin_repo.clone(), - document_repo.clone(), - plugin_runtime.clone(), - authorization_service.clone(), - )); - let account_service = Arc::new(AccountService::new( - user_repo.clone(), - document_repo.clone(), - files_repo.clone(), - plugin_installations.clone(), - plugin_repo.clone(), - plugin_assets.clone(), - git_repo.clone(), - git_workspace.clone(), - storage_job_queue.clone(), - workspace_service.clone(), - )); - let plugin_event_bus = Arc::new( - api::infrastructure::plugins::event_bus_pg::PgPluginEventBus::new( - pool.clone(), - "plugin_events", - ), - ); - if let Some(store) = &s3_plugin_store { - store.spawn_event_listener(plugin_event_bus.clone()); - - let installations = plugin_installations.clone(); - let assets = store.clone(); - tokio::spawn(async move { - match installations.list_all().await { - Ok(installs) => { - for inst in installs.into_iter().filter(|i| i.status == "enabled") { - if let Err(err) = assets - .load_user_manifest(&inst.workspace_id, &inst.plugin_id, &inst.version) - .await - { - tracing::warn!( - error = ?err, - workspace_id = %inst.workspace_id, - plugin = inst.plugin_id.as_str(), - version = inst.version.as_str(), - "prefetch_user_plugin_failed" - ); - } - } - } - Err(err) => { - tracing::warn!(error = ?err, "list_all_plugin_installations_failed"); - } - } - }); - } - let plugin_event_publisher: Arc = plugin_event_bus.clone(); - let plugin_event_subscriber: Arc = plugin_event_bus.clone(); - - let document_exporter = Arc::new(DefaultDocumentExporter::new()); - - let document_service = Arc::new(DocumentService::new( - pool.clone(), - document_repo.clone(), - files_repo.clone(), - access_repo.clone(), - shares_repo_impl.clone(), - storage_resolver.clone(), - doc_event_log.clone(), - storage_job_queue.clone(), - realtime_engine.clone(), - snapshot_service_arc.clone(), - document_exporter.clone(), - )); - - { - let handler = Arc::new(StorageIngestService::new( - document_repo.clone(), - files_repo.clone(), - realtime_engine.clone(), - storage_resolver.clone(), - storage_projection.clone(), - doc_event_log.clone(), - document_service.clone(), - workspace_permissions.clone(), - recent_projection_cache.clone(), - )); - let worker = Arc::new(StorageIngestWorker::new( - storage_ingest_queue.clone(), - handler, - metrics.clone(), - )); - tokio::spawn(async move { - worker.run().await; - }); - } - let file_service = Arc::new(FileService::new( - files_repo.clone(), - storage_resolver.clone(), - access_repo.clone(), - shares_repo_impl.clone(), - doc_event_log.clone(), - )); - let public_service = Arc::new(PublicService::new( - public_repo.clone(), - realtime_engine.clone(), - )); - let plugin_management_service = Arc::new(PluginManagementService::new( 
- plugin_installations.clone(), - plugin_assets.clone(), - plugin_event_publisher.clone(), - asset_signer.clone(), - cfg.plugin_asset_url_ttl_secs, - plugin_fetcher.clone(), - plugin_installer.clone(), - )); - let markdown_render_service = Arc::new(MarkdownRenderService::new( - plugin_assets.clone(), - plugin_installations.clone(), - plugin_runtime.clone(), - asset_signer.clone(), - cfg.plugin_asset_url_ttl_secs, - )); - - let health_probe = - api::infrastructure::health::db_probe::DatabaseHealthProbe::new(pool.clone()); - let health_service = Arc::new(HealthService::new(health_probe)); - - let mut external_auth_providers: Vec> = Vec::new(); - if let Some(google_cfg) = cfg.google_oauth.clone() { - match GoogleIdentityProvider::new(google_cfg.client_ids.clone()) { - Ok(provider) => { - tracing::info!("google_oauth_provider_enabled"); - external_auth_providers.push(Arc::new(provider)); - } - Err(err) => { - tracing::warn!(error = ?err, "google_oauth_provider_init_failed"); - } - } - } - if let Some(github_cfg) = cfg.github_oauth.clone() { - match GithubOAuthProvider::new( - github_cfg.client_id.clone(), - github_cfg.client_secret.clone(), - github_cfg.redirect_uri.clone(), - ) { - Ok(provider) => { - tracing::info!("github_oauth_provider_enabled"); - external_auth_providers.push(Arc::new(provider)); - } - Err(err) => { - tracing::warn!(error = ?err, "github_oauth_provider_init_failed"); - } - } - } - if let Some(oidc_cfg) = cfg.oidc_oauth.clone() { - match OidcIdentityProvider::discover(oidc_cfg).await { - Ok(provider) => { - tracing::info!("oidc_oauth_provider_enabled"); - external_auth_providers.push(Arc::new(provider)); - } - Err(err) => { - tracing::warn!(error = ?err, "oidc_oauth_provider_init_failed"); - } - } - } - let external_auth_registry = Arc::new(ExternalAuthRegistry::new(external_auth_providers)); - - let services = AppServices::new( - authorization_service, - document_service.clone(), - share_service.clone(), - file_service.clone(), - public_service.clone(), - tag_service.clone(), - api_token_service.clone(), - user_shortcut_service.clone(), - git_service.clone(), - markdown_render_service.clone(), - workspace_service.clone(), - plugin_execution_service.clone(), - plugin_management_service.clone(), - plugin_permission_service.clone(), - plugin_data_service.clone(), - plugin_event_subscriber, - health_service.clone(), - account_service.clone(), - auth_service.clone(), - session_service.clone(), - realtime_engine.clone(), - storage_ingest_queue.clone(), - external_auth_registry.clone(), - ); - - let presentation_cfg = PresentationConfig { - frontend_url: cfg.frontend_url.clone(), - upload_max_bytes: cfg.upload_max_bytes, - public_base_url: cfg.public_base_url.clone(), - session_cookie_secure: cookie_secure, - }; - let ctx = AppContext::new(presentation_cfg, services, metrics.clone()); - - let frontend_origin = if let Some(origin) = cfg.frontend_url.clone() { - Some(HeaderValue::from_str(&origin).map_err(|_| { - anyhow::anyhow!("FRONTEND_URL must be a valid origin (e.g., https://app.example.com)") - })?) 
- } else { - None - }; - - // Build CORS - let cors_allow_headers = [ - http::header::CONTENT_TYPE, - http::header::AUTHORIZATION, - http::header::HeaderName::from_static("x-workspace-id"), - ]; - let cors_expose_headers = [http::header::WWW_AUTHENTICATE]; - let cors = if let Some(origin) = frontend_origin.clone() { - CorsLayer::new() - .allow_origin(origin) - .allow_methods([ - http::Method::GET, - http::Method::POST, - http::Method::PUT, - http::Method::DELETE, - http::Method::PATCH, - http::Method::OPTIONS, - ]) - .allow_headers(cors_allow_headers.clone()) - .expose_headers(cors_expose_headers.clone()) - .allow_credentials(true) - } else { - if cfg.is_production { - // In production, FRONTEND_URL is mandatory (enforced earlier), but fallback defensively to deny all - CorsLayer::new() - .allow_origin(AllowOrigin::exact(HeaderValue::from_static( - "http://invalid", - ))) - .allow_methods([ - http::Method::GET, - http::Method::POST, - http::Method::PUT, - http::Method::DELETE, - http::Method::PATCH, - http::Method::OPTIONS, - ]) - .allow_headers(cors_allow_headers.clone()) - .expose_headers(cors_expose_headers.clone()) - } else { - // Development convenience - CorsLayer::new() - .allow_origin(AllowOrigin::mirror_request()) - .allow_methods([ - http::Method::GET, - http::Method::POST, - http::Method::PUT, - http::Method::DELETE, - http::Method::PATCH, - http::Method::OPTIONS, - ]) - .allow_headers(cors_allow_headers.clone()) - .expose_headers(cors_expose_headers.clone()) - .allow_credentials(true) - } - }; - - // Ensure uploads dir exists even when using S3 backend (local staging is still required) - if let Err(e) = tokio::fs::create_dir_all(&cfg.storage_root).await { - tracing::warn!(error=?e, dir=%cfg.storage_root, "Failed to create uploads dir"); - } - - // Build upload router with state - let upload_router = Router::new() - .route("/*path", get(api::presentation::http::files::serve_upload)) - .with_state(ctx.clone()); - - // Build API router - let api_router = Router::new() - .nest("/api", api::presentation::http::health::routes(ctx.clone())) - .nest( - "/api", - api::presentation::http::documents::routes(ctx.clone()), - ) - .nest( - "/api/auth", - api::presentation::http::auth::routes(ctx.clone()), - ) - .nest("/api", api::presentation::http::shares::routes(ctx.clone())) - .nest("/api", api::presentation::http::files::routes(ctx.clone())) - .nest("/api", api::presentation::http::tags::routes(ctx.clone())) - .nest("/api", api::presentation::http::git::routes(ctx.clone())) - .nest( - "/api", - api::presentation::http::markdown::routes(ctx.clone()), - ) - .nest( - "/api", - api::presentation::http::plugins::routes(ctx.clone()), - ) - .nest( - "/api", - api::presentation::http::api_tokens::routes(ctx.clone()), - ) - .nest( - "/api", - api::presentation::http::storage_ingest::routes(ctx.clone()), - ) - .nest( - "/api", - api::presentation::http::workspaces::routes(ctx.clone()), - ) - .nest( - "/api", - api::presentation::http::shortcuts::routes(ctx.clone()), - ) - .nest( - "/api/public", - api::presentation::http::public::routes(ctx.clone()), - ) - .merge(SwaggerUi::new("/api/docs").url("/api/openapi.json", ApiDoc::openapi())) - .layer(middleware::from_fn_with_state( - ctx.clone(), - api::presentation::http::auth::refresh_middleware, - )) - .layer(middleware::from_fn( - api::presentation::http::auth::request_status::middleware, - )) - .layer(cors) - // Global body size limit for uploads (configurable) - .layer(DefaultBodyLimit::max(cfg.upload_max_bytes)) - .layer( - 
TraceLayer::new_for_http().make_span_with(|req: &http::Request<_>| { - let method = req.method().clone(); - let uri = req.uri().clone(); - let matched = req - .extensions() - .get::() - .map(|p| p.as_str().to_string()) - .unwrap_or_default(); - tracing::info_span!("http", %method, %uri, matched_path = %matched) - }), - ); - - let metrics_router = Router::new() - .route( - "/metrics", - get(api::presentation::http::metrics::metrics_handler), - ) - .with_state(ctx.clone()); - let api_router = api_router.merge(metrics_router); - - let api_router = api_router.nest("/api/uploads", upload_router); - - // Mount WS endpoint on the same port as HTTP - - // Compose final app for HTTP - let api_addr = SocketAddr::from(([0, 0, 0, 0], cfg.api_port)); - info!(%api_addr, "HTTP API listening"); - let listener = tokio::net::TcpListener::bind(api_addr).await?; - let ws_router = Router::new() - .route("/api/yjs/:id", get(api::presentation::ws::axum_ws_entry)) - .with_state(ctx.clone()) - .layer(middleware::from_fn_with_state( - ctx.clone(), - api::presentation::http::auth::refresh_middleware, - )) - .layer(middleware::from_fn( - api::presentation::http::auth::request_status::middleware, - )); - - let app = api_router.merge(ws_router); - - let api_handle: JoinHandle> = tokio::spawn(async move { - axum::serve(listener, app).await?; - Ok(()) - }); - - // Background snapshots - const SNAPSHOT_LOCK_KEY: i64 = i64::from_be_bytes(*b"REFSNAP1"); - - let snap_handle: Option>> = - if let Some(hub_for_snap) = local_hub.clone() { - let cfg_for_snap = cfg.clone(); - let pool_for_snap = pool.clone(); - Some(tokio::spawn(async move { - let interval = Duration::from_secs(cfg_for_snap.snapshot_interval_secs); - loop { - match AdvisoryLock::try_acquire(&pool_for_snap, SNAPSHOT_LOCK_KEY).await { - Ok(Some(lock)) => { - let snapshot_result = hub_for_snap - .snapshot_all( - cfg_for_snap.snapshot_keep_versions, - cfg_for_snap.updates_keep_window, - ) - .await; - - if let Err(e) = lock.release().await { - tracing::error!(error = ?e, "snapshot_lock_release_failed"); - } - - if let Err(e) = snapshot_result { - tracing::error!(error = ?e, "snapshot_loop_failed"); - } - } - Ok(None) => { - tracing::debug!("snapshot_loop_skipped_lock_held"); - } - Err(e) => { - tracing::error!(error = ?e, "snapshot_lock_error"); - } - } - sleep(interval).await; - } - })) - } else { - None - }; - - match api_handle.await { - Ok(Ok(())) => {} - Ok(Err(e)) => error!(?e, "API server task failed"), - Err(e) => error!(?e, "API server task panicked"), - } - - if let Some(handle) = snap_handle { - match handle.await { - Ok(Ok(())) => {} - Ok(Err(e)) => error!(?e, "Snapshot task failed"), - Err(e) => error!(?e, "Snapshot task panicked"), - } - } - Ok(()) -} diff --git a/api/src/presentation/context.rs b/api/src/presentation/context.rs deleted file mode 100644 index a59d1a6b..00000000 --- a/api/src/presentation/context.rs +++ /dev/null @@ -1,248 +0,0 @@ -use std::sync::Arc; - -use futures_util::stream::BoxStream; - -use crate::application::ports::plugin_event_publisher::PluginScopedEvent; -use crate::application::ports::plugin_event_subscriber::PluginEventSubscriber; -use crate::application::ports::realtime_port::RealtimeEngine; -pub use crate::application::ports::realtime_types::{DynRealtimeSink, DynRealtimeStream}; -use crate::application::ports::storage_ingest_queue::StorageIngestQueue; -use crate::application::services::api_tokens::ApiTokenService; -use crate::application::services::auth::account::AccountService; -use 
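[Editor's note — context for the hunk above, not part of the patch.] The deleted `main` serializes its background snapshot loop across nodes with a Postgres advisory lock keyed by the big-endian bytes of `REFSNAP1`. The `AdvisoryLock` helper itself never appears in this diff, so the following is only a sketch of the shape such a guard could take on sqlx; the key subtlety it illustrates is that session-level advisory locks belong to a single connection, so the guard must pin one pooled connection for its whole lifetime:

    use sqlx::{pool::PoolConnection, PgPool, Postgres};

    /// Hypothetical guard for a Postgres session-level advisory lock.
    pub struct AdvisoryLock {
        conn: PoolConnection<Postgres>, // the lock lives on this connection
        key: i64,
    }

    impl AdvisoryLock {
        /// Returns Ok(None) when another node already holds `key`.
        pub async fn try_acquire(pool: &PgPool, key: i64) -> sqlx::Result<Option<Self>> {
            let mut conn = pool.acquire().await?;
            let locked: bool = sqlx::query_scalar("SELECT pg_try_advisory_lock($1)")
                .bind(key)
                .fetch_one(&mut *conn)
                .await?;
            Ok(locked.then(|| Self { conn, key }))
        }

        pub async fn release(mut self) -> sqlx::Result<()> {
            sqlx::query_scalar::<_, bool>("SELECT pg_advisory_unlock($1)")
                .bind(self.key)
                .fetch_one(&mut *self.conn)
                .await?;
            Ok(())
        }
    }

If the guard were dropped without `release`, Postgres would only free the lock when the session ends, which would explain why the deleted loop releases explicitly even on the error path before sleeping.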
diff --git a/api/src/presentation/context.rs b/api/src/presentation/context.rs
deleted file mode 100644
index a59d1a6b..00000000
--- a/api/src/presentation/context.rs
+++ /dev/null
@@ -1,248 +0,0 @@
-use std::sync::Arc;
-
-use futures_util::stream::BoxStream;
-
-use crate::application::ports::plugin_event_publisher::PluginScopedEvent;
-use crate::application::ports::plugin_event_subscriber::PluginEventSubscriber;
-use crate::application::ports::realtime_port::RealtimeEngine;
-pub use crate::application::ports::realtime_types::{DynRealtimeSink, DynRealtimeStream};
-use crate::application::ports::storage_ingest_queue::StorageIngestQueue;
-use crate::application::services::api_tokens::ApiTokenService;
-use crate::application::services::auth::account::AccountService;
-use crate::application::services::auth::external::ExternalAuthRegistry;
-use crate::application::services::auth::service::AuthService;
-use crate::application::services::auth::user_sessions::UserSessionService;
-use crate::application::services::authorization::AuthorizationService;
-use crate::application::services::documents::DocumentService;
-use crate::application::services::files::FileService;
-use crate::application::services::git::GitService;
-use crate::application::services::health::HealthService;
-use crate::application::services::markdown_render::MarkdownRenderService;
-use crate::application::services::metrics::MetricsRegistry;
-use crate::application::services::plugins::data::PluginDataService;
-use crate::application::services::plugins::execution::PluginExecutionService;
-use crate::application::services::plugins::management::PluginManagementService;
-use crate::application::services::plugins::permissions::PluginPermissionService;
-use crate::application::services::public::PublicService;
-use crate::application::services::shares::ShareService;
-use crate::application::services::tags::TagService;
-use crate::application::services::user_shortcuts::UserShortcutService;
-use crate::application::services::workspaces::WorkspaceService;
-
-#[derive(Debug, Clone)]
-pub struct PresentationConfig {
-    pub frontend_url: Option<String>,
-    pub upload_max_bytes: usize,
-    pub public_base_url: Option<String>,
-    pub session_cookie_secure: bool,
-}
-
-#[derive(Clone)]
-pub struct AppContext {
-    pub cfg: PresentationConfig,
-    services: Arc<AppServices>,
-    metrics: Arc<MetricsRegistry>,
-}
-
-#[derive(Clone)]
-pub struct AppServices {
-    authorization: Arc<AuthorizationService>,
-    document_service: Arc<DocumentService>,
-    share_service: Arc<ShareService>,
-    file_service: Arc<FileService>,
-    public_service: Arc<PublicService>,
-    tag_service: Arc<TagService>,
-    api_token_service: Arc<ApiTokenService>,
-    user_shortcut_service: Arc<UserShortcutService>,
-    git_service: Arc<GitService>,
-    markdown_render_service: Arc<MarkdownRenderService>,
-    workspace_service: Arc<WorkspaceService>,
-    plugin_execution_service: Arc<PluginExecutionService>,
-    plugin_management_service: Arc<PluginManagementService>,
-    plugin_permission_service: Arc<PluginPermissionService>,
-    plugin_data_service: Arc<PluginDataService>,
-    plugin_event_subscriber: Arc<dyn PluginEventSubscriber>,
-    health_service: Arc<HealthService>,
-    account_service: Arc<AccountService>,
-    auth_service: Arc<AuthService>,
-    session_service: Arc<UserSessionService>,
-    realtime_engine: Arc<dyn RealtimeEngine>,
-    storage_ingest_queue: Arc<dyn StorageIngestQueue>,
-    external_auth: Arc<ExternalAuthRegistry>,
-}
-
-impl AppServices {
-    #[allow(clippy::too_many_arguments)]
-    pub fn new(
-        authorization: Arc<AuthorizationService>,
-        document_service: Arc<DocumentService>,
-        share_service: Arc<ShareService>,
-        file_service: Arc<FileService>,
-        public_service: Arc<PublicService>,
-        tag_service: Arc<TagService>,
-        api_token_service: Arc<ApiTokenService>,
-        user_shortcut_service: Arc<UserShortcutService>,
-        git_service: Arc<GitService>,
-        markdown_render_service: Arc<MarkdownRenderService>,
-        workspace_service: Arc<WorkspaceService>,
-        plugin_execution_service: Arc<PluginExecutionService>,
-        plugin_management_service: Arc<PluginManagementService>,
-        plugin_permission_service: Arc<PluginPermissionService>,
-        plugin_data_service: Arc<PluginDataService>,
-        plugin_event_subscriber: Arc<dyn PluginEventSubscriber>,
-        health_service: Arc<HealthService>,
-        account_service: Arc<AccountService>,
-        auth_service: Arc<AuthService>,
-        session_service: Arc<UserSessionService>,
-        realtime_engine: Arc<dyn RealtimeEngine>,
-        storage_ingest_queue: Arc<dyn StorageIngestQueue>,
-        external_auth: Arc<ExternalAuthRegistry>,
-    ) -> Self {
-        Self {
-            authorization,
-            document_service,
-            share_service,
-            file_service,
-            public_service,
-            tag_service,
-            api_token_service,
-            user_shortcut_service,
-            git_service,
-            markdown_render_service,
-            workspace_service,
-            plugin_execution_service,
-            plugin_management_service,
-            plugin_permission_service,
-            plugin_data_service,
-            plugin_event_subscriber,
-            health_service,
-            account_service,
-            auth_service,
-            session_service,
-            realtime_engine,
-            storage_ingest_queue,
-            external_auth,
-        }
-    }
-}
-
-impl AppContext {
-    pub fn new(
-        cfg: PresentationConfig,
-        services: AppServices,
-        metrics: Arc<MetricsRegistry>,
-    ) -> Self {
-        Self {
-            cfg,
-            services: Arc::new(services),
-            metrics,
-        }
-    }
-
-    pub fn authorization(&self) -> Arc<AuthorizationService> {
-        self.services.authorization.clone()
-    }
-
-    pub fn document_service(&self) -> Arc<DocumentService> {
-        self.services.document_service.clone()
-    }
-
-    pub fn share_service(&self) -> Arc<ShareService> {
-        self.services.share_service.clone()
-    }
-
-    pub fn file_service(&self) -> Arc<FileService> {
-        self.services.file_service.clone()
-    }
-
-    pub fn public_service(&self) -> Arc<PublicService> {
-        self.services.public_service.clone()
-    }
-
-    pub fn tag_service(&self) -> Arc<TagService> {
-        self.services.tag_service.clone()
-    }
-
-    pub fn user_shortcut_service(&self) -> Arc<UserShortcutService> {
-        self.services.user_shortcut_service.clone()
-    }
-
-    pub fn git_service(&self) -> Arc<GitService> {
-        self.services.git_service.clone()
-    }
-
-    pub fn markdown_renderer(&self) -> Arc<MarkdownRenderService> {
-        self.services.markdown_render_service.clone()
-    }
-
-    pub fn workspace_service(&self) -> Arc<WorkspaceService> {
-        self.services.workspace_service.clone()
-    }
-
-    pub fn storage_ingest_queue(&self) -> Arc<dyn StorageIngestQueue> {
-        self.services.storage_ingest_queue.clone()
-    }
-
-    pub fn plugin_execution_service(&self) -> Arc<PluginExecutionService> {
-        self.services.plugin_execution_service.clone()
-    }
-
-    pub fn plugin_management(&self) -> Arc<PluginManagementService> {
-        self.services.plugin_management_service.clone()
-    }
-
-    pub fn plugin_permissions(&self) -> Arc<PluginPermissionService> {
-        self.services.plugin_permission_service.clone()
-    }
-
-    pub fn plugin_data_service(&self) -> Arc<PluginDataService> {
-        self.services.plugin_data_service.clone()
-    }
-
-    pub fn health_service(&self) -> Arc<HealthService> {
-        self.services.health_service.clone()
-    }
-
-    pub fn account_service(&self) -> Arc<AccountService> {
-        self.services.account_service.clone()
-    }
-
-    pub fn auth_service(&self) -> Arc<AuthService> {
-        self.services.auth_service.clone()
-    }
-
-    pub fn session_service(&self) -> Arc<UserSessionService> {
-        self.services.session_service.clone()
-    }
-
-    pub fn external_auth(&self) -> Arc<ExternalAuthRegistry> {
-        self.services.external_auth.clone()
-    }
-
-    pub fn metrics(&self) -> Arc<MetricsRegistry> {
-        self.metrics.clone()
-    }
-
-    pub async fn subscribe_plugin_events(
-        &self,
-    ) -> anyhow::Result<BoxStream<'static, PluginScopedEvent>> {
-        self.services.plugin_event_subscriber.subscribe().await
-    }
-
-    pub fn api_token_service(&self) -> Arc<ApiTokenService> {
-        self.services.api_token_service.clone()
-    }
-
-    pub async fn subscribe_realtime(
-        &self,
-        doc_id: &str,
-        sink: DynRealtimeSink,
-        stream: DynRealtimeStream,
-        can_edit: bool,
-    ) -> anyhow::Result<()> {
-        self.services
-            .realtime_engine
-            .subscribe(doc_id, sink, stream, can_edit)
-            .await
-    }
-}
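[Editor's note — context for the deleted file above, not part of the patch.] `AppContext` was the single axum state type: every router in the old `main` was built with `.with_state(ctx)`, and every accessor hands out an `Arc` clone, so a handler reaches any service with a cheap refcount bump and no locking. A minimal consumer of that pattern, assuming a `check()` method on `HealthService` (the real API is not in this hunk), could look like:

    use axum::{extract::State, http::StatusCode, routing::get, Json, Router};

    // Hypothetical handler: pull one service out of the shared context.
    async fn health(State(ctx): State<AppContext>) -> Result<Json<serde_json::Value>, StatusCode> {
        ctx.health_service()
            .check() // assumed method name, for illustration only
            .await
            .map(|_| Json(serde_json::json!({ "status": "ok" })))
            .map_err(|_| StatusCode::SERVICE_UNAVAILABLE)
    }

    fn routes(ctx: AppContext) -> Router {
        Router::new().route("/health", get(health)).with_state(ctx)
    }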
diff --git a/api/src/presentation/http/api_tokens.rs b/api/src/presentation/http/api_tokens.rs
deleted file mode 100644
index 37ce4c6d..00000000
--- a/api/src/presentation/http/api_tokens.rs
+++ /dev/null
@@ -1,192 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::{Path, State},
-    http::{HeaderMap, StatusCode},
-    routing::{delete, get},
-};
-use serde::{Deserialize, Serialize};
-use tracing::error;
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::dto::api_tokens::{ApiTokenDto, CreatedApiTokenDto};
-use crate::application::services::errors::ServiceError;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::{
-    auth::{self, Bearer},
-    workspace_scope,
-};
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ApiTokenItem {
-    pub id: Uuid,
-    pub name: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub last_used_at: Option<chrono::DateTime<chrono::Utc>>,
-    pub revoked_at: Option<chrono::DateTime<chrono::Utc>>,
-}
-
-impl From<ApiTokenDto> for ApiTokenItem {
-    fn from(value: ApiTokenDto) -> Self {
-        Self {
-            id: value.id,
-            name: value.name,
-            created_at: value.created_at,
-            last_used_at: value.last_used_at,
-            revoked_at: value.revoked_at,
-        }
-    }
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ApiTokenCreateResponse {
-    pub id: Uuid,
-    pub name: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub token: String,
-}
-
-impl From<CreatedApiTokenDto> for ApiTokenCreateResponse {
-    fn from(value: CreatedApiTokenDto) -> Self {
-        Self {
-            id: value.token.id,
-            name: value.token.name,
-            created_at: value.token.created_at,
-            token: value.plaintext,
-        }
-    }
-}
-
-fn map_token_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "api_token_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct ApiTokenCreateRequest {
-    #[schema(example = "Deploy token")]
-    pub name: Option<String>,
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/me/api-tokens",
-    tag = "Auth",
-    responses((status = 200, body = [ApiTokenItem]))
-)]
-pub async fn list_api_tokens(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<Vec<ApiTokenItem>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer(&ctx, Bearer(bearer_token.clone())).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-
-    let service = ctx.api_token_service();
-    let items = service
-        .list(workspace_id, &permissions)
-        .await
-        .map_err(map_token_error)?;
-    Ok(Json(items.into_iter().map(ApiTokenItem::from).collect()))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/me/api-tokens",
-    tag = "Auth",
-    request_body = ApiTokenCreateRequest,
-    responses((status = 200, body = ApiTokenCreateResponse))
-)]
-pub async fn create_api_token(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(payload): Json<ApiTokenCreateRequest>,
-) -> Result<Json<ApiTokenCreateResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer(&ctx, Bearer(bearer_token.clone())).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-
-    let service = ctx.api_token_service();
-    let created = service
-        .create(workspace_id, user_id, &permissions, payload.name.as_deref())
-        .await
-        .map_err(map_token_error)?;
-    Ok(Json(ApiTokenCreateResponse::from(created)))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/me/api-tokens/{id}",
-    tag = "Auth",
-    params(("id" = Uuid, Path, description = "Token ID")),
-    responses((status = 204))
-)]
-pub async fn revoke_api_token(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer(&ctx, Bearer(bearer_token.clone())).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-
-    let service = ctx.api_token_service();
-    let revoked = service
-        .revoke(workspace_id, id, &permissions)
-        .await
-        .map_err(map_token_error)?;
-    if revoked {
-        Ok(StatusCode::NO_CONTENT)
-    } else {
-        Err(StatusCode::NOT_FOUND)
-    }
-}
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route(
-            "/me/api-tokens",
-            get(list_api_tokens).post(create_api_token),
-        )
-        .route("/me/api-tokens/:id", delete(revoke_api_token))
-        .with_state(ctx)
-}
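[Editor's note — context for the deleted file above, not part of the patch.] All three handlers repeat the same preamble: validate the bearer, parse the user id, resolve the active workspace, then resolve permissions. One way such repetition is often folded away in axum is a custom extractor; the sketch below is purely illustrative and reuses names from the hunks above under the assumption they are visible from this module:

    use axum::{extract::FromRequestParts, http::{request::Parts, StatusCode}};
    use uuid::Uuid;

    /// Hypothetical extractor bundling the repeated auth preamble.
    pub struct WorkspaceActor {
        pub user_id: Uuid,
        pub workspace_id: Uuid,
    }

    #[axum::async_trait]
    impl FromRequestParts<AppContext> for WorkspaceActor {
        type Rejection = StatusCode;

        async fn from_request_parts(parts: &mut Parts, ctx: &AppContext) -> Result<Self, StatusCode> {
            // Reuse the Bearer extractor defined in auth.rs below.
            let bearer = Bearer::from_request_parts(parts, ctx).await?;
            let sub = auth::validate_bearer(ctx, bearer.clone()).await?;
            let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
            let workspace_id = workspace_scope::resolve_active_workspace_id(
                ctx, &parts.headers, Some(bearer.0.as_str()), user_id,
            )
            .await?;
            Ok(Self { user_id, workspace_id })
        }
    }

With that in place, each handler's signature would shrink to `actor: WorkspaceActor` and the bodies would start at the service call.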
diff --git a/api/src/presentation/http/auth.rs b/api/src/presentation/http/auth.rs
deleted file mode 100644
index aaf6fff7..00000000
--- a/api/src/presentation/http/auth.rs
+++ /dev/null
@@ -1,1152 +0,0 @@
-use crate::application::access;
-use crate::application::dto::auth::UserDto;
-use crate::application::ports::user_session_repository::UserSessionRecord;
-use crate::application::ports::workspace_repository::WorkspaceListItem;
-use crate::application::services::auth::external::{ExternalAuthPayload, ExternalAuthProviderKind};
-use crate::application::services::auth::user_sessions::{IssuedSessionBundle, SessionMetadata};
-use crate::application::services::errors::ServiceError;
-use crate::presentation::context::AppContext;
-use axum::{
-    Json, Router,
-    body::Body,
-    extract::{Extension, Path, State},
-    http::{HeaderMap, HeaderValue, Request, StatusCode, header},
-    middleware::Next,
-    response::IntoResponse,
-    routing::{delete, get, post},
-};
-use chrono::{DateTime, Duration, Utc};
-use rand::{Rng, distributions::Alphanumeric, rngs::OsRng};
-use serde::{Deserialize, Serialize};
-use std::sync::Arc;
-use tracing::{error, warn};
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::presentation::http::workspace_scope;
-
-const SESSION_COOKIE_NAME: &str = "access_token";
-const REFRESH_COOKIE_NAME: &str = "refresh_token";
-const OAUTH_STATE_COOKIE_NAME: &str = "oauth_state";
-const OAUTH_STATE_TTL_SECS: i64 = 300;
-
-pub mod request_status {
-    use std::cell::Cell;
-
-    use axum::http::Request;
-    use axum::{body::Body, middleware::Next, response::Response};
-    use http::{StatusCode, header};
-
-    tokio::task_local! {
-        static TOKEN_EXPIRED_FLAG: Cell<bool>;
-    }
-
-    pub fn mark_token_expired() {
-        let _ = TOKEN_EXPIRED_FLAG.try_with(|flag| flag.set(true));
-    }
-
-    pub async fn middleware(req: Request<Body>, next: Next) -> Response {
-        TOKEN_EXPIRED_FLAG
-            .scope(Cell::new(false), async move {
-                let mut response = next.run(req).await;
-                let expired = TOKEN_EXPIRED_FLAG.with(|flag| flag.get());
-                if expired && response.status() == StatusCode::UNAUTHORIZED {
-                    response.headers_mut().insert(
-                        header::WWW_AUTHENTICATE,
-                        header::HeaderValue::from_static("Bearer error=\"token_expired\""),
-                    );
-                }
-                response
-            })
-            .await
-    }
-}
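[Editor's note — context for the module above, not part of the patch.] `request_status` threads an out-of-band "token expired" flag through a tokio task-local so that code several layers below the middleware (e.g. `validate_bearer_str` later in this file) can set it without widening every return type. The mechanics in isolation, as a self-contained sketch:

    use std::cell::Cell;

    tokio::task_local! {
        static FLAG: Cell<bool>;
    }

    #[tokio::main]
    async fn main() {
        // `scope` installs the task-local for everything awaited inside it.
        let seen = FLAG
            .scope(Cell::new(false), async {
                FLAG.with(|f| f.set(true)); // a deep callee marks the condition
                FLAG.with(|f| f.get())      // the wrapper reads it afterwards
            })
            .await;
        assert!(seen);
        // Outside any scope, try_with fails instead of panicking,
        // which is why mark_token_expired above discards its Result.
        assert!(FLAG.try_with(|f| f.get()).is_err());
    }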
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct RegisterRequest {
-    pub email: String,
-    pub name: String,
-    pub password: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct UserResponse {
-    pub id: Uuid,
-    pub email: String,
-    pub name: String,
-    pub workspaces: Vec<WorkspaceMembershipResponse>,
-    pub active_workspace_id: Option<Uuid>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub active_workspace: Option<WorkspaceMembershipResponse>,
-    #[serde(default, skip_serializing_if = "Vec::is_empty")]
-    pub active_workspace_permissions: Vec<String>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SessionResponse {
-    pub id: Uuid,
-    pub workspace_id: Uuid,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub user_agent: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub ip_address: Option<String>,
-    pub remember_me: bool,
-    pub created_at: DateTime<Utc>,
-    pub last_seen_at: DateTime<Utc>,
-    pub expires_at: DateTime<Utc>,
-    pub current: bool,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct RefreshResponse {
-    pub access_token: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct AuthProviderInfoResponse {
-    pub id: String,
-    pub requires_state: bool,
-    pub client_ids: Vec<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub redirect_uri: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub name: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub authorization_url: Option<String>,
-    #[serde(default, skip_serializing_if = "Vec::is_empty")]
-    pub scopes: Vec<String>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct AuthProvidersResponse {
-    pub providers: Vec<AuthProviderInfoResponse>,
-}
-
-#[derive(Debug, Serialize, ToSchema, Clone)]
-pub struct WorkspaceMembershipResponse {
-    pub id: Uuid,
-    pub name: String,
-    pub slug: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub icon: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub description: Option<String>,
-    pub is_personal: bool,
-    pub role_kind: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub system_role: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub custom_role_id: Option<Uuid>,
-    pub is_default: bool,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct LoginRequest {
-    pub email: String,
-    pub password: String,
-    #[serde(default)]
-    pub remember_me: bool,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct LoginResponse {
-    pub access_token: String,
-    pub user: UserResponse,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct OAuthLoginRequest {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub credential: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub code: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub redirect_uri: Option<String>,
-    #[serde(default)]
-    pub remember_me: bool,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub state: Option<String>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct OAuthStateResponse {
-    pub state: String,
-}
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route("/register", post(register))
-        .route("/login", post(login))
-        .route("/oauth/:provider/state", post(oauth_state))
-        .route("/oauth/:provider", post(oauth_login))
-        .route("/providers", get(list_oauth_providers))
-        .route("/logout", post(logout))
-        .route("/refresh", post(refresh_session))
-        .route("/sessions", get(list_sessions))
-        .route("/sessions/:id", delete(revoke_session))
-        .route("/me", get(me).delete(delete_account))
-        .with_state(ctx)
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/auth/oauth/{provider}/state",
-    tag = "Auth",
-    params(("provider" = String, Path, description = "OAuth provider identifier")),
-    security(()),
-    responses((status = 200, body = OAuthStateResponse))
-)]
-pub async fn oauth_state(
-    Path(provider): Path<String>,
-    State(ctx): State<AppContext>,
-) -> Result<(HeaderMap, Json<OAuthStateResponse>), StatusCode> {
-    let provider_kind =
-        ExternalAuthProviderKind::try_from(provider.as_str()).map_err(|_| StatusCode::NOT_FOUND)?;
-    if ctx.external_auth().get(provider_kind).is_none() {
-        return Err(StatusCode::NOT_IMPLEMENTED);
-    }
-    let state = generate_oauth_state();
-    let mut headers = HeaderMap::new();
-    append_cookie(
-        &mut headers,
-        build_oauth_state_cookie(provider_kind, &state, ctx.cfg.session_cookie_secure),
-    );
-    Ok((headers, Json(OAuthStateResponse { state })))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/auth/oauth/{provider}",
-    tag = "Auth",
-    params(
-        ("provider" = String, Path, description = "OAuth provider identifier (e.g., google)")
-    ),
-    request_body = OAuthLoginRequest,
-    security(()),
-    responses((status = 200, body = LoginResponse))
-)]
-pub async fn oauth_login(
-    Path(provider): Path<String>,
-    State(ctx): State<AppContext>,
-    headers: HeaderMap,
-    Json(req): Json<OAuthLoginRequest>,
-) -> Result<(HeaderMap, Json<LoginResponse>), StatusCode> {
-    let provider_kind =
-        ExternalAuthProviderKind::try_from(provider.as_str()).map_err(|_| StatusCode::NOT_FOUND)?;
-    let registry = ctx.external_auth();
-    let verifier = registry
-        .get(provider_kind)
-        .ok_or(StatusCode::NOT_IMPLEMENTED)?;
-    let mut response_headers = HeaderMap::new();
-    if provider_kind.requires_state() {
-        let provided_state = req.state.as_deref().ok_or(StatusCode::BAD_REQUEST)?;
-        validate_oauth_state_cookie(&headers, provider_kind, provided_state)
-            .map_err(|_| StatusCode::UNAUTHORIZED)?;
-        clear_oauth_state_cookie(&mut response_headers, ctx.cfg.session_cookie_secure);
-    }
-    let payload = ExternalAuthPayload {
-        credential: req.credential.clone(),
-        code: req.code.clone(),
-        redirect_uri: req.redirect_uri.clone(),
-    };
-    let identity = verifier.verify(&payload).await.map_err(map_auth_error)?;
-    let account_service = ctx.account_service();
-    let user_dto = account_service
-        .sign_in_with_external(identity)
-        .await
-        .map_err(map_account_error)?;
-    let user = build_user_response(&ctx, user_dto, None).await?;
-    let active_workspace_id = user
-        .active_workspace_id
-        .or_else(|| user.workspaces.iter().find(|w| w.is_default).map(|w| w.id))
-        .ok_or(StatusCode::INTERNAL_SERVER_ERROR)?;
-    let client_ip = extract_client_ip(&headers);
-    let user_agent = extract_user_agent(&headers);
-    let issued = ctx
-        .session_service()
-        .issue_new_session(
-            user.id,
-            active_workspace_id,
-            req.remember_me,
-            SessionMetadata {
-                user_agent,
-                ip_address: client_ip.as_deref(),
-            },
-        )
-        .await
-        .map_err(map_auth_error)?;
-    apply_session_cookies(&ctx, &mut response_headers, &issued);
-    Ok((
-        response_headers,
-        Json(LoginResponse {
-            access_token: issued.access.token,
-            user,
-        }),
-    ))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/auth/providers",
-    tag = "Auth",
-    security(()),
-    responses((status = 200, body = AuthProvidersResponse))
-)]
-pub async fn list_oauth_providers(
-    State(ctx): State<AppContext>,
-) -> Result<Json<AuthProvidersResponse>, StatusCode> {
-    let providers = ctx
-        .external_auth()
-        .list_descriptors()
-        .into_iter()
-        .map(|descriptor| AuthProviderInfoResponse {
-            id: descriptor.kind.as_str().to_string(),
-            requires_state: descriptor.requires_state,
-            client_ids: descriptor.client_ids,
-            redirect_uri: descriptor.redirect_uri,
-            name: descriptor.display_name,
-            authorization_url: descriptor.authorization_url,
-            scopes: descriptor.scopes,
-        })
-        .collect();
-    Ok(Json(AuthProvidersResponse { providers }))
-}
-
-fn map_account_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "account_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-fn map_workspace_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "workspace_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-fn workspace_response_from(item: WorkspaceListItem) -> WorkspaceMembershipResponse {
-    WorkspaceMembershipResponse {
-        id: item.id,
-        name: item.name,
-        slug: item.slug,
-        icon: item.icon,
-        description: item.description,
-        is_personal: item.is_personal,
-        role_kind: item.role_kind,
-        system_role: item.system_role,
-        custom_role_id: item.custom_role_id,
-        is_default: item.is_default,
-    }
-}
-
-fn session_response_from(
-    record: UserSessionRecord,
-    current_session_id: Option<Uuid>,
-) -> SessionResponse {
-    SessionResponse {
-        id: record.id,
-        workspace_id: record.workspace_id,
-        user_agent: record.user_agent,
-        ip_address: record.ip_address,
-        remember_me: record.remember_me,
-        created_at: record.created_at,
-        last_seen_at: record.last_seen_at,
-        expires_at: record.expires_at,
-        current: current_session_id.map_or(false, |id| id == record.id),
-    }
-}
-async fn build_user_response(
-    ctx: &AppContext,
-    user: UserDto,
-    preferred_workspace_id: Option<Uuid>,
-) -> Result<UserResponse, StatusCode> {
-    let workspaces = ctx
-        .workspace_service()
-        .list_for_user(user.id)
-        .await
-        .map_err(map_workspace_error)?
-        .into_iter()
-        .map(workspace_response_from)
-        .collect::<Vec<_>>();
-    let mut active_workspace_id =
-        preferred_workspace_id.and_then(|id| workspaces.iter().find(|w| w.id == id).map(|w| w.id));
-    if active_workspace_id.is_none() {
-        active_workspace_id = workspaces.iter().find(|w| w.is_default).map(|w| w.id);
-    }
-    if active_workspace_id.is_none() {
-        active_workspace_id = workspaces.first().map(|w| w.id);
-    }
-    let active_workspace =
-        active_workspace_id.and_then(|id| workspaces.iter().find(|w| w.id == id).cloned());
-    let mut active_workspace_permissions = Vec::new();
-    if let Some(active_ws_id) = active_workspace_id {
-        if let Some(set) = ctx
-            .workspace_service()
-            .resolve_permission_set(active_ws_id, user.id)
-            .await
-            .map_err(map_workspace_error)?
-        {
-            active_workspace_permissions = set.to_vec();
-        }
-    }
-    Ok(UserResponse {
-        id: user.id,
-        email: user.email,
-        name: user.name,
-        workspaces,
-        active_workspace_id,
-        active_workspace,
-        active_workspace_permissions,
-    })
-}
-
-#[utoipa::path(post, path = "/api/auth/register", tag = "Auth", request_body = RegisterRequest, security(()), responses(
-    (status = 200, body = UserResponse)
-))]
-pub async fn register(
-    State(ctx): State<AppContext>,
-    Json(req): Json<RegisterRequest>,
-) -> Result<Json<UserResponse>, StatusCode> {
-    let service = ctx.account_service();
-    let user = service
-        .register(&req.email, &req.name, &req.password)
-        .await
-        .map_err(map_account_error)?;
-    let response = build_user_response(&ctx, user, None).await?;
-    Ok(Json(response))
-}
-
-#[utoipa::path(post, path = "/api/auth/login", tag = "Auth", request_body = LoginRequest, security(()), responses(
-    (status = 200, body = LoginResponse)
-))]
-pub async fn login(
-    State(ctx): State<AppContext>,
-    headers: HeaderMap,
-    Json(req): Json<LoginRequest>,
-) -> Result<(HeaderMap, Json<LoginResponse>), StatusCode> {
-    let service = ctx.account_service();
-    let user = service
-        .login(&req.email, &req.password)
-        .await
-        .map_err(map_account_error)?
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-    let user = build_user_response(&ctx, user, None).await?;
-    let active_workspace_id = user
-        .active_workspace_id
-        .or_else(|| user.workspaces.iter().find(|w| w.is_default).map(|w| w.id))
-        .ok_or(StatusCode::INTERNAL_SERVER_ERROR)?;
-    let client_ip = extract_client_ip(&headers);
-    let user_agent = extract_user_agent(&headers);
-    let issued = ctx
-        .session_service()
-        .issue_new_session(
-            user.id,
-            active_workspace_id,
-            req.remember_me,
-            SessionMetadata {
-                user_agent,
-                ip_address: client_ip.as_deref(),
-            },
-        )
-        .await
-        .map_err(map_auth_error)?;
-
-    let mut response_headers = HeaderMap::new();
-    apply_session_cookies(&ctx, &mut response_headers, &issued);
-
-    Ok((
-        response_headers,
-        Json(LoginResponse {
-            access_token: issued.access.token,
-            user,
-        }),
-    ))
-}
-
-#[utoipa::path(post, path = "/api/auth/refresh", tag = "Auth", responses(
-    (status = 200, body = RefreshResponse)
-))]
-pub async fn refresh_session(
-    State(ctx): State<AppContext>,
-    refreshed: Option<Extension<RefreshedSession>>,
-) -> Result<axum::response::Response, StatusCode> {
-    if let Some(Extension(bundle)) = refreshed {
-        return Ok(Json(RefreshResponse {
-            access_token: bundle.0.access.token.clone(),
-        })
-        .into_response());
-    }
-
-    let mut response_headers = HeaderMap::new();
-    clear_auth_cookies(&mut response_headers, ctx.cfg.session_cookie_secure);
-    Ok((response_headers, StatusCode::UNAUTHORIZED).into_response())
-}
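[Editor's note — context for the handlers above, not part of the patch.] When no refreshed session bundle was injected by the middleware, `refresh_session` clears both auth cookies and returns 401. `clear_auth_cookies` is not shown in this diff; a conventional implementation expires the HttpOnly cookies by appending Set-Cookie headers with Max-Age=0, roughly like this assumed sketch:

    use axum::http::{header, HeaderMap, HeaderValue};

    /// Hypothetical shape of the cookie-clearing helper used above.
    fn clear_auth_cookies(headers: &mut HeaderMap, secure: bool) {
        for name in ["access_token", "refresh_token"] {
            let secure_attr = if secure { "; Secure" } else { "" };
            let cookie =
                format!("{name}=; Max-Age=0; Path=/; HttpOnly; SameSite=Lax{secure_attr}");
            // append (not insert): both cookies need their own Set-Cookie header.
            headers.append(header::SET_COOKIE, HeaderValue::from_str(&cookie).unwrap());
        }
    }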
- .ok_or(StatusCode::UNAUTHORIZED)?; - let resp = build_user_response(&ctx, row, active_workspace_id).await?; - Ok(Json(resp)) -} - -#[utoipa::path(delete, path = "/api/auth/me", tag = "Auth", responses((status = 204)))] -pub async fn delete_account( - State(ctx): State, - bearer: Bearer, -) -> Result<(HeaderMap, StatusCode), StatusCode> { - let sub = validate_bearer(&ctx, bearer).await?; - let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?; - let service = ctx.account_service(); - service - .delete_account(user_id) - .await - .map_err(map_account_error)?; - ctx.session_service() - .revoke_all_for_user(user_id) - .await - .map_err(map_auth_error)?; - - let mut headers = HeaderMap::new(); - clear_auth_cookies(&mut headers, ctx.cfg.session_cookie_secure); - - Ok((headers, StatusCode::NO_CONTENT)) -} - -// --- Bearer extractor & JWT utils --- -use axum::extract::FromRequestParts; -use axum::http::request::Parts; - -#[derive(Debug, Clone)] -pub struct Bearer(pub String); - -#[derive(Debug, Clone)] -pub struct AccessTokenOverride(pub String); - -#[derive(Clone)] -pub struct RefreshedSession(pub Arc); - -fn unauthorized_token_expired(ctx: &AppContext) -> axum::response::Response { - let mut headers = HeaderMap::new(); - clear_auth_cookies(&mut headers, ctx.cfg.session_cookie_secure); - let _ = headers.insert( - header::WWW_AUTHENTICATE, - HeaderValue::from_static("Bearer error=\"token_expired\""), - ); - (headers, StatusCode::UNAUTHORIZED).into_response() -} - -fn extract_bearer_token(headers: &HeaderMap) -> Option { - // Prefer the session cookie if present to avoid accidentally overriding it - // with other Bearer values (e.g. share tokens) that might be sent by the - // client. - if let Some(cookie) = headers - .get(axum::http::header::COOKIE) - .and_then(|v| v.to_str().ok()) - { - if let Some(token) = get_cookie(cookie, SESSION_COOKIE_NAME) { - if !token.trim().is_empty() { - return Some(token); - } - } - } - - if let Some(auth) = headers - .get(axum::http::header::AUTHORIZATION) - .and_then(|v| v.to_str().ok()) - { - if let Some(t) = auth.strip_prefix("Bearer ") { - let trimmed = t.trim(); - if !trimmed.is_empty() { - return Some(trimmed.to_string()); - } - } - } - None -} - -fn should_skip_refresh(path: &str) -> bool { - path.starts_with("/api/public") || path.starts_with("/api/health") || path == "/metrics" -} - -#[axum::async_trait] -impl FromRequestParts for Bearer -where - S: Send + Sync, -{ - type Rejection = StatusCode; - - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - if let Some(token) = parts.extensions.get::() { - return Ok(Bearer(token.0.clone())); - } - // 1) Prefer HttpOnly cookie `access_token` - if let Some(cookie_hdr) = parts - .headers - .get(axum::http::header::COOKIE) - .and_then(|v| v.to_str().ok()) - { - if let Some(token) = get_cookie(cookie_hdr, SESSION_COOKIE_NAME) { - return Ok(Bearer(token)); - } - } - - // 2) Fall back to Authorization header if provided - if let Some(auth) = parts - .headers - .get(axum::http::header::AUTHORIZATION) - .and_then(|v| v.to_str().ok()) - { - if let Some(t) = auth.strip_prefix("Bearer ") { - return Ok(Bearer(t.to_string())); - } - } - - Err(StatusCode::UNAUTHORIZED) - } -} - -pub(crate) async fn validate_bearer( - ctx: &AppContext, - bearer: Bearer, -) -> Result { - validate_bearer_str(ctx, &bearer.0).await -} - -pub async fn validate_bearer_public( - ctx: &AppContext, - bearer: Bearer, -) -> Result { - validate_bearer(ctx, bearer).await -} - -pub async fn validate_bearer_str(ctx: 
&AppContext, token: &str) -> Result { - let service = ctx.auth_service(); - let session_service = ctx.session_service(); - let subject = match service.subject_from_token(token).await { - Ok(Some(sub)) => sub, - Ok(None) => return Err(StatusCode::UNAUTHORIZED), - Err(ServiceError::TokenExpired) => { - request_status::mark_token_expired(); - return Err(StatusCode::UNAUTHORIZED); - } - Err(err) => return Err(map_auth_error(err)), - }; - if let Some(session_id) = service.session_id_from_token_claim(token) { - session_service - .ensure_session_active(session_id) - .await - .map_err(map_auth_error)?; - } - Ok(subject) -} - -pub async fn resolve_actor_from_parts( - ctx: &AppContext, - bearer: Option, - share_token: Option<&str>, -) -> Option { - // Prefer explicit share token when provided so users with an active session - // can still open shared resources using the token (e.g., saved mounts). - if let Some(token) = share_token { - if let Some(actor) = resolve_actor_from_token_str(ctx, token).await { - return Some(actor); - } - } - - if let Some(b) = bearer { - match validate_bearer(ctx, b.clone()).await { - Ok(sub) => { - if let Ok(uid) = Uuid::parse_str(&sub) { - return Some(access::Actor::User(uid)); - } - } - Err(_) => { - if let Some(actor) = resolve_actor_from_token_str(ctx, &b.0).await { - return Some(actor); - } - } - } - } - None -} - -pub async fn refresh_middleware( - State(ctx): State, - mut req: Request, - next: Next, -) -> axum::response::Response { - let path = req.uri().path().to_owned(); - if should_skip_refresh(&path) { - return next.run(req).await; - } - - let mut refreshed: Option> = None; - let force_refresh = path == "/api/auth/refresh"; - let access_token = extract_bearer_token(req.headers()); - let refresh_token = extract_refresh_token(req.headers()); - - if force_refresh || access_token.is_some() || refresh_token.is_some() { - let auth = ctx.auth_service(); - let session_service = ctx.session_service(); - - let token_expired_or_missing = if force_refresh { - true - } else if let Some(access_token) = access_token { - match auth.subject_from_token(&access_token).await { - Ok(Some(_)) => false, - Ok(None) => false, - Err(ServiceError::TokenExpired) => true, - Err(_) => false, - } - } else if refresh_token.is_some() { - // Access token already expired/absent but refresh token still exists. 
- true - } else { - false - }; - - if token_expired_or_missing { - if let Some(refresh_token) = refresh_token { - let client_ip = extract_client_ip(req.headers()); - let meta = SessionMetadata { - user_agent: extract_user_agent(req.headers()), - ip_address: client_ip.as_deref(), - }; - match session_service - .refresh_session(&refresh_token, None, meta) - .await - { - Ok(bundle) => { - let shared = Arc::new(bundle); - req.extensions_mut() - .insert(AccessTokenOverride(shared.access.token.clone())); - req.extensions_mut() - .insert(RefreshedSession(shared.clone())); - refreshed = Some(shared); - } - Err(ServiceError::Unauthorized) => return unauthorized_token_expired(&ctx), - Err(err) => return map_auth_error(err).into_response(), - } - } else { - return unauthorized_token_expired(&ctx); - } - } - } - - let mut response = next.run(req).await; - if let Some(bundle) = refreshed { - apply_session_cookies(&ctx, response.headers_mut(), bundle.as_ref()); - } - response -} - -pub async fn resolve_actor_from_token_str(ctx: &AppContext, token: &str) -> Option { - let trimmed = token.trim(); - if trimmed.is_empty() { - return None; - } - let service = ctx.auth_service(); - match service.subject_from_token(trimmed).await { - Ok(Some(sub)) => { - if let Ok(uid) = Uuid::parse_str(&sub) { - if let Some(session_id) = service.session_id_from_token_claim(trimmed) { - if let Err(err) = ctx - .session_service() - .ensure_session_active(session_id) - .await - { - if err.is_internal() { - error!(error = ?err, "session_validation_failed"); - } - return None; - } - } - return Some(access::Actor::User(uid)); - } else { - return Some(access::Actor::Public); - } - } - Err(ServiceError::TokenExpired) => { - request_status::mark_token_expired(); - return None; - } - Err(err) => { - if err.is_internal() { - error!(error = ?err, "token_validation_failed"); - } - } - Ok(None) => {} - } - Some(access::Actor::ShareToken(trimmed.to_string())) -} - -pub(crate) fn map_auth_error(err: ServiceError) -> StatusCode { - if err.is_internal() { - StatusCode::INTERNAL_SERVER_ERROR - } else { - StatusCode::UNAUTHORIZED - } -} - -// --- Cookie helpers & logout --- - -fn generate_oauth_state() -> String { - OsRng - .sample_iter(&Alphanumeric) - .take(48) - .map(char::from) - .collect() -} - -fn build_oauth_state_cookie( - provider: ExternalAuthProviderKind, - state: &str, - secure: bool, -) -> String { - let issued_at = Utc::now().timestamp(); - let value = format!("{}:{}:{}", provider.as_str(), state, issued_at); - let secure_attr = if secure { "; Secure" } else { "" }; - format!( - "{}={}; HttpOnly{}; Path=/; Max-Age={}; SameSite=Lax", - OAUTH_STATE_COOKIE_NAME, value, secure_attr, OAUTH_STATE_TTL_SECS - ) -} - -fn clear_oauth_state_cookie(headers: &mut HeaderMap, secure: bool) { - let secure_attr = if secure { "; Secure" } else { "" }; - append_cookie( - headers, - format!( - "{}=; HttpOnly{}; Path=/; Max-Age=0; SameSite=Lax", - OAUTH_STATE_COOKIE_NAME, secure_attr - ), - ); -} - -fn validate_oauth_state_cookie( - headers: &HeaderMap, - provider: ExternalAuthProviderKind, - provided_state: &str, -) -> Result<(), ()> { - let cookie_value = extract_cookie_from_headers(headers, OAUTH_STATE_COOKIE_NAME).ok_or(())?; - let mut segments = cookie_value.splitn(3, ':'); - let provider_raw = segments.next().ok_or(())?; - let stored_state = segments.next().ok_or(())?; - let issued_raw = segments.next().ok_or(())?; - let parsed_provider = ExternalAuthProviderKind::try_from(provider_raw).map_err(|_| ())?; - if parsed_provider != provider || 
stored_state != provided_state { - return Err(()); - } - let issued_ts: i64 = issued_raw.parse().map_err(|_| ())?; - let issued_at = DateTime::::from_timestamp(issued_ts, 0).ok_or(())?; - if Utc::now() - issued_at > Duration::seconds(OAUTH_STATE_TTL_SECS) { - return Err(()); - } - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use axum::http::{HeaderValue, header}; - - fn cookie_headers( - provider: ExternalAuthProviderKind, - state: &str, - issued_at: i64, - ) -> HeaderMap { - let mut headers = HeaderMap::new(); - let raw_value = format!( - "{}={}:{}:{}", - OAUTH_STATE_COOKIE_NAME, - provider.as_str(), - state, - issued_at - ); - headers.insert( - header::COOKIE, - HeaderValue::from_str(&raw_value).expect("header"), - ); - headers - } - - #[test] - fn oauth_state_cookie_roundtrip() { - let provider = ExternalAuthProviderKind::Github; - let state = "state-token"; - let issued = Utc::now().timestamp(); - let headers = cookie_headers(provider, state, issued); - assert!(validate_oauth_state_cookie(&headers, provider, state).is_ok()); - } - - #[test] - fn oauth_state_cookie_rejects_expired() { - let provider = ExternalAuthProviderKind::Github; - let state = "expired"; - let issued = Utc::now().timestamp() - (OAUTH_STATE_TTL_SECS + 10); - let headers = cookie_headers(provider, state, issued); - assert!(validate_oauth_state_cookie(&headers, provider, state).is_err()); - } -} - -fn get_cookie(cookie_header: &str, name: &str) -> Option { - for part in cookie_header.split(';') { - let kv = part.trim(); - if let Some((k, v)) = kv.split_once('=') { - if k.trim() == name { - return Some(v.trim().to_string()); - } - } - } - None -} - -fn extract_cookie_from_headers(headers: &HeaderMap, name: &str) -> Option { - headers - .get(header::COOKIE) - .and_then(|v| v.to_str().ok()) - .and_then(|cookie| get_cookie(cookie, name)) -} - -pub(crate) fn extract_refresh_token(headers: &HeaderMap) -> Option { - extract_cookie_from_headers(headers, REFRESH_COOKIE_NAME) -} - -pub(crate) fn extract_user_agent<'a>(headers: &'a HeaderMap) -> Option<&'a str> { - headers - .get(header::USER_AGENT) - .and_then(|v| v.to_str().ok()) -} - -pub(crate) fn extract_client_ip(headers: &HeaderMap) -> Option { - if let Some(value) = headers.get("x-forwarded-for").and_then(|v| v.to_str().ok()) { - if let Some(first) = value.split(',').next() { - let trimmed = first.trim(); - if !trimmed.is_empty() { - return Some(trimmed.to_string()); - } - } - } - headers - .get("x-real-ip") - .or_else(|| headers.get("cf-connecting-ip")) - .and_then(|v| v.to_str().ok()) - .map(|s| s.trim().to_string()) - .filter(|s| !s.is_empty()) -} - -pub(crate) fn build_session_cookie(token: &str, max_age_secs: usize, secure: bool) -> String { - let secure_attr = if secure { "; Secure" } else { "" }; - format!( - "{}={}; HttpOnly{}; Path=/; Max-Age={}; SameSite=Lax", - SESSION_COOKIE_NAME, token, secure_attr, max_age_secs - ) -} - -fn build_refresh_cookie(token: &str, max_age_secs: usize, secure: bool) -> String { - let secure_attr = if secure { "; Secure" } else { "" }; - format!( - "{}={}; HttpOnly{}; Path=/; Max-Age={}; SameSite=Lax", - REFRESH_COOKIE_NAME, token, secure_attr, max_age_secs - ) -} - -fn clear_session_cookie(secure: bool) -> String { - let secure_attr = if secure { "; Secure" } else { "" }; - format!( - "{}=; HttpOnly{}; Path=/; Max-Age=0; SameSite=Lax", - SESSION_COOKIE_NAME, secure_attr - ) -} - -fn clear_refresh_cookie(secure: bool) -> String { - let secure_attr = if secure { "; Secure" } else { "" }; - format!( - "{}=; HttpOnly{}; 
Path=/; Max-Age=0; SameSite=Lax", - REFRESH_COOKIE_NAME, secure_attr - ) -} - -fn append_cookie(headers: &mut HeaderMap, value: String) { - if let Ok(header_value) = HeaderValue::from_str(&value) { - headers.append(header::SET_COOKIE, header_value); - } -} - -fn refresh_cookie_max_age(expires_at: DateTime) -> usize { - let now = Utc::now(); - if expires_at <= now { - 0 - } else { - (expires_at - now).num_seconds().max(0) as usize - } -} - -pub(crate) fn apply_session_cookies( - ctx: &AppContext, - headers: &mut HeaderMap, - issued: &IssuedSessionBundle, -) { - append_cookie( - headers, - build_session_cookie( - &issued.access.token, - ctx.auth_service().session_ttl_secs(), - ctx.cfg.session_cookie_secure, - ), - ); - append_cookie( - headers, - build_refresh_cookie( - &issued.refresh_token, - refresh_cookie_max_age(issued.refresh_expires_at), - ctx.cfg.session_cookie_secure, - ), - ); -} - -pub(crate) fn clear_auth_cookies(headers: &mut HeaderMap, secure: bool) { - append_cookie(headers, clear_session_cookie(secure)); - append_cookie(headers, clear_refresh_cookie(secure)); -} - -#[utoipa::path(post, path = "/api/auth/logout", tag = "Auth", responses((status = 204)))] -pub async fn logout( - State(ctx): State, - headers: HeaderMap, -) -> Result<(HeaderMap, StatusCode), StatusCode> { - if let Some(refresh_token) = extract_refresh_token(&headers) { - if let Err(err) = ctx.session_service().revoke_by_token(&refresh_token).await { - warn!(error = ?err, "logout_revoke_session_failed"); - } - } - let mut response_headers = HeaderMap::new(); - clear_auth_cookies(&mut response_headers, ctx.cfg.session_cookie_secure); - clear_oauth_state_cookie(&mut response_headers, ctx.cfg.session_cookie_secure); - Ok((response_headers, StatusCode::NO_CONTENT)) -} - -#[utoipa::path(get, path = "/api/auth/sessions", tag = "Auth", responses((status = 200, body = [SessionResponse])))] -pub async fn list_sessions( - State(ctx): State, - bearer: Bearer, - headers: HeaderMap, -) -> Result>, StatusCode> { - let sub = validate_bearer(&ctx, bearer).await?; - let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?; - let current_session_id = if let Some(refresh_token) = extract_refresh_token(&headers) { - match ctx - .session_service() - .find_session_by_token(&refresh_token) - .await - { - Ok(Some(session)) => Some(session.id), - Ok(None) => None, - Err(err) => { - warn!(error = ?err, "resolve_current_session_failed"); - None - } - } - } else { - None - }; - let sessions = ctx - .session_service() - .list_for_user(user_id) - .await - .map_err(map_auth_error)?; - let now = Utc::now(); - let payload = sessions - .into_iter() - .filter(|session| session.revoked_at.is_none() && session.expires_at > now) - .map(|session| session_response_from(session, current_session_id)) - .collect(); - Ok(Json(payload)) -} - -#[utoipa::path(delete, path = "/api/auth/sessions/{id}", tag = "Auth", params(("id" = Uuid, Path, description = "Session ID")), responses((status = 204)))] -pub async fn revoke_session( - State(ctx): State, - bearer: Bearer, - headers: HeaderMap, - Path(session_id): Path, -) -> Result<(HeaderMap, StatusCode), StatusCode> { - let sub = validate_bearer(&ctx, bearer).await?; - let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?; - let current_session_id = if let Some(refresh_token) = extract_refresh_token(&headers) { - match ctx - .session_service() - .find_session_by_token(&refresh_token) - .await - { - Ok(Some(session)) => Some(session.id), - Ok(None) => None, - Err(err) => { - 
warn!(error = ?err, "resolve_current_session_failed"); - None - } - } - } else { - None - }; - ctx.session_service() - .revoke_session(user_id, session_id) - .await - .map_err(|err| match err { - ServiceError::Forbidden => StatusCode::FORBIDDEN, - ServiceError::NotFound => StatusCode::NOT_FOUND, - other => map_auth_error(other), - })?; - let mut response_headers = HeaderMap::new(); - if current_session_id == Some(session_id) { - clear_auth_cookies(&mut response_headers, ctx.cfg.session_cookie_secure); - } - Ok((response_headers, StatusCode::NO_CONTENT)) -} diff --git a/api/src/presentation/http/documents.rs b/api/src/presentation/http/documents.rs deleted file mode 100644 index 44d84283..00000000 --- a/api/src/presentation/http/documents.rs +++ /dev/null @@ -1,1337 +0,0 @@ -use axum::{ - Json, Router, - extract::{Path, Query, State}, - http::{HeaderMap, HeaderValue, StatusCode}, - response::{IntoResponse, Response}, - routing::{get, post}, -}; -use serde::{Deserialize, Serialize}; -use serde_json::{Value, json}; -use utoipa::ToSchema; -use uuid::Uuid; - -use crate::application::access; -use crate::application::dto::diff::TextDiffResult; -use crate::application::dto::document_export::DocumentDownloadFormat; -use crate::application::dto::documents::{ - DocumentListFilter, SnapshotDiffBaseMode, SnapshotDiffSideDto, SnapshotSummaryDto, -}; -use crate::application::services::documents::DocumentPatchOperation; -use crate::application::services::errors::ServiceError; -use crate::domain::documents::document as domain; -use crate::domain::workspaces::permissions::PERM_DOC_VIEW; -use crate::presentation::context::AppContext; -use crate::presentation::http::auth::{self, Bearer}; -use crate::presentation::http::workspace_scope; -use tracing::error; - -#[derive(Debug, Serialize, ToSchema)] -pub struct Document { - pub id: Uuid, - pub owner_id: Uuid, - pub workspace_id: Uuid, - pub title: String, - pub parent_id: Option, - pub r#type: String, - pub created_at: chrono::DateTime, - pub updated_at: chrono::DateTime, - #[serde(skip_serializing_if = "Option::is_none")] - pub created_by_plugin: Option, - pub slug: String, - pub desired_path: String, - pub path: Option, - pub created_by: Option, - pub archived_at: Option>, - pub archived_by: Option, - pub archived_parent_id: Option, -} - -fn to_http_document(doc: domain::Document) -> Document { - Document { - id: doc.id, - owner_id: doc.owner_id, - workspace_id: doc.workspace_id, - title: doc.title, - parent_id: doc.parent_id, - r#type: doc.doc_type, - created_at: doc.created_at, - updated_at: doc.updated_at, - created_by_plugin: doc.created_by_plugin, - slug: doc.slug, - desired_path: doc.desired_path, - path: doc.path, - created_by: doc.created_by, - archived_at: doc.archived_at, - archived_by: doc.archived_by, - archived_parent_id: doc.archived_parent_id, - } -} - -fn map_service_error(err: ServiceError) -> StatusCode { - match err { - ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED, - ServiceError::Forbidden => StatusCode::FORBIDDEN, - ServiceError::Conflict => StatusCode::CONFLICT, - ServiceError::NotFound => StatusCode::NOT_FOUND, - ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST, - ServiceError::Unexpected(inner) => { - error!(error = ?inner, "document_service_error"); - StatusCode::INTERNAL_SERVER_ERROR - } - } -} - -#[derive(Debug, Serialize, ToSchema)] -pub struct DocumentListResponse { - pub items: Vec, -} - -#[derive(Debug, Serialize, ToSchema)] -pub struct SnapshotSummary { - pub id: Uuid, - pub 
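Reviewer note: the deleted auth helpers above hand-roll both sides of the cookie contract, so the re-implementation in the new `presentation` crate has to keep the attribute string and the parser in sync. A minimal, self-contained sketch of that round-trip (`build_cookie` and the `main` driver are illustrative names, not this codebase's API; only the attribute layout mirrors the deleted code):

```rust
// Round-trip sketch of the hand-rolled cookie helpers (cf. build_session_cookie
// and get_cookie in the deleted file). Assumptions: cookie name and token
// values are placeholders.
fn build_cookie(name: &str, token: &str, max_age_secs: usize, secure: bool) -> String {
    let secure_attr = if secure { "; Secure" } else { "" };
    format!("{name}={token}; HttpOnly{secure_attr}; Path=/; Max-Age={max_age_secs}; SameSite=Lax")
}

fn get_cookie(cookie_header: &str, name: &str) -> Option<String> {
    // One pass over `k=v` pairs, as in the deleted parser.
    cookie_header.split(';').find_map(|part| {
        let (k, v) = part.trim().split_once('=')?;
        (k.trim() == name).then(|| v.trim().to_string())
    })
}

fn main() {
    let set_cookie = build_cookie("access_token", "abc123", 3600, true);
    assert_eq!(
        set_cookie,
        "access_token=abc123; HttpOnly; Secure; Path=/; Max-Age=3600; SameSite=Lax"
    );
    // The browser echoes back only `name=value`; attributes are never returned,
    // which is why the parser ignores everything but the pair itself.
    assert_eq!(
        get_cookie("theme=dark; access_token=abc123", "access_token").as_deref(),
        Some("abc123")
    );
}
```

Clearing a cookie is the same string with an empty value and `Max-Age=0`, which is why `clear_session_cookie` and `clear_refresh_cookie` exist as separate builders rather than reusing the setters.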
diff --git a/api/src/presentation/http/documents.rs b/api/src/presentation/http/documents.rs
deleted file mode 100644
index 44d84283..00000000
--- a/api/src/presentation/http/documents.rs
+++ /dev/null
@@ -1,1337 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::{Path, Query, State},
-    http::{HeaderMap, HeaderValue, StatusCode},
-    response::{IntoResponse, Response},
-    routing::{get, post},
-};
-use serde::{Deserialize, Serialize};
-use serde_json::{Value, json};
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::access;
-use crate::application::dto::diff::TextDiffResult;
-use crate::application::dto::document_export::DocumentDownloadFormat;
-use crate::application::dto::documents::{
-    DocumentListFilter, SnapshotDiffBaseMode, SnapshotDiffSideDto, SnapshotSummaryDto,
-};
-use crate::application::services::documents::DocumentPatchOperation;
-use crate::application::services::errors::ServiceError;
-use crate::domain::documents::document as domain;
-use crate::domain::workspaces::permissions::PERM_DOC_VIEW;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::auth::{self, Bearer};
-use crate::presentation::http::workspace_scope;
-use tracing::error;
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct Document {
-    pub id: Uuid,
-    pub owner_id: Uuid,
-    pub workspace_id: Uuid,
-    pub title: String,
-    pub parent_id: Option<Uuid>,
-    pub r#type: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub created_by_plugin: Option<String>,
-    pub slug: String,
-    pub desired_path: String,
-    pub path: Option<String>,
-    pub created_by: Option<Uuid>,
-    pub archived_at: Option<chrono::DateTime<chrono::Utc>>,
-    pub archived_by: Option<Uuid>,
-    pub archived_parent_id: Option<Uuid>,
-}
-
-fn to_http_document(doc: domain::Document) -> Document {
-    Document {
-        id: doc.id,
-        owner_id: doc.owner_id,
-        workspace_id: doc.workspace_id,
-        title: doc.title,
-        parent_id: doc.parent_id,
-        r#type: doc.doc_type,
-        created_at: doc.created_at,
-        updated_at: doc.updated_at,
-        created_by_plugin: doc.created_by_plugin,
-        slug: doc.slug,
-        desired_path: doc.desired_path,
-        path: doc.path,
-        created_by: doc.created_by,
-        archived_at: doc.archived_at,
-        archived_by: doc.archived_by,
-        archived_parent_id: doc.archived_parent_id,
-    }
-}
-
-fn map_service_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "document_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct DocumentListResponse {
-    pub items: Vec<Document>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SnapshotSummary {
-    pub id: Uuid,
-    pub document_id: Uuid,
-    pub label: String,
-    pub notes: Option<String>,
-    pub kind: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub created_by: Option<Uuid>,
-    pub byte_size: i64,
-    pub content_hash: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SnapshotListResponse {
-    pub items: Vec<SnapshotSummary>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-#[serde(rename_all = "lowercase")]
-pub enum SnapshotDiffKind {
-    Current,
-    Snapshot,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SnapshotDiffSideResponse {
-    pub kind: SnapshotDiffKind,
-    pub markdown: String,
-    pub snapshot: Option<SnapshotSummary>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SnapshotDiffResponse {
-    pub base: SnapshotDiffSideResponse,
-    pub target: SnapshotDiffSideResponse,
-    pub diff: TextDiffResult,
-}
-
-#[derive(Debug, Clone, Copy, Deserialize, ToSchema)]
-#[serde(rename_all = "snake_case")]
-pub enum SnapshotDiffBaseParam {
-    Auto,
-    Current,
-    Previous,
-}
-
-impl Default for SnapshotDiffBaseParam {
-    fn default() -> Self {
-        Self::Auto
-    }
-}
-
-impl From<SnapshotDiffBaseParam> for SnapshotDiffBaseMode {
-    fn from(value: SnapshotDiffBaseParam) -> Self {
-        match value {
-            SnapshotDiffBaseParam::Auto => SnapshotDiffBaseMode::Auto,
-            SnapshotDiffBaseParam::Current => SnapshotDiffBaseMode::ForceCurrent,
-            SnapshotDiffBaseParam::Previous => SnapshotDiffBaseMode::ForcePrevious,
-        }
-    }
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SnapshotRestoreResponse {
-    pub snapshot: SnapshotSummary,
-}
-
-fn snapshot_summary_from(record: SnapshotSummaryDto) -> SnapshotSummary {
-    SnapshotSummary {
-        id: record.id,
-        document_id: record.document_id,
-        label: record.label,
-        notes: record.notes,
-        kind: record.kind,
-        created_at: record.created_at,
-        created_by: record.created_by,
-        byte_size: record.byte_size,
-        content_hash: record.content_hash,
-    }
-}
-
-fn snapshot_diff_side_response_from(side: SnapshotDiffSideDto) -> SnapshotDiffSideResponse {
-    match side {
-        SnapshotDiffSideDto::Current { markdown } => SnapshotDiffSideResponse {
-            kind: SnapshotDiffKind::Current,
-            markdown,
-            snapshot: None,
-        },
-        SnapshotDiffSideDto::Snapshot { snapshot, markdown } => SnapshotDiffSideResponse {
-            kind: SnapshotDiffKind::Snapshot,
-            markdown,
-            snapshot: Some(snapshot_summary_from(snapshot)),
-        },
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct CreateDocumentRequest {
-    pub title: Option<String>,
-    pub parent_id: Option<Uuid>,
-    pub r#type: Option<String>,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateDocumentRequest {
-    pub title: Option<String>,
-    #[serde(default, deserialize_with = "deserialize_double_option")]
-    #[schema(value_type = Option<Uuid>)]
-    pub parent_id: DoubleOption<Uuid>,
-}
-
-impl Default for UpdateDocumentRequest {
-    fn default() -> Self {
-        Self {
-            title: None,
-            parent_id: DoubleOption::NotProvided,
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct DuplicateDocumentRequest {
-    pub title: Option<String>,
-    #[serde(default, deserialize_with = "deserialize_double_option")]
-    #[schema(value_type = Option<Uuid>)]
-    pub parent_id: DoubleOption<Uuid>,
-}
-
-impl Default for DuplicateDocumentRequest {
-    fn default() -> Self {
-        Self {
-            title: None,
-            parent_id: DoubleOption::NotProvided,
-        }
-    }
-}
-
-#[derive(Debug, Clone)]
-pub enum DoubleOption<T> {
-    NotProvided,
-    Null,
-    Some(T),
-}
-
-fn deserialize_double_option<'de, D, T>(deserializer: D) -> Result<DoubleOption<T>, D::Error>
-where
-    D: serde::Deserializer<'de>,
-    T: serde::Deserialize<'de>,
-{
-    Option::<T>::deserialize(deserializer).map(|opt| match opt {
-        None => DoubleOption::Null,
-        Some(value) => DoubleOption::Some(value),
-    })
-}
-
-impl<T> Default for DoubleOption<T> {
-    fn default() -> Self {
-        DoubleOption::NotProvided
-    }
-}
-
-// Uses AppContext as router state
-
-#[derive(Debug, Deserialize)]
-pub struct ListDocumentsQuery {
-    pub query: Option<String>,
-    pub tag: Option<String>,
-    #[serde(default)]
-    pub state: Option<DocumentStateFilter>,
-}
-
-#[derive(Debug, Clone, Copy, Deserialize, ToSchema)]
-#[serde(rename_all = "lowercase")]
-pub enum DocumentStateFilter {
-    Active,
-    Archived,
-    All,
-}
-
-impl From<DocumentStateFilter> for DocumentListFilter {
-    fn from(value: DocumentStateFilter) -> Self {
-        match value {
-            DocumentStateFilter::Active => DocumentListFilter::Active,
-            DocumentStateFilter::Archived => DocumentListFilter::Archived,
-            DocumentStateFilter::All => DocumentListFilter::All,
-        }
-    }
-}
-
-#[utoipa::path(get, path = "/api/documents", tag = "Documents",
-    params(
-        ("query" = Option<String>, Query, description = "Search query"),
-        ("tag" = Option<String>, Query, description = "Filter by tag"),
-        ("state" = Option<DocumentStateFilter>, Query, description = "Filter by document state (active|archived|all)")
-    ),
-    responses((status = 200, body = DocumentListResponse)))]
-pub async fn list_documents(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    q: Option<Query<ListDocumentsQuery>>,
-) -> Result<Json<DocumentListResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW)
-        .await?;
-    let (qstr, tag, state_param) = q
-        .map(|Query(v)| (v.query, v.tag, v.state))
-        .unwrap_or((None, None, None));
-    let state = state_param
-        .map(DocumentStateFilter::into)
-        .unwrap_or_default();
-
-    let service = ctx.document_service();
-    let docs = service
-        .list_for_user(workspace_id, qstr, tag, state)
-        .await
-        .map_err(map_service_error)?;
-
-    let items: Vec<Document> = docs.into_iter().map(to_http_document).collect();
-    Ok(Json(DocumentListResponse { items }))
-}
-
-#[utoipa::path(post, path = "/api/documents", tag = "Documents", request_body = CreateDocumentRequest, responses((status = 200, body = Document)))]
-pub async fn create_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<CreateDocumentRequest>,
-) -> Result<Json<Document>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let title = req.title.unwrap_or_else(|| "Untitled".into());
-    let dtype = req.r#type.unwrap_or_else(|| "document".into());
-    let service = ctx.document_service();
-    let doc = service
-        .create_for_user(
-            workspace_id,
-            user_id,
-            &permissions,
-            &title,
-            req.parent_id,
-            &dtype,
-            None,
-        )
-        .await
-        .map_err(map_service_error)?;
-
-    Ok(Json(to_http_document(doc)))
-}
-
-#[utoipa::path(get, path = "/api/documents/{id}", tag = "Documents",
-    params(("id" = Uuid, Path, description = "Document ID"), ("token" = Option<String>, Query, description = "Share token (optional)")),
-    responses((status = 200, body = Document)))]
-pub async fn get_document(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Query(params): Query<std::collections::HashMap<String, String>>,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Document>, StatusCode> {
-    let token = params.get("token").map(|s| s.as_str());
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-    let service = ctx.document_service();
-    let doc = service
-        .get_for_actor(&actor, id)
-        .await
-        .map_err(map_service_error)?;
-
-    Ok(Json(to_http_document(doc)))
-}
-
-#[utoipa::path(delete, path = "/api/documents/{id}", tag = "Documents", params(("id" = Uuid, Path, description = "Document ID"),), responses((status = 204)))]
-pub async fn delete_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.document_service();
-    let ok = service
-        .delete_for_user(workspace_id, id, Some(user_id), &permissions)
-        .await
-        .map_err(map_service_error)?;
-    if ok {
-        Ok(StatusCode::NO_CONTENT)
-    } else {
-        Err(StatusCode::NOT_FOUND)
-    }
-}
-
-#[utoipa::path(get, path = "/api/documents/{id}/content", tag = "Documents", params(("id" = Uuid, Path, description = "Document ID"),), responses((status = 200)))]
-pub async fn get_document_content(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Value>, StatusCode> {
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let actor = access::Actor::User(user_id);
-    let service = ctx.document_service();
-    let content = service
-        .get_content(&actor, id)
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(serde_json::json!({"content": content})))
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateDocumentContentRequest {
-    pub content: String,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-#[serde(tag = "op", rename_all = "snake_case")]
-pub enum DocumentPatchOperationRequest {
-    Insert {
-        offset: usize,
-        text: String,
-    },
-    Delete {
-        offset: usize,
-        length: usize,
-    },
-    Replace {
-        offset: usize,
-        length: usize,
-        text: String,
-    },
-}
-
-impl From<DocumentPatchOperationRequest> for DocumentPatchOperation {
-    fn from(value: DocumentPatchOperationRequest) -> Self {
-        match value {
-            DocumentPatchOperationRequest::Insert { offset, text } => {
-                DocumentPatchOperation::Insert { offset, text }
-            }
-            DocumentPatchOperationRequest::Delete { offset, length } => {
-                DocumentPatchOperation::Delete { offset, length }
-            }
-            DocumentPatchOperationRequest::Replace {
-                offset,
-                length,
-                text,
-            } => DocumentPatchOperation::Replace {
-                offset,
-                length,
-                text,
-            },
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct PatchDocumentContentRequest {
-    pub operations: Vec<DocumentPatchOperationRequest>,
-}
-
-#[utoipa::path(
-    put,
-    path = "/api/documents/{id}/content",
-    tag = "Documents",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)")
-    ),
-    request_body = UpdateDocumentContentRequest,
-    responses((status = 200, body = Document))
-)]
-pub async fn update_document_content(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Path(id): Path<Uuid>,
-    q: Option<Query<SnapshotTokenQuery>>,
-    Json(body): Json<UpdateDocumentContentRequest>,
-) -> Result<Json<Document>, StatusCode> {
-    let params = q.map(|Query(v)| v).unwrap_or_default();
-    let token = params.token.as_deref();
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-    let service = ctx.document_service();
-    let updated = service
-        .update_content(&actor, id, &body.content)
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(to_http_document(updated)))
-}
-
-#[utoipa::path(
-    patch,
-    path = "/api/documents/{id}/content",
-    tag = "Documents",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)")
-    ),
-    request_body = PatchDocumentContentRequest,
-    responses((status = 200, body = Document))
-)]
-pub async fn patch_document_content(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Path(id): Path<Uuid>,
-    q: Option<Query<SnapshotTokenQuery>>,
-    Json(body): Json<PatchDocumentContentRequest>,
-) -> Result<Json<Document>, StatusCode> {
-    if body.operations.is_empty() {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    let params = q.map(|Query(v)| v).unwrap_or_default();
-    let token = params.token.as_deref();
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-    let service = ctx.document_service();
-    let operations: Vec<DocumentPatchOperation> = body
-        .operations
-        .into_iter()
-        .map(DocumentPatchOperation::from)
-        .collect();
-    let updated = service
-        .patch_content(&actor, id, &operations)
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(to_http_document(updated)))
-}
-
-#[allow(dead_code)]
-#[derive(ToSchema)]
-pub struct DocumentDownloadBinary(#[schema(value_type = String, format = Binary)] Vec<u8>);
-
-#[allow(dead_code)]
-#[derive(ToSchema)]
-pub struct DocumentArchiveBinary(#[schema(value_type = String, format = Binary)] Vec<u8>);
-
-#[derive(Debug, Clone, Copy, Deserialize, ToSchema, Default)]
-#[serde(rename_all = "snake_case")]
-#[schema(rename_all = "snake_case")]
-pub enum DownloadFormat {
-    #[default]
-    Archive,
-    Markdown,
-    Html,
-    Html5,
-    Pdf,
-    Docx,
-    Latex,
-    Beamer,
-    Context,
-    Man,
-    Mediawiki,
-    Dokuwiki,
-    Textile,
-    Org,
-    Texinfo,
-    Opml,
-    Docbook,
-    Opendocument,
-    Odt,
-    Rtf,
-    Epub,
-    Epub3,
-    Fb2,
-    Asciidoc,
-    Icml,
-    Slidy,
-    Slideous,
-    Dzslides,
-    Revealjs,
-    S5,
-    Json,
-    Plain,
-    Commonmark,
-    CommonmarkX,
-    MarkdownStrict,
-    MarkdownPhpextra,
-    MarkdownGithub,
-    Rst,
-    Native,
-    Haddock,
-}
-
-impl From<DownloadFormat> for DocumentDownloadFormat {
-    fn from(value: DownloadFormat) -> Self {
-        match value {
-            DownloadFormat::Archive => DocumentDownloadFormat::Archive,
-            DownloadFormat::Markdown => DocumentDownloadFormat::Markdown,
-            DownloadFormat::Html => DocumentDownloadFormat::Html,
-            DownloadFormat::Html5 => DocumentDownloadFormat::Html5,
-            DownloadFormat::Pdf => DocumentDownloadFormat::Pdf,
-            DownloadFormat::Docx => DocumentDownloadFormat::Docx,
-            DownloadFormat::Latex => DocumentDownloadFormat::Latex,
-            DownloadFormat::Beamer => DocumentDownloadFormat::Beamer,
-            DownloadFormat::Context => DocumentDownloadFormat::Context,
-            DownloadFormat::Man => DocumentDownloadFormat::Man,
-            DownloadFormat::Mediawiki => DocumentDownloadFormat::MediaWiki,
-            DownloadFormat::Dokuwiki => DocumentDownloadFormat::Dokuwiki,
-            DownloadFormat::Textile => DocumentDownloadFormat::Textile,
-            DownloadFormat::Org => DocumentDownloadFormat::Org,
-            DownloadFormat::Texinfo => DocumentDownloadFormat::Texinfo,
-            DownloadFormat::Opml => DocumentDownloadFormat::Opml,
-            DownloadFormat::Docbook => DocumentDownloadFormat::Docbook,
-            DownloadFormat::Opendocument => DocumentDownloadFormat::OpenDocument,
-            DownloadFormat::Odt => DocumentDownloadFormat::Odt,
-            DownloadFormat::Rtf => DocumentDownloadFormat::Rtf,
-            DownloadFormat::Epub => DocumentDownloadFormat::Epub,
-            DownloadFormat::Epub3 => DocumentDownloadFormat::Epub3,
-            DownloadFormat::Fb2 => DocumentDownloadFormat::Fb2,
-            DownloadFormat::Asciidoc => DocumentDownloadFormat::Asciidoc,
-            DownloadFormat::Icml => DocumentDownloadFormat::Icml,
-            DownloadFormat::Slidy => DocumentDownloadFormat::Slidy,
-            DownloadFormat::Slideous => DocumentDownloadFormat::Slideous,
-            DownloadFormat::Dzslides => DocumentDownloadFormat::Dzslides,
-            DownloadFormat::Revealjs => DocumentDownloadFormat::Revealjs,
-            DownloadFormat::S5 => DocumentDownloadFormat::S5,
-            DownloadFormat::Json => DocumentDownloadFormat::Json,
-            DownloadFormat::Plain => DocumentDownloadFormat::Plain,
-            DownloadFormat::Commonmark => DocumentDownloadFormat::Commonmark,
-            DownloadFormat::CommonmarkX => DocumentDownloadFormat::CommonmarkX,
-            DownloadFormat::MarkdownStrict => DocumentDownloadFormat::MarkdownStrict,
-            DownloadFormat::MarkdownPhpextra => DocumentDownloadFormat::MarkdownPhpextra,
-            DownloadFormat::MarkdownGithub => DocumentDownloadFormat::MarkdownGithub,
-            DownloadFormat::Rst => DocumentDownloadFormat::Rst,
-            DownloadFormat::Native => DocumentDownloadFormat::Native,
-            DownloadFormat::Haddock => DocumentDownloadFormat::Haddock,
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema, Default)]
-pub struct DownloadDocumentQuery {
-    pub token: Option<String>,
-    #[serde(default)]
-    pub format: DownloadFormat,
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/documents/{id}/download",
-    tag = "Documents",
-    operation_id = "download_document",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)"),
-        ("format" = Option<DownloadFormat>, Query, description = "Download format (see schema for supported values)")
-    ),
-    responses(
-        (status = 200, description = "Document download", body = DocumentDownloadBinary, content_type = "application/octet-stream"),
-        (status = 401, description = "Unauthorized"),
-        (status = 404, description = "Document not found")
-    )
-)]
-pub async fn download_document(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Query(params): Query<DownloadDocumentQuery>,
-    Path(id): Path<Uuid>,
-) -> Result<Response, (StatusCode, Json<Value>)> {
-    let token = params.token.as_deref();
-    let format = params.format;
-    let error_response = |status: StatusCode, code: &str, message: String| {
-        (
-            status,
-            Json(json!({
-                "error": code,
-                "message": message,
-            })),
-        )
-    };
-
-    let actor = match auth::resolve_actor_from_parts(&ctx, bearer, token).await {
-        Some(actor) => actor,
-        None => {
-            return Err(error_response(
-                StatusCode::UNAUTHORIZED,
-                "unauthorized",
-                "Unauthorized".to_string(),
-            ));
-        }
-    };
-
-    let service = ctx.document_service();
-    let download = match service.download_document(&actor, id, format.into()).await {
-        Ok(payload) => payload,
-        Err(ServiceError::Unauthorized)
-        | Err(ServiceError::TokenExpired)
-        | Err(ServiceError::Forbidden)
-        | Err(ServiceError::NotFound) => {
-            return Err(error_response(
-                StatusCode::NOT_FOUND,
-                "not_found",
-                "Document not found".to_string(),
-            ));
-        }
-        Err(ServiceError::Conflict) => {
-            return Err(error_response(
-                StatusCode::CONFLICT,
-                "conflict",
-                "Document cannot be downloaded".to_string(),
-            ));
-        }
-        Err(ServiceError::BadRequest(_)) => {
-            return Err(error_response(
-                StatusCode::BAD_REQUEST,
-                "bad_request",
-                "Invalid download request".to_string(),
-            ));
-        }
-        Err(ServiceError::Unexpected(error)) => {
-            error!(
-                document_id = %id,
-                ?format,
-                error = ?error,
-                "document_download_failed"
-            );
-            return Err(error_response(
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "internal",
-                "Failed to prepare download".to_string(),
-            ));
-        }
-    };
-
-    let mut headers = HeaderMap::new();
-    let content_type = match HeaderValue::from_str(&download.content_type) {
-        Ok(value) => value,
-        Err(_) => {
-            return Err(error_response(
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "invalid_header",
-                "Failed to prepare download headers".to_string(),
-            ));
-        }
-    };
-    headers.insert(axum::http::header::CONTENT_TYPE, content_type);
-    headers.insert(
-        axum::http::header::HeaderName::from_static("x-content-type-options"),
-        HeaderValue::from_static("nosniff"),
-    );
-    let disposition = format!("attachment; filename=\"{}\"", download.filename);
-    let content_disposition = match HeaderValue::from_str(&disposition) {
-        Ok(value) => value,
-        Err(_) => {
-            return Err(error_response(
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "invalid_header",
-                "Failed to prepare download headers".to_string(),
-            ));
-        }
-    };
-    headers.insert(axum::http::header::CONTENT_DISPOSITION, content_disposition);
-
-    Ok((headers, download.bytes).into_response())
-}
-
-#[utoipa::path(patch, path = "/api/documents/{id}", tag = "Documents", request_body = UpdateDocumentRequest,
-    params(("id" = Uuid, Path, description = "Document ID"),), responses((status = 200, body = Document)))]
-pub async fn update_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-    Json(req): Json<UpdateDocumentRequest>,
-) -> Result<Json<Document>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let parent_opt = match req.parent_id.clone() {
-        DoubleOption::NotProvided => None,
-        DoubleOption::Null => Some(None),
-        DoubleOption::Some(v) => Some(Some(v)),
-    };
-    let service = ctx.document_service();
-    let doc = service
-        .update_metadata(
-            workspace_id,
-            id,
-            user_id,
-            &permissions,
-            req.title.clone(),
-            parent_opt,
-        )
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(to_http_document(doc)))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/documents/{id}/duplicate",
-    tag = "Documents",
-    request_body = DuplicateDocumentRequest,
-    params(("id" = Uuid, Path, description = "Document ID"),),
-    responses((status = 200, body = Document))
-)]
-pub async fn duplicate_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-    Json(req): Json<DuplicateDocumentRequest>,
-) -> Result<Json<Document>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let parent_opt = match req.parent_id.clone() {
-        DoubleOption::NotProvided => None,
-        DoubleOption::Null => Some(None),
-        DoubleOption::Some(v) => Some(Some(v)),
-    };
-    let doc = ctx
-        .document_service()
-        .duplicate_document(
-            workspace_id,
-            id,
-            user_id,
-            &permissions,
-            req.title.clone(),
-            parent_opt,
-        )
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(to_http_document(doc)))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/documents/{id}/archive",
-    tag = "Documents",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses(
-        (status = 200, body = Document),
-        (status = 404, description = "Document not found"),
-        (status = 409, description = "Document already archived")
-    )
-)]
-pub async fn archive_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Document>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let doc = ctx
-        .document_service()
-        .archive_document(workspace_id, id, user_id, &permissions)
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(to_http_document(doc)))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/documents/{id}/unarchive",
-    tag = "Documents",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses(
-        (status = 200, body = Document),
-        (status = 404, description = "Document not found"),
-        (status = 409, description = "Document is not archived")
-    )
-)]
-pub async fn unarchive_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Document>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let doc = ctx
-        .document_service()
-        .unarchive_document(workspace_id, id, user_id, &permissions)
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(to_http_document(doc)))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/documents/{id}/snapshots",
-    tag = "Documents",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)"),
-        ("limit" = Option<i64>, Query, description = "Maximum number of snapshots to return"),
-        ("offset" = Option<i64>, Query, description = "Offset for pagination")
-    ),
-    responses((status = 200, body = SnapshotListResponse))
-)]
-pub async fn list_document_snapshots(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Path(id): Path<Uuid>,
-    q: Option<Query<ListSnapshotsQuery>>,
-) -> Result<Json<SnapshotListResponse>, StatusCode> {
-    let params = q.map(|Query(v)| v).unwrap_or_default();
-    let token = params.token.as_deref();
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-
-    let limit = params.limit.unwrap_or(50).clamp(1, 200);
-    let offset = params.offset.unwrap_or(0).max(0);
-
-    let service = ctx.document_service();
-    let records = service
-        .list_snapshots(&actor, id, limit, offset)
-        .await
-        .map_err(map_service_error)?;
-    let items = records.into_iter().map(snapshot_summary_from).collect();
-
-    Ok(Json(SnapshotListResponse { items }))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/documents/{id}/snapshots/{snapshot_id}/diff",
-    tag = "Documents",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("snapshot_id" = Uuid, Path, description = "Snapshot ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)"),
-        ("compare" = Option<Uuid>, Query, description = "Snapshot ID to compare against (defaults to current document state)"),
-        ("base" = Option<SnapshotDiffBaseParam>, Query, description = "Base comparison to use when compare is not provided (auto|current|previous)")
-    ),
-    responses((status = 200, body = SnapshotDiffResponse))
-)]
-pub async fn get_document_snapshot_diff(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Path((id, snapshot_id)): Path<(Uuid, Uuid)>,
-    q: Option<Query<SnapshotDiffQuery>>,
-) -> Result<Json<SnapshotDiffResponse>, StatusCode> {
-    let params = q.map(|Query(v)| v).unwrap_or_default();
-    let token = params.token.as_deref();
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-
-    let base_mode = params
-        .base
-        .map(SnapshotDiffBaseMode::from)
-        .unwrap_or(SnapshotDiffBaseMode::Auto);
-
-    let service = ctx.document_service();
-    let result = service
-        .snapshot_diff(&actor, id, snapshot_id, params.compare, base_mode)
-        .await
-        .map_err(map_service_error)?;
-
-    let diff = result.diff;
-    let base = snapshot_diff_side_response_from(result.base);
-    let target = snapshot_diff_side_response_from(result.target);
-
-    Ok(Json(SnapshotDiffResponse { base, target, diff }))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/documents/{id}/snapshots/{snapshot_id}/restore",
-    tag = "Documents",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("snapshot_id" = Uuid, Path, description = "Snapshot ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)")
-    ),
-    responses((status = 200, body = SnapshotRestoreResponse))
-)]
-pub async fn restore_document_snapshot(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Path((id, snapshot_id)): Path<(Uuid, Uuid)>,
-    q: Option<Query<SnapshotTokenQuery>>,
-) -> Result<Json<SnapshotRestoreResponse>, StatusCode> {
-    let params = q.map(|Query(v)| v).unwrap_or_default();
-    let token = params.token.as_deref();
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-
-    let service = ctx.document_service();
-    let restored = service
-        .restore_snapshot(&actor, id, snapshot_id)
-        .await
-        .map_err(map_service_error)?;
-
-    Ok(Json(SnapshotRestoreResponse {
-        snapshot: snapshot_summary_from(restored),
-    }))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/documents/{id}/snapshots/{snapshot_id}/download",
-    tag = "Documents",
-    params(
-        ("id" = Uuid, Path, description = "Document ID"),
-        ("snapshot_id" = Uuid, Path, description = "Snapshot ID"),
-        ("token" = Option<String>, Query, description = "Share token (optional)")
-    ),
-    responses(
-        (status = 200, description = "Snapshot archive", body = DocumentArchiveBinary, content_type = "application/zip"),
-        (status = 401, description = "Unauthorized"),
-        (status = 404, description = "Snapshot not found")
-    )
-)]
-pub async fn download_document_snapshot(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Path((id, snapshot_id)): Path<(Uuid, Uuid)>,
-    q: Option<Query<SnapshotTokenQuery>>,
-) -> Result<Response, StatusCode> {
-    let params = q.map(|Query(v)| v).unwrap_or_default();
-    let token = params.token.as_deref();
-    let actor = auth::resolve_actor_from_parts(&ctx, bearer, token)
-        .await
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-
-    let service = ctx.document_service();
-    let download = service
-        .download_snapshot(&actor, id, snapshot_id)
-        .await
-        .map_err(map_service_error)?;
-
-    let mut headers = HeaderMap::new();
-    headers.insert(
-        axum::http::header::CONTENT_TYPE,
-        HeaderValue::from_static("application/zip"),
-    );
-    let disposition = format!("attachment; filename=\"{}\"", download.filename);
-    let content_disposition =
-        HeaderValue::from_str(&disposition).map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
-    headers.insert(axum::http::header::CONTENT_DISPOSITION, content_disposition);
-
-    Ok((headers, download.bytes).into_response())
-}
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route("/documents", get(list_documents).post(create_document))
-        .route(
-            "/documents/:id",
-            get(get_document)
-                .delete(delete_document)
-                .patch(update_document),
-        )
-        .route(
-            "/documents/:id/content",
-            get(get_document_content)
-                .put(update_document_content)
-                .patch(patch_document_content),
-        )
-        .route("/documents/:id/duplicate", post(duplicate_document))
-        .route("/documents/:id/archive", post(archive_document))
-        .route("/documents/:id/unarchive", post(unarchive_document))
-        .route("/documents/:id/snapshots", get(list_document_snapshots))
-        .route(
-            "/documents/:id/snapshots/:snapshot_id/diff",
-            get(get_document_snapshot_diff),
-        )
-        .route(
-            "/documents/:id/snapshots/:snapshot_id/restore",
-            post(restore_document_snapshot),
-        )
-        .route(
-            "/documents/:id/snapshots/:snapshot_id/download",
-            get(download_document_snapshot),
-        )
-        .route("/documents/:id/download", get(download_document))
-        .route("/documents/:id/backlinks", get(get_backlinks))
-        .route("/documents/:id/links", get(get_outgoing_links))
-        .route("/documents/search", get(search_documents))
-        .with_state(ctx)
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SearchResult {
-    pub id: Uuid,
-    pub title: String,
-    pub document_type: String,
-    pub path: Option<String>,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-}
-
-#[derive(Debug, Deserialize)]
-pub struct SearchQuery {
-    pub q: Option<String>,
-}
-
-#[derive(Debug, Default, Deserialize)]
-pub struct ListSnapshotsQuery {
-    pub token: Option<String>,
-    pub limit: Option<i64>,
-    pub offset: Option<i64>,
-}
-
-#[derive(Debug, Default, Deserialize)]
-pub struct SnapshotDiffQuery {
-    pub token: Option<String>,
-    pub compare: Option<Uuid>,
-    #[serde(default)]
-    pub base: Option<SnapshotDiffBaseParam>,
-}
-
-#[derive(Debug, Default, Deserialize)]
-pub struct SnapshotTokenQuery {
-    pub token: Option<String>,
-}
-
-#[utoipa::path(get, path = "/api/documents/search", tag = "Documents",
-    params(("q" = Option<String>, Query, description = "Query")),
-    responses((status = 200, body = [SearchResult])))]
-pub async fn search_documents(
-    State(ctx): State<AppContext>,
-    bearer: crate::presentation::http::auth::Bearer,
-    headers: HeaderMap,
-    q: Option<Query<SearchQuery>>,
-) -> Result<Json<Vec<SearchResult>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW)
-        .await?;
-    let query_text = q.and_then(|Query(v)| v.q);
-
-    let service = ctx.document_service();
-    let hits = service
-        .search_for_user(workspace_id, query_text, 20)
-        .await
-        .map_err(map_service_error)?;
-    let items = hits
-        .into_iter()
-        .map(|h| SearchResult {
-            id: h.id,
-            title: h.title,
-            document_type: h.doc_type,
-            path: h.path,
-            updated_at: h.updated_at,
-        })
-        .collect();
-    Ok(Json(items))
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct BacklinkInfo {
-    pub document_id: String,
-    pub title: String,
-    pub document_type: String,
-    pub file_path: Option<String>,
-    pub link_type: String,
-    pub link_text: Option<String>,
-    pub link_count: i64,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct BacklinksResponse {
-    pub backlinks: Vec<BacklinkInfo>,
-    pub total_count: usize,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct OutgoingLink {
-    pub document_id: String,
-    pub title: String,
-    pub document_type: String,
-    pub file_path: Option<String>,
-    pub link_type: String,
-    pub link_text: Option<String>,
-    pub position_start: Option<i64>,
-    pub position_end: Option<i64>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct OutgoingLinksResponse {
-    pub links: Vec<OutgoingLink>,
-    pub total_count: usize,
-}
-
-#[utoipa::path(get, path = "/api/documents/{id}/backlinks", tag = "Documents", operation_id = "getBacklinks",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, body = BacklinksResponse)))]
-pub async fn get_backlinks(
-    State(ctx): State<AppContext>,
-    bearer: crate::presentation::http::auth::Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<Json<BacklinksResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW)
-        .await?;
-    let actor = access::Actor::User(user_id);
-    let service = ctx.document_service();
-    let items = service
-        .backlinks(&actor, workspace_id, id)
-        .await
-        .map_err(map_service_error)?;
-    let backlinks: Vec<BacklinkInfo> = items
-        .into_iter()
-        .map(|r| BacklinkInfo {
-            document_id: r.document_id.to_string(),
-            title: r.title,
-            document_type: r.document_type,
-            file_path: r.file_path,
-            link_type: r.link_type,
-            link_text: r.link_text,
-            link_count: r.link_count,
-        })
-        .collect();
-    Ok(Json(BacklinksResponse {
-        total_count: backlinks.len(),
-        backlinks,
-    }))
-}
-
-#[utoipa::path(get, path = "/api/documents/{id}/links", tag = "Documents", operation_id = "getOutgoingLinks",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, body = OutgoingLinksResponse)))]
-pub async fn get_outgoing_links(
-    State(ctx): State<AppContext>,
-    bearer: crate::presentation::http::auth::Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<Json<OutgoingLinksResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW)
-        .await?;
-    let actor = access::Actor::User(user_id);
-    let service = ctx.document_service();
-    let items = service
-        .outgoing_links(&actor, workspace_id, id)
-        .await
-        .map_err(map_service_error)?;
-    let links = items
-        .into_iter()
-        .map(|r| OutgoingLink {
-            document_id: r.document_id.to_string(),
-            title: r.title,
-            document_type: r.document_type,
-            file_path: r.file_path,
-            link_type: r.link_type,
-            link_text: r.link_text,
-            position_start: r.position_start,
-            position_end: r.position_end,
-        })
-        .collect::<Vec<_>>();
-
-    Ok(Json(OutgoingLinksResponse {
-        total_count: links.len(),
-        links,
-    }))
-}
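The `DoubleOption` wrapper in the file deleted above is the one piece worth re-checking after the move: PATCH bodies must distinguish `parent_id` absent (keep the parent), explicit `null` (move to root), and a value (reparent), and plain `Option` collapses the first two cases. A compact sketch of the same tri-state trick, simplified to `String` instead of `Uuid` so it runs with only `serde` and `serde_json`:

```rust
use serde::{Deserialize, Deserializer};

// Tri-state field: key absent, key present as null, or key present with a value.
#[derive(Debug, Default, PartialEq)]
enum DoubleOption<T> {
    #[default]
    NotProvided,
    Null,
    Some(T),
}

// Only invoked when the key is present in the JSON; `#[serde(default)]` on the
// field covers the absent case via Default::default().
fn deserialize_double_option<'de, D, T>(de: D) -> Result<DoubleOption<T>, D::Error>
where
    D: Deserializer<'de>,
    T: Deserialize<'de>,
{
    Ok(match Option::<T>::deserialize(de)? {
        None => DoubleOption::Null,
        Some(v) => DoubleOption::Some(v),
    })
}

#[derive(Debug, Deserialize)]
struct UpdateRequest {
    #[serde(default, deserialize_with = "deserialize_double_option")]
    parent_id: DoubleOption<String>,
}

fn main() {
    let keep: UpdateRequest = serde_json::from_str("{}").unwrap();
    let clear: UpdateRequest = serde_json::from_str(r#"{"parent_id": null}"#).unwrap();
    let set: UpdateRequest = serde_json::from_str(r#"{"parent_id": "42"}"#).unwrap();
    assert_eq!(keep.parent_id, DoubleOption::NotProvided);
    assert_eq!(clear.parent_id, DoubleOption::Null);
    assert_eq!(set.parent_id, DoubleOption::Some("42".into()));
}
```

The handlers then map this into the `Option<Option<Uuid>>` that `update_metadata` and `duplicate_document` expect: `NotProvided => None`, `Null => Some(None)`, `Some(v) => Some(Some(v))`.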
Option = None; - let mut content_type: Option = None; - - while let Some(field) = multipart - .next_field() - .await - .map_err(|_| StatusCode::BAD_REQUEST)? - { - let name = field.name().map(|s| s.to_string()); - let file_name = field.file_name().map(|s| s.to_string()); - let ct = field.content_type().map(|s| s.to_string()); - match name.as_deref() { - Some("document_id") => { - let t = field.text().await.map_err(|_| StatusCode::BAD_REQUEST)?; - document_id = Uuid::parse_str(t.trim()).ok(); - } - Some("file") => { - // Read file field (allow any extension/MIME; size limit enforced below) - orig_filename = file_name.clone(); - content_type = ct.clone(); - let data = field.bytes().await.map_err(|_| StatusCode::BAD_REQUEST)?; - // Enforce configured max upload size (additional safety besides DefaultBodyLimit) - if data.len() > ctx.cfg.upload_max_bytes { - return Err(StatusCode::PAYLOAD_TOO_LARGE); - } - file_bytes = Some(data.to_vec()); - } - _ => { /* ignore additional fields */ } - } - } - - let doc_id = document_id.ok_or(StatusCode::BAD_REQUEST)?; - let bytes = file_bytes.ok_or(StatusCode::BAD_REQUEST)?; - - let public_base_url = ctx.cfg.public_base_url.clone(); - let file_service = ctx.file_service(); - let f = file_service - .upload_file( - workspace_id, - user_id, - doc_id, - bytes, - orig_filename, - content_type.clone(), - public_base_url, - ) - .await - .map_err(map_file_error)?; - Ok(Json(UploadFileResponse { - id: f.id, - url: f.url, - filename: f.filename, - content_type: f.content_type, - size: f.size, - })) -} - -/// GET /api/files/{id} -> bytes (fallback; primary is /uploads/{filename}) -#[utoipa::path( - get, - path = "/api/files/{id}", - tag = "Files", - params(("id" = Uuid, Path, description = "File ID")), - responses((status = 200, description = "OK", body = Vec, content_type = "application/octet-stream")) -)] -pub async fn get_file( - State(ctx): State, - bearer: Bearer, - headers: HeaderMap, - AxumPath(id): AxumPath, -) -> Result { - let bearer_token = bearer.0.clone(); - let sub = crate::presentation::http::auth::validate_bearer(&ctx, bearer).await?; - let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?; - let workspace_id = workspace_scope::resolve_active_workspace_id( - &ctx, - &headers, - Some(bearer_token.as_str()), - user_id, - ) - .await?; - workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW) - .await?; - let payload = ctx - .file_service() - .download_owned_file(workspace_id, id) - .await - .map_err(map_file_error)?; - Ok(file_payload_response(payload)) -} - -#[derive(Debug, Deserialize)] -pub struct FileByNameQuery { - pub document_id: Uuid, -} - -/// GET /api/files/documents/{filename}?document_id=uuid -> bytes -#[utoipa::path( - get, - path = "/api/files/documents/{filename}", - tag = "Files", - params(("filename" = String, Path, description = "File name"), ("document_id" = Uuid, Query, description = "Document ID")), - responses((status = 200, description = "OK", body = Vec, content_type = "application/octet-stream")) -)] -pub async fn get_file_by_name( - State(ctx): State, - bearer: Bearer, - headers: HeaderMap, - AxumPath(filename): AxumPath, - Query(q): Query, -) -> Result { - // auth: owner of the document only - let bearer_token = bearer.0.clone(); - let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?; - let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?; - let workspace_id = workspace_scope::resolve_active_workspace_id( - &ctx, - 
&headers, - Some(bearer_token.as_str()), - user_id, - ) - .await?; - workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW) - .await?; - - let actor = access::Actor::User(user_id); - let payload = ctx - .file_service() - .get_file_by_name(&actor, q.document_id, &filename) - .await - .map_err(map_file_error)?; - Ok(file_payload_response(payload)) -} - -/// Serve static files from uploads directory with authentication support -/// Supports both JWT auth and share tokens -pub async fn serve_upload( - State(ctx): State, - AxumPath(path): AxumPath, - Query(params): Query>, - headers: HeaderMap, -) -> Result { - // Accept share token from query, or JWT/share token from Authorization header / cookie. - let share_token = params.get("token").cloned(); - let bearer_token = headers - .get(axum::http::header::AUTHORIZATION) - .and_then(|h| h.to_str().ok()) - .and_then(|s| s.strip_prefix("Bearer ").map(|s| s.to_string())) - .or_else(|| { - headers - .get(axum::http::header::COOKIE) - .and_then(|h| h.to_str().ok()) - .and_then(|cookie_hdr| { - for part in cookie_hdr.split(';') { - let kv = part.trim(); - if let Some((k, v)) = kv.split_once('=') { - if k.trim() == "access_token" { - return Some(v.trim().to_string()); - } - } - } - None - }) - }); - let bearer = bearer_token.clone().map(Bearer); - - // Path must start with document UUID. If not, reject. - let parts: Vec<&str> = path.split('/').collect(); - if parts.len() < 2 { - return Err(StatusCode::FORBIDDEN); - } - let doc_id = Uuid::parse_str(parts[0]).map_err(|_| StatusCode::FORBIDDEN)?; - - // Build actor and require at least view capability (or public) - let mut actor = auth::resolve_actor_from_parts(&ctx, bearer, share_token.as_deref()).await; - if actor.is_none() { - if let Some(token_str) = bearer_token { - actor = auth::resolve_actor_from_token_str(&ctx, &token_str).await; - } - } - let actor = actor.unwrap_or(access::Actor::Public); - let attachment_path = parts[1..].join("/"); - let payload = ctx - .file_service() - .serve_upload(&actor, doc_id, &attachment_path) - .await - .map_err(map_file_error)?; - - Ok(file_payload_response(payload)) -} - -pub fn routes(ctx: AppContext) -> Router { - Router::new() - .route("/files", post(upload_file)) - .route("/files/:id", get(get_file)) - .route("/files/documents/:filename", get(get_file_by_name)) - .with_state(ctx) -} diff --git a/api/src/presentation/http/git.rs b/api/src/presentation/http/git.rs deleted file mode 100644 index 3782ad68..00000000 --- a/api/src/presentation/http/git.rs +++ /dev/null @@ -1,1319 +0,0 @@ -use axum::{ - Json, Router, - extract::State, - http::{HeaderMap, StatusCode}, - routing::{get, post}, -}; -use serde::{Deserialize, Serialize}; -use utoipa::ToSchema; - -use crate::presentation::http::auth::{Bearer, validate_bearer}; -// Config is no longer needed directly here -use crate::application::dto::diff::TextDiffResult; -use crate::application::dto::git::{ - GitChangeItem as GitChangeDto, GitCommitInfo, GitConfigDto, GitPullRequestDto, - GitPullResolutionDto, GitPullSessionDto, GitStatusDto, GitSyncRequestDto, GitignoreUpdateDto, - UpsertGitConfigInput, -}; -use crate::application::services::errors::ServiceError; -use crate::application::services::git::FinalizePullSessionResult; -use crate::domain::workspaces::permissions::{PERM_GIT_CONFIGURE, PERM_GIT_INIT, PERM_GIT_SYNC}; -use crate::presentation::context::AppContext; -use crate::presentation::http::workspace_scope; -use tracing::error; -use uuid::Uuid; - -// Uses AppContext as router state 
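The deleted `serve_upload` handler above accepts a share token via the query string, or a JWT from either the `Authorization` header or an `access_token` cookie, in that order. A minimal sketch of that token-extraction order, assuming plain strings instead of axum's `HeaderMap` (the function name is illustrative, not from the codebase):

```rust
/// Prefer `Authorization: Bearer ...`, then fall back to the
/// `access_token` cookie, mirroring the deleted handler's order.
fn token_from_parts(authorization: Option<&str>, cookie: Option<&str>) -> Option<String> {
    if let Some(s) = authorization.and_then(|s| s.strip_prefix("Bearer ")) {
        return Some(s.to_string());
    }
    cookie.and_then(|hdr| {
        hdr.split(';').find_map(|part| {
            // Cookies arrive as `k=v` pairs separated by semicolons.
            let (k, v) = part.trim().split_once('=')?;
            (k.trim() == "access_token").then(|| v.trim().to_string())
        })
    })
}

fn main() {
    assert_eq!(token_from_parts(Some("Bearer abc"), None).as_deref(), Some("abc"));
    assert_eq!(
        token_from_parts(None, Some("theme=dark; access_token=xyz")).as_deref(),
        Some("xyz")
    );
}
```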
diff --git a/api/src/presentation/http/git.rs b/api/src/presentation/http/git.rs
deleted file mode 100644
index 3782ad68..00000000
--- a/api/src/presentation/http/git.rs
+++ /dev/null
@@ -1,1319 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::State,
-    http::{HeaderMap, StatusCode},
-    routing::{get, post},
-};
-use serde::{Deserialize, Serialize};
-use utoipa::ToSchema;
-
-use crate::presentation::http::auth::{Bearer, validate_bearer};
-// Config is no longer needed directly here
-use crate::application::dto::diff::TextDiffResult;
-use crate::application::dto::git::{
-    GitChangeItem as GitChangeDto, GitCommitInfo, GitConfigDto, GitPullRequestDto,
-    GitPullResolutionDto, GitPullSessionDto, GitStatusDto, GitSyncRequestDto, GitignoreUpdateDto,
-    UpsertGitConfigInput,
-};
-use crate::application::services::errors::ServiceError;
-use crate::application::services::git::FinalizePullSessionResult;
-use crate::domain::workspaces::permissions::{PERM_GIT_CONFIGURE, PERM_GIT_INIT, PERM_GIT_SYNC};
-use crate::presentation::context::AppContext;
-use crate::presentation::http::workspace_scope;
-use tracing::error;
-use uuid::Uuid;
-
-// Uses AppContext as router state
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route(
-            "/git/config",
-            get(get_config)
-                .post(create_or_update_config)
-                .delete(delete_config),
-        )
-        .route("/git/status", get(get_status))
-        .route("/git/changes", get(get_changes))
-        .route("/git/history", get(get_history))
-        .route("/git/diff/working", get(get_working_diff))
-        .route("/git/diff/commits/:from/:to", get(get_commit_diff))
-        .route("/git/sync", post(sync_now))
-        .route("/git/import", post(import_repository))
-        .route("/git/pull", post(pull_repository))
-        .route("/git/pull/start", post(start_pull_session))
-        .route("/git/pull/session/:id", get(get_pull_session))
-        .route("/git/pull/session/:id/resolve", post(resolve_pull_session))
-        .route(
-            "/git/pull/session/:id/finalize",
-            post(finalize_pull_session),
-        )
-        .route("/git/init", post(init_repository))
-        .route("/git/deinit", post(deinit_repository))
-        .route("/git/ignore/doc/:id", post(ignore_document))
-        .route("/git/ignore/folder/:id", post(ignore_folder))
-        .route(
-            "/git/gitignore/patterns",
-            get(get_gitignore_patterns).post(add_gitignore_patterns),
-        )
-        .route("/git/gitignore/check", post(check_path_ignored))
-        .with_state(ctx)
-}
-
-fn map_git_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "git_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct GitignoreUpdateResponse {
-    pub added: usize,
-    pub patterns: Vec<String>,
-}
-
-impl From<GitignoreUpdateDto> for GitignoreUpdateResponse {
-    fn from(value: GitignoreUpdateDto) -> Self {
-        Self {
-            added: value.added,
-            patterns: value.patterns,
-        }
-    }
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitConfigResponse {
-    pub id: uuid::Uuid,
-    pub repository_url: String,
-    pub branch_name: String,
-    pub auth_type: String,
-    pub auto_sync: bool,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-    pub remote_check: Option<GitRemoteCheckResponse>,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitRemoteCheckResponse {
-    pub ok: bool,
-    pub message: String,
-    pub reason: Option<String>,
-}
-
-impl From<crate::application::dto::git::GitRemoteCheckDto> for GitRemoteCheckResponse {
-    fn from(value: crate::application::dto::git::GitRemoteCheckDto) -> Self {
-        Self {
-            ok: value.ok,
-            message: value.message,
-            reason: value.reason,
-        }
-    }
-}
-
-impl From<GitConfigDto> for GitConfigResponse {
-    fn from(d: GitConfigDto) -> Self {
-        GitConfigResponse {
-            id: d.id,
-            repository_url: d.repository_url,
-            branch_name: d.branch_name,
-            auth_type: d.auth_type,
-            auto_sync: d.auto_sync,
-            created_at: d.created_at,
-            updated_at: d.updated_at,
-            remote_check: None,
-        }
-    }
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema)]
-pub struct CreateGitConfigRequest {
-    pub repository_url: String,
-    pub branch_name: Option<String>,
-    pub auth_type: String,
-    pub auth_data: serde_json::Value,
-    pub auto_sync: Option<bool>,
-}
-impl From<CreateGitConfigRequest> for UpsertGitConfigInput {
-    fn from(r: CreateGitConfigRequest) -> Self {
-        UpsertGitConfigInput {
-            repository_url: r.repository_url,
-            branch_name: r.branch_name,
-            auth_type: r.auth_type,
-            auth_data: r.auth_data,
-            auto_sync: r.auto_sync,
-        }
-    }
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema)]
-pub struct UpdateGitConfigRequest {
-    pub repository_url: Option<String>,
-    pub branch_name: Option<String>,
-    pub auth_type: Option<String>,
-    pub auth_data: Option<serde_json::Value>,
-    pub auto_sync: Option<bool>,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitPullResolution {
-    pub path: String,
-    pub choice: String,
-    pub content: Option<String>,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema)]
-pub struct GitPullRequest {
-    pub resolutions: Option<Vec<GitPullResolution>>,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitPullConflictItem {
-    pub path: String,
-    pub is_binary: bool,
-    pub ours: Option<String>,
-    pub theirs: Option<String>,
-    pub base: Option<String>,
-    pub document_id: Option<Uuid>,
-}
-
-impl From<crate::application::dto::git::GitPullConflictItemDto> for GitPullConflictItem {
-    fn from(value: crate::application::dto::git::GitPullConflictItemDto) -> Self {
-        Self {
-            path: value.path,
-            is_binary: value.is_binary,
-            ours: value.ours,
-            theirs: value.theirs,
-            base: value.base,
-            document_id: value.document_id,
-        }
-    }
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitPullResponse {
-    pub success: bool,
-    pub message: String,
-    pub files_changed: i32,
-    pub commit_hash: Option<String>,
-    pub conflicts: Option<Vec<GitPullConflictItem>>,
-    pub git_status: Option<GitStatus>,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitImportResponse {
-    pub success: bool,
-    pub message: String,
-    pub files_changed: i32,
-    pub commit_hash: Option<String>,
-    pub docs_created: i32,
-    pub attachments_created: i32,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitPullSessionResponse {
-    pub session_id: uuid::Uuid,
-    pub status: String,
-    pub conflicts: Vec<GitPullConflictItem>,
-    pub resolutions: Vec<GitPullResolution>,
-    pub message: Option<String>,
-}
-
-impl From<GitPullSessionDto> for GitPullSessionResponse {
-    fn from(value: GitPullSessionDto) -> Self {
-        Self {
-            session_id: value.id,
-            status: value.status,
-            conflicts: value.conflicts.into_iter().map(Into::into).collect(),
-            resolutions: value
-                .resolutions
-                .into_iter()
-                .map(|r| GitPullResolution {
-                    path: r.path,
-                    choice: r.choice,
-                    content: r.content,
-                })
-                .collect(),
-            message: value.message,
-        }
-    }
-}
-
-#[utoipa::path(get, path = "/api/git/config", tag = "Git", responses((status = 200, body = Option<GitConfigResponse>)))]
-pub async fn get_config(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<Option<GitConfigResponse>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_INIT)
-        .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_CONFIGURE)
-        .await?;
-    let service = ctx.git_service();
-    let resp: Option<GitConfigDto> = service
-        .get_config(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    let mut out: Option<GitConfigResponse> = resp.map(Into::into);
-    if let Some(ref mut cfg) = out {
-        if let Some(check) = service
-            .check_remote(workspace_id)
-            .await
-            .map_err(map_git_error)?
-        {
-            cfg.remote_check = Some(check.into());
-        }
-    }
-    Ok(Json(out))
-}
-
-#[utoipa::path(post, path = "/api/git/config", tag = "Git", request_body = CreateGitConfigRequest, responses((status = 200, body = GitConfigResponse)))]
-pub async fn create_or_update_config(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<CreateGitConfigRequest>,
-) -> Result<Json<GitConfigResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_INIT)
-        .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_CONFIGURE)
-        .await?;
-    let input: UpsertGitConfigInput = req.into();
-    let service = ctx.git_service();
-    let resp: GitConfigDto = service
-        .upsert_config(workspace_id, &input)
-        .await
-        .map_err(|err| match err {
-            ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-            other => map_git_error(other),
-        })?;
-    let mut out: GitConfigResponse = resp.into();
-    if let Some(check) = service
-        .check_remote(workspace_id)
-        .await
-        .map_err(map_git_error)?
-    {
-        out.remote_check = Some(check.into());
-    }
-    Ok(Json(out))
-}
-
-#[utoipa::path(delete, path = "/api/git/config", tag = "Git", responses((status = 204, description = "Deleted")))]
-pub async fn delete_config(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_CONFIGURE)
-        .await?;
-    let service = ctx.git_service();
-    service
-        .delete_config(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct GitStatus {
-    pub repository_initialized: bool,
-    pub has_remote: bool,
-    pub current_branch: Option<String>,
-    pub uncommitted_changes: u32,
-    pub untracked_files: u32,
-    pub last_sync: Option<chrono::DateTime<chrono::Utc>>,
-    pub last_sync_status: Option<String>,
-    pub last_sync_message: Option<String>,
-    pub last_sync_commit_hash: Option<String>,
-    pub sync_enabled: bool,
-}
-impl From<GitStatusDto> for GitStatus {
-    fn from(d: GitStatusDto) -> Self {
-        GitStatus {
-            repository_initialized: d.repository_initialized,
-            has_remote: d.has_remote,
-            current_branch: d.current_branch,
-            uncommitted_changes: d.uncommitted_changes,
-            untracked_files: d.untracked_files,
-            last_sync: d.last_sync,
-            last_sync_status: d.last_sync_status,
-            last_sync_message: d.last_sync_message,
-            last_sync_commit_hash: d.last_sync_commit_hash,
-            sync_enabled: d.sync_enabled,
-        }
-    }
-}
-
-// Diff models are provided in application::dto::git
-// strip_user_prefix moved to application/use_cases/git/helpers
-
-// compute_doc_patterns_with is provided in use-cases layer; no local definition here
-
-// compute_doc_patterns: no longer used (use-case handles patterns via shared helper)
-
-#[utoipa::path(post, path = "/api/git/ignore/doc/{id}", params(("id" = String, Path, description = "Document ID")), tag = "Git", responses((status = 200, description = "OK")))]
-pub async fn ignore_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<String>,
-) -> Result<Json<GitignoreUpdateResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let doc_id = Uuid::parse_str(&id).map_err(|_| StatusCode::BAD_REQUEST)?;
-    let service = ctx.git_service();
-    let res = service
-        .ignore_document(workspace_id, doc_id)
-        .await
-        .map_err(|err| match err {
-            ServiceError::NotFound => StatusCode::NOT_FOUND,
-            other => map_git_error(other),
-        })?;
-    Ok(Json(res.into()))
-}
-
-#[utoipa::path(post, path = "/api/git/ignore/folder/{id}", params(("id" = String, Path, description = "Folder ID")), tag = "Git", responses((status = 200, description = "OK")))]
-pub async fn ignore_folder(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<String>,
-) -> Result<Json<GitignoreUpdateResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let folder_id = Uuid::parse_str(&id).map_err(|_| StatusCode::BAD_REQUEST)?;
-    let service = ctx.git_service();
-    let res = service
-        .ignore_folder(workspace_id, folder_id)
-        .await
-        .map_err(|err| match err {
-            ServiceError::NotFound => StatusCode::NOT_FOUND,
-            other => map_git_error(other),
-        })?;
-    Ok(Json(res.into()))
-}
-
-#[derive(Deserialize, ToSchema)]
-pub struct AddPatternsRequest {
-    pub patterns: Vec<String>,
-}
-
-#[utoipa::path(post, path = "/api/git/gitignore/patterns", tag = "Git", request_body = AddPatternsRequest, responses((status = 200, description = "OK")))]
-pub async fn add_gitignore_patterns(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<AddPatternsRequest>,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let added = service
-        .add_gitignore_patterns(workspace_id, req.patterns)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(serde_json::json!({"added": added})))
-}
-
-#[utoipa::path(get, path = "/api/git/gitignore/patterns", tag = "Git", responses((status = 200, description = "OK")))]
-pub async fn get_gitignore_patterns(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let patterns = service
-        .get_gitignore_patterns(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(serde_json::json!({"patterns": patterns})))
-}
-
-#[derive(Deserialize, ToSchema)]
-pub struct CheckIgnoredRequest {
-    pub path: String,
-}
-
-#[utoipa::path(post, path = "/api/git/gitignore/check", tag = "Git", request_body = CheckIgnoredRequest, responses((status = 200, description = "OK")))]
-pub async fn check_path_ignored(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<CheckIgnoredRequest>,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let is_ignored = service
-        .check_path_ignored(workspace_id, &req.path)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(
-        serde_json::json!({"path": req.path, "is_ignored": is_ignored}),
-    ))
-}
-
-#[utoipa::path(get, path = "/api/git/status", tag = "Git", responses((status = 200, body = GitStatus)))]
-pub async fn get_status(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<GitStatus>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let dto: GitStatusDto = service
-        .get_status(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    let out: GitStatus = dto.into();
-    Ok(Json(out))
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct GitSyncRequest {
-    pub message: Option<String>,
-    pub force: Option<bool>,
-    pub full_scan: Option<bool>,
-    pub skip_push: Option<bool>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct GitSyncResponse {
-    pub success: bool,
-    pub message: String,
-    pub commit_hash: Option<String>,
-    pub files_changed: u32,
-}
-
-#[utoipa::path(post, path = "/api/git/sync", tag = "Git", request_body = GitSyncRequest, responses((status = 200, body = GitSyncResponse), (status = 409, description = "Conflicts during rebase/pull")))]
-pub async fn sync_now(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<GitSyncRequest>,
-) -> Result<Json<GitSyncResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let out = service
-        .sync_now(
-            workspace_id,
-            GitSyncRequestDto {
-                message: req.message.clone(),
-                force: req.force,
-                full_scan: req.full_scan,
-                skip_push: req.skip_push,
-            },
-        )
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(GitSyncResponse {
-        success: out.success,
-        message: out.message,
-        commit_hash: out.commit_hash,
-        files_changed: out.files_changed,
-    }))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/git/import",
-    tag = "Git",
-    request_body = CreateGitConfigRequest,
-    responses((status = 200, body = GitImportResponse))
-)]
-pub async fn import_repository(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<CreateGitConfigRequest>,
-) -> Result<Json<GitImportResponse>, StatusCode> {
-    if req.repository_url.trim().is_empty() {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_INIT)
-        .await?;
-
-    let service = ctx.git_service();
-    let dto = service
-        .import_repository(workspace_id, user_id, &UpsertGitConfigInput::from(req))
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(GitImportResponse {
-        success: true,
-        message: dto.message,
-        files_changed: dto.files_changed as i32,
-        commit_hash: dto.commit_hash,
-        docs_created: dto.docs_created as i32,
-        attachments_created: dto.attachments_created as i32,
-    }))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/git/pull",
-    tag = "Git",
-    request_body = GitPullRequest,
-    responses(
-        (status = 200, body = GitPullResponse),
-        (status = 409, body = GitPullResponse, description = "Conflicts detected")
-    )
-)]
-pub async fn pull_repository(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<GitPullRequest>,
-) -> Result<(StatusCode, Json<GitPullResponse>), StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-    let service = ctx.git_service();
-    let dto = service
-        .pull_repository(
-            workspace_id,
-            user_id,
-            GitPullRequestDto {
-                resolutions: req
-                    .resolutions
-                    .unwrap_or_default()
-                    .into_iter()
-                    .map(|r| GitPullResolutionDto {
-                        path: r.path,
-                        choice: r.choice,
-                        content: r.content,
-                    })
-                    .collect(),
-            },
-        )
-        .await
-        .map_err(|err| {
-            let message = match &err {
-                ServiceError::BadRequest("workspace_has_pending_changes") => {
-                    "Workspace has pending changes. Commit, sync, or discard them before pulling."
-                        .to_string()
-                }
-                _ => err.to_string(),
-            };
-            let status = map_git_error(err);
-            let body = GitPullResponse {
-                success: false,
-                message,
-                files_changed: 0,
-                commit_hash: None,
-                conflicts: None,
-                git_status: None,
-            };
-            (status, body)
-        });
-    let dto = match dto {
-        Ok(v) => v,
-        Err((status, body)) => return Ok((status, Json(body))),
-    };
-    let conflicts = dto
-        .conflicts
-        .map(|items| items.into_iter().map(Into::into).collect::<Vec<GitPullConflictItem>>())
-        .unwrap_or_default();
-    let has_conflicts = !conflicts.is_empty();
-    let status = if has_conflicts {
-        StatusCode::CONFLICT
-    } else {
-        StatusCode::OK
-    };
-    Ok((
-        status,
-        Json(GitPullResponse {
-            success: dto.success,
-            message: dto.message,
-            files_changed: dto.files_changed as i32,
-            commit_hash: dto.commit_hash,
-            conflicts: if has_conflicts { Some(conflicts) } else { None },
-            git_status: None,
-        }),
-    ))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/git/pull/start",
-    tag = "Git",
-    responses(
-        (status = 200, body = GitPullSessionResponse),
-        (status = 400, body = GitPullSessionResponse),
-        (status = 409, body = GitPullSessionResponse, description = "Conflicts detected")
-    )
-)]
-pub async fn start_pull_session(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<(StatusCode, Json<GitPullSessionResponse>), StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-
-    let service = ctx.git_service();
-    let session = match service.start_pull_session_flow(workspace_id, user_id).await {
-        Ok(v) => v,
-        Err(err) => {
-            let message = match &err {
-                ServiceError::BadRequest("workspace_has_pending_changes") => {
-                    "Workspace has pending changes. Commit, sync, or discard them before pulling."
-                        .to_string()
-                }
-                other => other.to_string(),
-            };
-            let status = map_git_error(err);
-            return Ok((
-                status,
-                Json(GitPullSessionResponse {
-                    session_id: Uuid::nil(),
-                    status: "error".to_string(),
-                    conflicts: Vec::new(),
-                    resolutions: Vec::new(),
-                    message: Some(message),
-                }),
-            ));
-        }
-    };
-    if session.status == "error" {
-        return Ok((
-            StatusCode::BAD_REQUEST,
-            Json(GitPullSessionResponse {
-                session_id: session.id,
-                status: session.status,
-                conflicts: Vec::new(),
-                resolutions: Vec::new(),
-                message: session.message,
-            }),
-        ));
-    }
-    let conflicts = session
-        .conflicts
-        .clone()
-        .into_iter()
-        .map(Into::into)
-        .collect::<Vec<GitPullConflictItem>>();
-    let has_conflicts = !conflicts.is_empty();
-    let status = if has_conflicts {
-        StatusCode::CONFLICT
-    } else {
-        StatusCode::OK
-    };
-    Ok((
-        status,
-        Json(GitPullSessionResponse {
-            session_id: session.id,
-            status: session.status,
-            conflicts,
-            resolutions: Vec::new(),
-            message: session.message,
-        }),
-    ))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/git/pull/session/{id}",
-    tag = "Git",
-    responses((status = 200, body = GitPullSessionResponse))
-)]
-pub async fn get_pull_session(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<Uuid>,
-) -> Result<Json<GitPullSessionResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-
-    let service = ctx.git_service();
-    let state = service
-        .load_pull_session_with_stale_check(workspace_id, id)
-        .await
-        .map_err(|err| {
-            let status = map_git_error(err);
-            status
-        })?
-        .ok_or(StatusCode::NOT_FOUND)?;
-    Ok(Json(GitPullSessionResponse {
-        session_id: state.id,
-        status: state.status,
-        conflicts: state.conflicts.into_iter().map(Into::into).collect(),
-        resolutions: state
-            .resolutions
-            .into_iter()
-            .map(|r| GitPullResolution {
-                path: r.path,
-                choice: r.choice,
-                content: r.content,
-            })
-            .collect(),
-        message: state.message,
-    }))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/git/pull/session/{id}/resolve",
-    tag = "Git",
-    request_body = GitPullRequest,
-    responses(
-        (status = 200, body = GitPullSessionResponse),
-        (status = 400, body = GitPullSessionResponse),
-        (status = 409, body = GitPullSessionResponse)
-    )
-)]
-pub async fn resolve_pull_session(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<Uuid>,
-    Json(req): Json<GitPullRequest>,
-) -> Result<(StatusCode, Json<GitPullSessionResponse>), StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-
-    let service = ctx.git_service();
-    let existing_session = service
-        .load_pull_session_with_stale_check(workspace_id, id)
-        .await
-        .map_err(map_git_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-    let resolutions = req.resolutions.unwrap_or_default();
-    let session = match service
-        .resolve_pull_session_flow(
-            workspace_id,
-            user_id,
-            id,
-            resolutions
-                .iter()
-                .cloned()
-                .map(|r| GitPullResolutionDto {
-                    path: r.path,
-                    choice: r.choice,
-                    content: r.content,
-                })
-                .collect(),
-        )
-        .await
-    {
-        Ok(v) => v,
-        Err(err) => {
-            let message = match &err {
-                ServiceError::BadRequest("workspace_has_pending_changes") => {
-                    "Workspace has pending changes. Commit, sync, or discard them before pulling."
-                        .to_string()
-                }
-                other => other.to_string(),
-            };
-            let status = map_git_error(err);
-            return Ok((
-                status,
-                Json(GitPullSessionResponse {
-                    session_id: id,
-                    status: "error".to_string(),
-                    conflicts: existing_session
-                        .conflicts
-                        .into_iter()
-                        .map(Into::into)
-                        .collect(),
-                    resolutions: existing_session
-                        .resolutions
-                        .into_iter()
-                        .map(|r| GitPullResolution {
-                            path: r.path,
-                            choice: r.choice,
-                            content: r.content,
-                        })
-                        .collect(),
-                    message: Some(message),
-                }),
-            ));
-        }
-    };
-
-    let mut status_code = StatusCode::OK;
-
-    let conflicts: Vec<GitPullConflictItem> = session
-        .conflicts
-        .clone()
-        .into_iter()
-        .map(Into::into)
-        .collect();
-    if !conflicts.is_empty() {
-        status_code = StatusCode::CONFLICT;
-    }
-    if session.status == "stale" {
-        status_code = StatusCode::CONFLICT;
-    }
-    if session.status == "error" {
-        status_code = StatusCode::BAD_REQUEST;
-    }
-    let session_status = session.status.clone();
-
-    Ok((
-        status_code,
-        Json(GitPullSessionResponse {
-            session_id: id,
-            status: session_status.clone(),
-            conflicts,
-            resolutions,
-            message: if session_status == "error" {
-                session.message
-            } else if status_code == StatusCode::CONFLICT && session_status == "stale" {
-                Some("Pull session is stale. Please start a new pull.".to_string())
-            } else {
-                session.message
-            },
-        }),
-    ))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/git/pull/session/{id}/finalize",
-    tag = "Git",
-    responses(
-        (status = 200, body = GitPullResponse),
-        (status = 400, body = GitPullResponse),
-        (status = 409, body = GitPullResponse)
-    )
-)]
-pub async fn finalize_pull_session(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<Uuid>,
-) -> Result<(StatusCode, Json<GitPullResponse>), StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_GIT_SYNC)
-        .await?;
-
-    let service = ctx.git_service();
-    let FinalizePullSessionResult {
-        session,
-        git_status,
-    } = service
-        .finalize_pull_session_flow(workspace_id, id)
-        .await
-        .map_err(map_git_error)?;
-    if session.status == "error" {
-        return Ok((
-            StatusCode::BAD_REQUEST,
-            Json(GitPullResponse {
-                success: false,
-                message: session
-                    .message
-                    .clone()
-                    .unwrap_or_else(|| "pull failed".to_string()),
-                files_changed: 0,
-                commit_hash: None,
-                conflicts: Some(session.conflicts.into_iter().map(Into::into).collect()),
-                git_status: None,
-            }),
-        ));
-    }
-    if session.status == "stale" {
-        return Ok((
-            StatusCode::CONFLICT,
-            Json(GitPullResponse {
-                success: false,
-                message: session
-                    .message
-                    .clone()
-                    .unwrap_or_else(|| "pull session stale".to_string()),
-                files_changed: 0,
-                commit_hash: None,
-                conflicts: Some(session.conflicts.into_iter().map(Into::into).collect()),
-                git_status: None,
-            }),
-        ));
-    }
-    if !session.conflicts.is_empty() {
-        return Ok((
-            StatusCode::CONFLICT,
-            Json(GitPullResponse {
-                success: false,
-                message: "conflicts remaining".to_string(),
-                files_changed: 0,
-                commit_hash: None,
-                conflicts: Some(session.conflicts.into_iter().map(Into::into).collect()),
-                git_status: None,
-            }),
-        ));
-    }
-    Ok((
-        StatusCode::OK,
-        Json(GitPullResponse {
-            success: true,
-            message: session
-                .message
-                .clone()
-                .unwrap_or_else(|| "merge completed".to_string()),
-            files_changed: 0,
-            commit_hash: None,
-            conflicts: None,
-            git_status: git_status.map(Into::into),
-        }),
-    ))
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct GitChangeItem {
-    pub path: String,
-    pub status: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct GitChangesResponse {
-    pub files: Vec<GitChangeItem>,
-}
-
-#[utoipa::path(get, path = "/api/git/changes", tag = "Git", responses((status = 200, body = GitChangesResponse)))]
-pub async fn get_changes(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<GitChangesResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let files: Vec<GitChangeDto> = service
-        .get_changes(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    let items = files
-        .into_iter()
-        .map(|c| GitChangeItem {
-            path: c.path,
-            status: c.status,
-        })
-        .collect();
-    Ok(Json(GitChangesResponse { files: items }))
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct GitCommitItem {
-    pub hash: String,
-    pub message: String,
-    pub author_name: String,
-    pub author_email: String,
-    pub time: chrono::DateTime<chrono::Utc>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct GitHistoryResponse {
-    pub commits: Vec<GitCommitItem>,
-}
-
-#[utoipa::path(get, path = "/api/git/history", tag = "Git", responses((status = 200, body = GitHistoryResponse)))]
-pub async fn get_history(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<GitHistoryResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let commits: Vec<GitCommitInfo> = service
-        .get_history(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    let out = commits
-        .into_iter()
-        .map(|c| GitCommitItem {
-            hash: c.hash,
-            message: c.message,
-            author_name: c.author_name,
-            author_email: c.author_email,
-            time: c.time,
-        })
-        .collect();
-    Ok(Json(GitHistoryResponse { commits: out }))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/git/diff/working",
-    tag = "Git",
-    responses((status = 200, body = [TextDiffResult]))
-)]
-pub async fn get_working_diff(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<Vec<TextDiffResult>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let diffs = service
-        .get_working_diff(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(diffs))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/git/diff/commits/{from}/{to}",
-    params(("from" = String, Path, description = "From"), ("to" = String, Path, description = "To")),
-    tag = "Git",
-    responses((status = 200, body = [TextDiffResult]))
-)]
-pub async fn get_commit_diff(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path((from, to)): axum::extract::Path<(String, String)>,
-) -> Result<Json<Vec<TextDiffResult>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    let diffs = service
-        .get_commit_diff(workspace_id, &from, &to)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(diffs))
-}
-
-// pull endpoint intentionally removed in push-only backup mode
-
-#[utoipa::path(post, path = "/api/git/init", tag = "Git", responses((status = 200, description = "OK")))]
-pub async fn init_repository(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    service
-        .init_repository(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(serde_json::json!({"success":true})))
-}
-
-#[utoipa::path(post, path = "/api/git/deinit", tag = "Git", responses((status = 200, description = "OK")))]
-pub async fn deinit_repository(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = validate_bearer(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let service = ctx.git_service();
-    service
-        .deinit_repository(workspace_id)
-        .await
-        .map_err(map_git_error)?;
-    Ok(Json(serde_json::json!({"success":true})))
-}
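The deleted pull-session endpoints above form a three-step flow: `POST /git/pull/start` opens a session (409 when conflicts are detected), `POST .../resolve` records per-path choices, and `POST .../finalize` completes the merge. The handlers consistently map session state to HTTP status. A distilled sketch of that mapping, using a hypothetical `SessionView` stand-in for the service DTOs:

```rust
/// Hypothetical condensed view of a pull session's outcome.
struct SessionView {
    status: &'static str, // "ok" | "stale" | "error" per the handlers above
    open_conflicts: usize,
}

/// Mirrors the status-code logic of resolve/finalize: errors are 400,
/// stale sessions and unresolved conflicts are 409, otherwise 200.
fn http_status(s: &SessionView) -> u16 {
    match s.status {
        "error" => 400,
        "stale" => 409, // caller must start a new pull
        _ if s.open_conflicts > 0 => 409,
        _ => 200,
    }
}

fn main() {
    assert_eq!(http_status(&SessionView { status: "ok", open_conflicts: 0 }), 200);
    assert_eq!(http_status(&SessionView { status: "ok", open_conflicts: 2 }), 409);
    assert_eq!(http_status(&SessionView { status: "stale", open_conflicts: 0 }), 409);
}
```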
diff --git a/api/src/presentation/http/markdown.rs b/api/src/presentation/http/markdown.rs
deleted file mode 100644
index 03353af4..00000000
--- a/api/src/presentation/http/markdown.rs
+++ /dev/null
@@ -1,281 +0,0 @@
-use std::collections::HashMap;
-
-use crate::application::access;
-use crate::application::services::errors::ServiceError;
-use crate::application::services::markdown::{PlaceholderItem, RenderOptions, RenderResponse};
-use crate::application::services::markdown_render::MarkdownRenderTask;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::auth::{self, Bearer};
-use axum::{Json, Router, extract::State, http::StatusCode, routing::post};
-use serde::{Deserialize, Serialize};
-use utoipa::ToSchema;
-use uuid::Uuid;
-// no bearer injection; renderer should receive token via options when needed
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route("/markdown/render", post(render_markdown))
-        .route("/markdown/render-many", post(render_markdown_many))
-        .with_state(ctx)
-}
-
-#[derive(Debug, Clone, Deserialize, Serialize, ToSchema, Default)]
-#[serde(default)]
-pub struct RenderOptionsPayload {
-    pub flavor: Option<String>,
-    pub theme: Option<String>,
-    pub features: Option<Vec<String>>,
-    pub sanitize: Option<bool>,
-    pub hardbreaks: Option<bool>,
-    pub doc_id: Option<Uuid>,
-    pub base_origin: Option<String>,
-    pub absolute_attachments: Option<bool>,
-    pub token: Option<String>,
-}
-
-impl From<RenderOptionsPayload> for RenderOptions {
-    fn from(value: RenderOptionsPayload) -> Self {
-        RenderOptions {
-            flavor: value.flavor,
-            theme: value.theme,
-            features: value.features,
-            sanitize: value.sanitize,
-            hardbreaks: value.hardbreaks,
-            doc_id: value.doc_id,
-            base_origin: value.base_origin,
-            absolute_attachments: value.absolute_attachments,
-            token: value.token,
-        }
-    }
-}
-
-impl From<RenderOptions> for RenderOptionsPayload {
-    fn from(value: RenderOptions) -> Self {
-        Self {
-            flavor: value.flavor,
-            theme: value.theme,
-            features: value.features,
-            sanitize: value.sanitize,
-            hardbreaks: value.hardbreaks,
-            doc_id: value.doc_id,
-            base_origin: value.base_origin,
-            absolute_attachments: value.absolute_attachments,
-            token: value.token,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, ToSchema)]
-pub struct PlaceholderItemPayload {
-    pub kind: String,
-    pub id: String,
-    pub code: String,
-}
-
-impl From<PlaceholderItem> for PlaceholderItemPayload {
-    fn from(value: PlaceholderItem) -> Self {
-        Self {
-            kind: value.kind,
-            id: value.id,
-            code: value.code,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, ToSchema)]
-pub struct RenderResponseBody {
-    pub html: String,
-    #[serde(skip_serializing_if = "Vec::is_empty")]
-    pub placeholders: Vec<PlaceholderItemPayload>,
-    pub hash: String,
-}
-
-impl From<RenderResponse> for RenderResponseBody {
-    fn from(value: RenderResponse) -> Self {
-        Self {
-            html: value.html,
-            placeholders: value
-                .placeholders
-                .into_iter()
-                .map(PlaceholderItemPayload::from)
-                .collect(),
-            hash: value.hash,
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct RenderRequest {
-    text: String,
-    #[serde(default)]
-    options: RenderOptionsPayload,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct RenderManyRequest {
-    items: Vec<RenderRequest>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct RenderManyResponse {
-    items: Vec<RenderResponseBody>,
-}
-
-#[utoipa::path(post, path = "/api/markdown/render", tag = "Markdown",
-    request_body = RenderRequest,
-    responses((status = 200, body = RenderResponseBody)))]
-pub async fn render_markdown(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Json(req): Json<RenderRequest>,
-) -> Result<Json<RenderResponseBody>, StatusCode> {
-    // Per-item size guard (2MB)
-    if req.text.len() > 2 * 1024 * 1024 {
-        return Err(StatusCode::PAYLOAD_TOO_LARGE);
-    }
-    let RenderRequest { text, options } = req;
-    let options: RenderOptions = options.into();
-
-    let bearer_token = bearer.as_ref().map(|b| b.0.as_str());
-    let user_scope =
-        resolve_user_scope_from_inputs(&ctx, bearer_token, options.token.as_deref()).await;
-
-    let renderer = ctx.markdown_renderer();
-    let resp = renderer
-        .render_single(text, options, user_scope)
-        .await
-        .map_err(map_markdown_error)?;
-    Ok(Json(RenderResponseBody::from(resp)))
-}
-
-#[utoipa::path(post, path = "/api/markdown/render-many", tag = "Markdown",
-    request_body = RenderManyRequest,
-    responses((status = 200, body = RenderManyResponse)))]
-pub async fn render_markdown_many(
-    State(ctx): State<AppContext>,
-    bearer: Option<Bearer>,
-    Json(req): Json<RenderManyRequest>,
-) -> Result<Json<RenderManyResponse>, StatusCode> {
-    // Guard: item count and total size
-    const MAX_ITEMS: usize = 128;
-    const MAX_TOTAL_BYTES: usize = 5 * 1024 * 1024; // 5MB
-    let items = req.items;
-    if items.len() > MAX_ITEMS {
-        return Err(StatusCode::PAYLOAD_TOO_LARGE);
-    }
-    let total: usize = items.iter().map(|i| i.text.len()).sum();
-    if total > MAX_TOTAL_BYTES {
-        return Err(StatusCode::PAYLOAD_TOO_LARGE);
-    }
-
-    let bearer_token = bearer.as_ref().map(|b| b.0.clone());
-    let bearer_scope = resolve_user_scope_from_inputs(&ctx, bearer_token.as_deref(), None).await;
-    let mut share_scope_cache: HashMap<String, Option<Uuid>> = HashMap::new();
-    let mut tasks = Vec::with_capacity(items.len());
-
-    for item in items {
-        if item.text.len() > 2 * 1024 * 1024 {
-            return Err(StatusCode::PAYLOAD_TOO_LARGE);
-        }
-        let RenderRequest { text, options } = item;
-        let options: RenderOptions = options.into();
-        let user_scope = if bearer_scope.is_some() {
-            bearer_scope
-        } else if let Some(token) = options.token.as_deref() {
-            if let Some(scope) = share_scope_cache.get(token) {
-                *scope
-            } else {
-                let scope = resolve_user_scope_from_inputs(&ctx, None, Some(token)).await;
-                share_scope_cache.insert(token.to_string(), scope);
-                scope
-            }
-        } else {
-            None
-        };
-        tasks.push(MarkdownRenderTask {
-            text,
-            options,
-            user_scope,
-        });
-    }
-
-    let renderer = ctx.markdown_renderer();
-    let responses = renderer
-        .render_many(tasks)
-        .await
-        .map_err(map_markdown_error)?;
-    let items = responses
-        .into_iter()
-        .map(RenderResponseBody::from)
-        .collect();
-    Ok(Json(RenderManyResponse { items }))
-}
-
-fn map_markdown_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(_) => StatusCode::INTERNAL_SERVER_ERROR,
-    }
-}
-
-async fn resolve_user_scope_from_inputs(
-    ctx: &AppContext,
-    bearer_token: Option<&str>,
-    share_token: Option<&str>,
-) -> Option<Uuid> {
-    if let Some(token) = bearer_token {
-        if let Some(workspace_id) = ctx.auth_service().workspace_from_token_claim(token) {
-            return Some(workspace_id);
-        }
-        if let Ok(Some(workspace_id)) = ctx.auth_service().workspace_from_token_async(token).await {
-            return Some(workspace_id);
-        }
-        if let Ok(sub) = auth::validate_bearer_str(ctx, token).await {
-            if let Ok(uid) = Uuid::parse_str(&sub) {
-                if let Ok(workspaces) = ctx.workspace_service().list_for_user(uid).await {
-                    if workspaces.is_empty() {
-                        return None;
-                    }
-                    if let Some(default_ws) = workspaces.iter().find(|ws| ws.is_default) {
-                        return Some(default_ws.id);
-                    }
-                    return Some(workspaces[0].id);
-                }
-            }
-        }
-    }
-    if let Some(token) = share_token {
-        // Share token: resolve its workspace for renderer so plugin manifests can be loaded.
-        if let Some(actor) = auth::resolve_actor_from_token_str(ctx, token).await {
-            match actor {
-                access::Actor::User(uid) => {
-                    if let Ok(workspaces) = ctx.workspace_service().list_for_user(uid).await {
-                        if workspaces.is_empty() {
-                            return None;
-                        }
-                        if let Some(default_ws) = workspaces.iter().find(|ws| ws.is_default) {
-                            return Some(default_ws.id);
-                        }
-                        return Some(workspaces[0].id);
-                    }
-                }
-                access::Actor::ShareToken(t) => {
-                    if let Ok(Some((_share_id, _perm, exp, _doc_id, _typ, workspace_id))) =
-                        ctx.share_service().resolve_share_context(&t).await
-                    {
-                        if exp.map(|e| e < chrono::Utc::now()).unwrap_or(false) {
-                            return None;
-                        }
-                        return Some(workspace_id);
-                    }
-                }
-                _ => {}
-            }
-        }
-    }
-    None
-}
diff --git a/api/src/presentation/http/mod.rs b/api/src/presentation/http/mod.rs
deleted file mode 100644
index 8fcd9ab7..00000000
--- a/api/src/presentation/http/mod.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-pub mod api_tokens;
-pub mod auth;
-pub mod documents;
-pub mod files;
-pub mod git;
-pub mod health;
-pub mod markdown;
-pub mod metrics;
-pub mod plugins;
-pub mod public;
-pub mod shares;
-pub mod shortcuts;
-pub mod storage_ingest;
-pub mod tags;
-pub mod workspace_scope;
-pub mod workspaces;
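The deleted `render_markdown_many` handler enforces three limits before doing any work: at most 128 items per request, at most 2MB per item, and at most 5MB total. Those checks as a standalone function, using the hard-coded numbers from the handler above (the function name is illustrative):

```rust
const MAX_ITEMS: usize = 128;
const MAX_ITEM_BYTES: usize = 2 * 1024 * 1024; // per-item guard
const MAX_TOTAL_BYTES: usize = 5 * 1024 * 1024; // whole-batch guard

/// Returns Err for any batch the deleted handler would have
/// rejected with 413 PAYLOAD_TOO_LARGE.
fn check_batch(texts: &[&str]) -> Result<(), &'static str> {
    if texts.len() > MAX_ITEMS {
        return Err("too many items");
    }
    if texts.iter().any(|t| t.len() > MAX_ITEM_BYTES) {
        return Err("item too large");
    }
    if texts.iter().map(|t| t.len()).sum::<usize>() > MAX_TOTAL_BYTES {
        return Err("batch too large");
    }
    Ok(())
}

fn main() {
    assert!(check_batch(&["# hello", "*world*"]).is_ok());
}
```

Checking totals as well as per-item sizes matters here: 128 items of just under 2MB each would otherwise let a single request carry ~256MB through the per-item guard alone.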
PERMISSION_DOC_READ: &str = "doc.read"; -const PERMISSION_DOC_WRITE: &str = "doc.write"; - -struct PluginUserContext { - workspace_id: Uuid, - user_id: Uuid, - permissions: PermissionSet, - actor: access::Actor, -} - -async fn resolve_plugin_user_context( - ctx: &AppContext, - headers: &HeaderMap, - bearer_token: &str, - required_permission: Option<&str>, -) -> Result { - if let Some(actor) = auth::resolve_actor_from_token_str(ctx, bearer_token).await { - match actor { - access::Actor::User(user_id) => { - let workspace_id = workspace_scope::resolve_active_workspace_id( - ctx, - headers, - Some(bearer_token), - user_id, - ) - .await - .map_err(|_| StatusCode::FORBIDDEN)?; - let permissions = - workspace_scope::resolve_workspace_permissions(ctx, workspace_id, user_id) - .await - .map_err(|_| StatusCode::FORBIDDEN)?; - if let Some(permission) = required_permission { - if !permissions.allows(permission) { - return Err(StatusCode::FORBIDDEN); - } - } - return Ok(PluginUserContext { - workspace_id, - user_id, - permissions, - actor: access::Actor::User(user_id), - }); - } - access::Actor::ShareToken(token) => { - let share = ctx - .share_service() - .resolve_share_context(&token) - .await - .map_err(|_| StatusCode::UNAUTHORIZED)? - .ok_or(StatusCode::UNAUTHORIZED)?; - let (_share_id, perm, expires_at, _shared_id, _shared_type, workspace_id) = share; - if let Some(exp) = expires_at { - if exp < chrono::Utc::now() { - return Err(StatusCode::UNAUTHORIZED); - } - } - let mut permissions = PermissionSet::from_slice(&[PERM_PLUGIN_RUN, PERM_DOC_VIEW]); - if perm == "edit" { - permissions.insert(PERM_DOC_EDIT); - } - if let Some(permission) = required_permission { - if !permissions.allows(permission) { - return Err(StatusCode::FORBIDDEN); - } - } - return Ok(PluginUserContext { - workspace_id, - // Share tokens do not map to a user; use workspace_id as a stable placeholder - user_id: workspace_id, - permissions, - actor: access::Actor::ShareToken(token), - }); - } - _ => {} - } - } - - Err(StatusCode::UNAUTHORIZED) -} - -pub fn routes(ctx: AppContext) -> Router { - Router::new() - // Manifest for current user (stubbed) - .route("/me/plugins/manifest", get(get_manifest)) - // SSE updates (stubbed) - .route("/me/plugins/updates", get(sse_updates)) - // Generic exec endpoint - .route("/plugins/:plugin/exec/:action", post(exec_action)) - .route("/me/plugins/install-from-url", post(install_from_url)) - .route("/me/plugins/uninstall", post(uninstall)) - // Generic records API - .route( - "/plugins/:plugin/docs/:doc_id/records/:kind", - get(list_records).post(create_record), - ) - .route( - "/plugins/:plugin/records/:id", - patch(update_record).delete(delete_record), - ) - .route( - "/plugins/:plugin/docs/:doc_id/kv/:key", - get(get_kv_value).put(put_kv_value), - ) - .route("/plugin-assets", get(get_plugin_asset)) - .with_state(ctx) -} - -#[derive(Debug, Deserialize, ToSchema)] -pub struct RecordsPath { - plugin: String, - doc_id: Uuid, - kind: String, -} - -#[derive(Debug, Serialize, ToSchema)] -pub struct RecordsResponse { - items: Vec, -} - -#[derive(Debug, Serialize, ToSchema)] -pub struct ExecResultResponse { - pub ok: bool, - #[serde(skip_serializing_if = "Option::is_none")] - pub data: Option, - pub effects: Vec, - #[serde(skip_serializing_if = "Option::is_none")] - pub error: Option, -} - -impl From for ExecResultResponse { - fn from(value: ExecResult) -> Self { - Self { - ok: value.ok, - data: value.data, - effects: value.effects, - error: value.error, - } - } -} - -#[derive(Debug, Serialize, 
ToSchema)] -pub struct ManifestItem { - id: String, - name: Option, - version: String, - scope: String, - mounts: Vec, - frontend: serde_json::Value, - permissions: Vec, - config: serde_json::Value, - ui: serde_json::Value, - author: Option, - repository: Option, -} - -impl From for ManifestItem { - fn from(value: PluginManifestItem) -> Self { - Self { - id: value.id, - name: value.name, - version: value.version, - scope: value.scope, - mounts: value.mounts, - frontend: value.frontend, - permissions: value.permissions, - config: value.config, - ui: value.ui, - author: value.author, - repository: value.repository, - } - } -} - -#[utoipa::path( - get, - path = "/api/plugins/{plugin}/docs/{doc_id}/records/{kind}", - params( - ("plugin" = String, Path, description = "Plugin ID"), - ("doc_id" = Uuid, Path, description = "Document ID"), - ("kind" = String, Path, description = "Record kind"), - ("limit" = Option, Query, description = "Limit"), - ("offset" = Option, Query, description = "Offset") - ), - responses((status = 200, body = RecordsResponse)), - tag = "Plugins" -)] -pub async fn list_records( - State(ctx): State, - bearer: Bearer, - headers: HeaderMap, - Query(params): Query>, - Path(p): Path, -) -> Result, StatusCode> { - ensure_valid_plugin_id(&p.plugin)?; - let bearer_token = bearer.0; - let plugin_ctx = - resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN)) - .await?; - let actor = plugin_ctx.actor; - ctx.authorization() - .require_view(&actor, p.doc_id) - .await - .map_err(|_| StatusCode::FORBIDDEN)?; - - let limit = params - .get("limit") - .and_then(|s| s.parse::().ok()) - .unwrap_or(50) - .clamp(1, 200); - let offset = params - .get("offset") - .and_then(|s| s.parse::().ok()) - .unwrap_or(0) - .max(0); - - ctx.plugin_permissions() - .ensure( - Some(plugin_ctx.workspace_id), - &p.plugin, - PERMISSION_DOC_READ, - ) - .await - .map_err(map_plugin_service_error)?; - - let plugin_data = ctx.plugin_data_service(); - let rows = plugin_data - .list_records(&p.plugin, "doc", p.doc_id, &p.kind, limit, offset) - .await - .map_err(map_plugin_service_error)?; - let mut items = Vec::with_capacity(rows.len()); - for r in rows { - // Normalize output shape for client (id + data + timestamps) - items.push(json!({ - "id": r.id, - "plugin": r.plugin, - "kind": r.kind, - "data": r.data, - "createdAt": r.created_at, - "updatedAt": r.updated_at, - })); - } - Ok(Json(RecordsResponse { items })) -} - -#[derive(Debug, Deserialize, ToSchema)] -pub struct CreateRecordBody { - data: serde_json::Value, -} - -#[utoipa::path( - post, - path = "/api/plugins/{plugin}/docs/{doc_id}/records/{kind}", - request_body = CreateRecordBody, - params( - ("plugin" = String, Path, description = "Plugin ID"), - ("doc_id" = Uuid, Path, description = "Document ID"), - ("kind" = String, Path, description = "Record kind") - ), - responses((status = 200, body = serde_json::Value)), - tag = "Plugins", - operation_id = "pluginsCreateRecord" -)] -pub async fn create_record( - State(ctx): State, - bearer: Bearer, - headers: HeaderMap, - Path(p): Path, - Json(body): Json, -) -> Result, StatusCode> { - ensure_valid_plugin_id(&p.plugin)?; - let bearer_token = bearer.0; - let plugin_ctx = - resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN)) - .await?; - let actor = plugin_ctx.actor.clone(); - ctx.authorization() - .require_edit(&actor, p.doc_id) - .await - .map_err(|_| StatusCode::FORBIDDEN)?; - - ctx.plugin_permissions() - .ensure( - 
-            Some(plugin_ctx.workspace_id),
-            &p.plugin,
-            PERMISSION_DOC_WRITE,
-        )
-        .await
-        .map_err(map_plugin_service_error)?;
-
-    // Attach authorId and timestamps if not provided
-    let mut data = body.data;
-    data["authorId"] = json!(plugin_ctx.user_id);
-
-    let plugin_data = ctx.plugin_data_service();
-    let rec = plugin_data
-        .create_record(&p.plugin, "doc", p.doc_id, &p.kind, &data)
-        .await
-        .map_err(map_plugin_service_error)?;
-    Ok(Json(json!({
-        "id": rec.id,
-        "data": rec.data,
-        "createdAt": rec.created_at,
-        "updatedAt": rec.updated_at,
-    })))
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateRecordPath {
-    plugin: String,
-    id: Uuid,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateRecordBody {
-    patch: serde_json::Value,
-}
-
-#[utoipa::path(
-    patch,
-    path = "/api/plugins/{plugin}/records/{id}",
-    request_body = UpdateRecordBody,
-    params(("plugin" = String, Path, description = "Plugin ID"), ("id" = Uuid, Path, description = "Record ID")),
-    responses((status = 200, body = serde_json::Value)),
-    tag = "Plugins",
-    operation_id = "pluginsUpdateRecord"
-)]
-pub async fn update_record(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(p): Path<UpdateRecordPath>,
-    Json(body): Json<UpdateRecordBody>,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    ensure_valid_plugin_id(&p.plugin)?;
-    let bearer_token_raw = bearer.0;
-    let plugin_ctx = resolve_plugin_user_context(
-        &ctx,
-        &headers,
-        bearer_token_raw.as_str(),
-        Some(PERM_PLUGIN_RUN),
-    )
-    .await?;
-    let actor = plugin_ctx.actor.clone();
-
-    let plugin_data = ctx.plugin_data_service();
-    // Get record for scope info and docId to enforce edit permission
-    let rec = plugin_data
-        .get_record(p.id)
-        .await
-        .map_err(map_plugin_service_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-
-    if rec.plugin != p.plugin {
-        return Err(StatusCode::NOT_FOUND);
-    }
-
-    // Edit permission on the doc scope
-    ctx.authorization()
-        .require_edit(&actor, rec.scope_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-
-    ctx.plugin_permissions()
-        .ensure(
-            Some(plugin_ctx.workspace_id),
-            &p.plugin,
-            PERMISSION_DOC_WRITE,
-        )
-        .await
-        .map_err(map_plugin_service_error)?;
-
-    let updated = plugin_data
-        .update_record(p.id, &body.patch)
-        .await
-        .map_err(map_plugin_service_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-
-    Ok(Json(json!({
-        "id": updated.id,
-        "data": updated.data,
-        "updatedAt": updated.updated_at,
-    })))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/plugins/{plugin}/records/{id}",
-    params(("plugin" = String, Path, description = "Plugin ID"), ("id" = Uuid, Path, description = "Record ID")),
-    responses((status = 204)),
-    tag = "Plugins",
-    operation_id = "pluginsDeleteRecord"
-)]
-pub async fn delete_record(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(p): Path<UpdateRecordPath>,
-) -> Result<StatusCode, StatusCode> {
-    ensure_valid_plugin_id(&p.plugin)?;
-    let bearer_token_raw = bearer.0;
-    let plugin_ctx = resolve_plugin_user_context(
-        &ctx,
-        &headers,
-        bearer_token_raw.as_str(),
-        Some(PERM_PLUGIN_RUN),
-    )
-    .await?;
-    let actor = plugin_ctx.actor.clone();
-    let plugin_data = ctx.plugin_data_service();
-    // Get record to authorize
-    let rec = plugin_data
-        .get_record(p.id)
-        .await
-        .map_err(map_plugin_service_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-
-    if rec.plugin != p.plugin {
-        return Err(StatusCode::NOT_FOUND);
-    }
-
-    ctx.authorization()
-        .require_edit(&actor, rec.scope_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-
-    ctx.plugin_permissions()
-        .ensure(
-            Some(plugin_ctx.workspace_id),
-            &p.plugin,
-            PERMISSION_DOC_WRITE,
-        )
-        .await
-        .map_err(map_plugin_service_error)?;
-
-    let ok = plugin_data
-        .delete_record(p.id)
-        .await
-        .map_err(map_plugin_service_error)?;
-    if ok {
-        Ok(StatusCode::NO_CONTENT)
-    } else {
-        Err(StatusCode::NOT_FOUND)
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct KvPath {
-    plugin: String,
-    doc_id: Uuid,
-    key: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct KvValueResponse {
-    value: serde_json::Value,
-}
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct KvValueBody {
-    value: serde_json::Value,
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/plugins/{plugin}/docs/{doc_id}/kv/{key}",
-    params(("plugin" = String, Path, description = "Plugin ID"), ("doc_id" = Uuid, Path, description = "Document ID"), ("key" = String, Path, description = "Key")),
-    responses((status = 200, body = KvValueResponse)),
-    tag = "Plugins",
-    operation_id = "pluginsGetKv"
-)]
-pub async fn get_kv_value(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(p): Path<KvPath>,
-) -> Result<Json<KvValueResponse>, StatusCode> {
-    ensure_valid_plugin_id(&p.plugin)?;
-    let bearer_token = bearer.0;
-    let plugin_ctx =
-        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
-            .await?;
-    let actor = plugin_ctx.actor.clone();
-    ctx.authorization()
-        .require_view(&actor, p.doc_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-
-    ctx.plugin_permissions()
-        .ensure(
-            Some(plugin_ctx.workspace_id),
-            &p.plugin,
-            PERMISSION_DOC_READ,
-        )
-        .await
-        .map_err(map_plugin_service_error)?;
-
-    let plugin_data = ctx.plugin_data_service();
-    let val = plugin_data
-        .get_kv(&p.plugin, "doc", Some(p.doc_id), &p.key)
-        .await
-        .map_err(map_plugin_service_error)?
-        .unwrap_or(serde_json::Value::Null);
-    Ok(Json(KvValueResponse { value: val }))
-}
-
-#[utoipa::path(
-    put,
-    path = "/api/plugins/{plugin}/docs/{doc_id}/kv/{key}",
-    request_body = KvValueBody,
-    params(("plugin" = String, Path, description = "Plugin ID"), ("doc_id" = Uuid, Path, description = "Document ID"), ("key" = String, Path, description = "Key")),
-    responses((status = 204)),
-    tag = "Plugins",
-    operation_id = "pluginsPutKv"
-)]
-pub async fn put_kv_value(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(p): Path<KvPath>,
-    Json(body): Json<KvValueBody>,
-) -> Result<StatusCode, StatusCode> {
-    ensure_valid_plugin_id(&p.plugin)?;
-    let bearer_token = bearer.0;
-    let plugin_ctx =
-        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
-            .await?;
-    let actor = plugin_ctx.actor.clone();
-    ctx.authorization()
-        .require_edit(&actor, p.doc_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-
-    ctx.plugin_permissions()
-        .ensure(
-            Some(plugin_ctx.workspace_id),
-            &p.plugin,
-            PERMISSION_DOC_WRITE,
-        )
-        .await
-        .map_err(map_plugin_service_error)?;
-
-    let plugin_data = ctx.plugin_data_service();
-    plugin_data
-        .put_kv(&p.plugin, "doc", Some(p.doc_id), &p.key, &body.value)
-        .await
-        .map_err(map_plugin_service_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-fn ensure_valid_plugin_id(id: &str) -> Result<(), StatusCode> {
-    management::validate_plugin_id(id).map_err(map_plugin_service_error)
-}
-
-fn map_plugin_service_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(_) => StatusCode::INTERNAL_SERVER_ERROR,
-    }
-}
-
-fn extract_doc_id(value: &serde_json::Value) -> Option<Uuid> {
-    value
-        .get("docId")
-        .and_then(|v| v.as_str())
-        .and_then(|s| Uuid::parse_str(s).ok())
-        .or_else(|| {
-            value
-                .get("payload")
-                .and_then(|payload| payload.get("docId"))
-                .and_then(|v| v.as_str())
-                .and_then(|s| Uuid::parse_str(s).ok())
-        })
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/me/plugins/manifest",
-    responses((status = 200, body = [ManifestItem])),
-    tag = "Plugins",
-    operation_id = "pluginsGetManifest"
-)]
-pub async fn get_manifest(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<Vec<ManifestItem>>, StatusCode> {
-    let bearer_token = bearer.0;
-    let plugin_ctx =
-        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), None).await?;
-    let manifests = ctx
-        .plugin_management()
-        .manifests_for_workspace(plugin_ctx.workspace_id, plugin_ctx.user_id)
-        .await
-        .map_err(map_plugin_service_error)?
-        .into_iter()
-        .map(ManifestItem::from)
-        .collect();
-    Ok(Json(manifests))
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct ExecBody {
-    payload: Option<serde_json::Value>,
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/plugins/{plugin}/exec/{action}",
-    request_body = ExecBody,
-    params(
-        ("plugin" = String, Path, description = "Plugin ID"),
-        ("action" = String, Path, description = "Action")
-    ),
-    responses((status = 200, body = ExecResultResponse)),
-    tag = "Plugins",
-    operation_id = "pluginsExecAction"
-)]
-pub async fn exec_action(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path((plugin, action)): Path<(String, String)>,
-    Json(body): Json<ExecBody>,
-) -> Result<Json<ExecResultResponse>, StatusCode> {
-    ensure_valid_plugin_id(&plugin)?;
-    let bearer_token = bearer.0;
-    let plugin_ctx =
-        resolve_plugin_user_context(&ctx, &headers, bearer_token.as_str(), Some(PERM_PLUGIN_RUN))
-            .await?;
-    let actor = plugin_ctx.actor.clone();
-    let doc_id_from_payload = body.payload.as_ref().and_then(extract_doc_id);
-    let doc_id_from_share = if doc_id_from_payload.is_none() {
-        if let access::Actor::ShareToken(token) = &actor {
-            ctx.share_service()
-                .resolve_share_context(token)
-                .await
-                .map_err(map_plugin_service_error)?
-                .and_then(|(_, _, _, shared_id, shared_type, _)| {
-                    if shared_type == "document" {
-                        Some(shared_id)
-                    } else {
-                        None
-                    }
-                })
-        } else {
-            None
-        }
-    } else {
-        None
-    };
-    let effective_doc_id = doc_id_from_payload.or(doc_id_from_share);
-    if let Some(doc_id) = effective_doc_id {
-        let auth = ctx.authorization();
-        if let access::Actor::ShareToken(_) = &actor {
-            auth.require_view(&actor, doc_id)
-                .await
-                .map_err(|_| StatusCode::FORBIDDEN)?;
-        } else {
-            auth.require_edit(&actor, doc_id)
-                .await
-                .map_err(|_| StatusCode::FORBIDDEN)?;
-        }
-    }
-    let allowed_doc_id = match &actor {
-        access::Actor::ShareToken(_) => effective_doc_id,
-        _ => None,
-    };
-    let exec_service = ctx.plugin_execution_service();
-    match exec_service
-        .execute_action(
-            plugin_ctx.workspace_id,
-            plugin_ctx.user_id,
-            &plugin_ctx.permissions,
-            &plugin,
-            &action,
-            body.payload.clone(),
-            allowed_doc_id,
-            &actor,
-        )
-        .await
-        .map_err(map_plugin_service_error)?
-    {
-        Some(result) => Ok(Json(ExecResultResponse::from(result))),
-        None => Ok(Json(ExecResultResponse {
-            ok: false,
-            data: None,
-            effects: vec![],
-            error: Some(json!({ "code": "UNKNOWN_ACTION" })),
-        })),
-    }
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/me/plugins/updates",
-    tag = "Plugins",
-    responses((status = 200, description = "Plugin event stream", content_type = "text/event-stream"))
-)]
-pub async fn sse_updates(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-) -> Result<Sse<impl Stream<Item = Result<Event, Infallible>>>, StatusCode> {
-    // authenticate user (per-user stream)
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-
-    let initial = stream::iter(vec![Ok(Event::default().event("ready").data("{}\n"))]);
-    let event_stream = ctx
-        .subscribe_plugin_events()
-        .await
-        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
-    let broadcast = event_stream.filter_map(move |ev| {
-        let user_id = user_id.clone();
-        async move {
-            if ev.user_id.is_some() && ev.user_id != Some(user_id) {
-                return None;
-            }
-            let payload = ev.payload.to_string();
-            Some(Ok(Event::default().event("update").data(payload)))
-        }
-    });
-    let merged = initial.chain(broadcast);
-    let keepalive = KeepAlive::new()
-        .interval(Duration::from_secs(25))
-        .text(":\n");
-    Ok(Sse::new(merged).keep_alive(keepalive))
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct InstallFromUrlBody {
-    url: String,
-    token: Option<String>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct InstallResponse {
-    id: String,
-    version: String,
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/me/plugins/install-from-url",
-    request_body = InstallFromUrlBody,
-    responses((status = 200, body = InstallResponse)),
-    tag = "Plugins",
-    operation_id = "pluginsInstallFromUrl"
-)]
-pub async fn install_from_url(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(body): Json<InstallFromUrlBody>,
-) -> Result<Json<InstallResponse>, StatusCode> {
-    let bearer_token_raw = bearer.0;
-    let plugin_ctx = resolve_plugin_user_context(
-        &ctx,
-        &headers,
-        bearer_token_raw.as_str(),
-        Some(PERM_PLUGIN_INSTALL),
-    )
-    .await?;
-
-    let management = ctx.plugin_management();
-
-    match management
-        .install_from_url(
-            plugin_ctx.workspace_id,
-            plugin_ctx.user_id,
-            &plugin_ctx.permissions,
-            &body.url,
-            body.token.as_deref(),
-        )
-        .await
-    {
-        Ok(installed) => Ok(Json(InstallResponse {
-            id: installed.id,
-            version: installed.version,
-        })),
-        Err(err) => {
-            tracing::error!(error = ?err, "failed to install plugin from url");
-            match err {
-                InstallPluginError::Download(_) => Err(StatusCode::BAD_GATEWAY),
-                InstallPluginError::Install(inner) => match inner {
-                    crate::application::ports::plugin_installer::PluginInstallError::InvalidPackage(_) => {
-                        Err(StatusCode::BAD_REQUEST)
-                    }
-                    crate::application::ports::plugin_installer::PluginInstallError::Storage(_) => {
-                        Err(StatusCode::INTERNAL_SERVER_ERROR)
-                    }
-                },
-                InstallPluginError::Persist(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
-                InstallPluginError::Event(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
-            }
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UninstallBody {
-    id: String,
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/me/plugins/uninstall",
-    request_body = UninstallBody,
-    responses((status = 204)),
-    tag = "Plugins",
-    operation_id = "pluginsUninstall"
-)]
-pub async fn uninstall(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(body): Json<UninstallBody>,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token_raw = bearer.0;
-    let plugin_ctx = resolve_plugin_user_context(
-        &ctx,
-        &headers,
-        bearer_token_raw.as_str(),
-        Some(PERM_PLUGIN_UNINSTALL),
-    )
-    .await?;
-    let UninstallBody { id } = body;
-    let trimmed_id = id.trim();
-    ensure_valid_plugin_id(trimmed_id)?;
-    ctx.plugin_management()
-        .uninstall(
-            plugin_ctx.workspace_id,
-            plugin_ctx.user_id,
-            &plugin_ctx.permissions,
-            trimmed_id,
-        )
-        .await
-        .map_err(map_plugin_service_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/plugin-assets",
-    params(("token" = Option<String>, Query, description = "Share token (optional)")),
-    responses((status = 200, description = "Plugin asset")),
-    tag = "Plugins",
-    operation_id = "pluginsGetAsset"
-)]
-pub async fn get_plugin_asset(
-    State(ctx): State<AppContext>,
-    Query(params): Query<HashMap<String, String>>,
-) -> Result<Response, StatusCode> {
-    let scope_raw = params
-        .get("scope")
-        .map(|s| s.as_str())
-        .ok_or(StatusCode::BAD_REQUEST)?;
-    let plugin_id = params
-        .get("plugin")
-        .map(|s| s.as_str())
-        .ok_or(StatusCode::BAD_REQUEST)?;
-    let version = params
-        .get("version")
-        .map(|s| s.as_str())
-        .ok_or(StatusCode::BAD_REQUEST)?;
-    let path = params
-        .get("path")
-        .map(|s| s.as_str())
-        .ok_or(StatusCode::BAD_REQUEST)?;
-    let exp = params
-        .get("exp")
-        .map(|s| s.as_str())
-        .ok_or(StatusCode::BAD_REQUEST)?;
-    let expires_at = exp.parse::<i64>().map_err(|_| StatusCode::BAD_REQUEST)?;
-    let sig = params
-        .get("sig")
-        .map(|s| s.as_str())
-        .ok_or(StatusCode::BAD_REQUEST)?;
-    let share_owned = params
-        .get("share")
-        .map(|s| s.trim())
-        .filter(|s| !s.is_empty())
-        .map(|s| s.to_string());
-
-    let scope = match scope_raw {
-        "global" => AssetRequestScope::Global,
-        "user" => {
-            let owner_str = params
-                .get("owner")
-                .map(|s| s.as_str())
-                .ok_or(StatusCode::BAD_REQUEST)?;
-            let owner_id = Uuid::parse_str(owner_str).map_err(|_| StatusCode::BAD_REQUEST)?;
-            AssetRequestScope::User {
-                owner_id,
-                share_token: share_owned.as_deref(),
-            }
-        }
-        _ => return Err(StatusCode::BAD_REQUEST),
-    };
-
-    let payload = ctx
-        .plugin_management()
-        .fetch_asset(PluginAssetRequest {
-            scope,
-            plugin_id,
-            version,
-            path,
-            expires_at,
-            signature: sig,
-        })
-        .await
-        .map_err(map_plugin_service_error)?;
-
-    let mut headers = HeaderMap::new();
-    headers.insert(
-        header::CONTENT_TYPE,
-        HeaderValue::from_str(&payload.content_type)
-            .unwrap_or_else(|_| HeaderValue::from_static("application/octet-stream")),
-    );
-    headers.insert(
-        header::CACHE_CONTROL,
-        HeaderValue::from_static("public, max-age=60"),
-    );
-    headers.insert(
-        header::HeaderName::from_static("x-content-type-options"),
-        HeaderValue::from_static("nosniff"),
-    );
-
-    Ok((headers, payload.bytes).into_response())
-}
diff --git a/api/src/presentation/http/public.rs b/api/src/presentation/http/public.rs
deleted file mode 100644
index e51c0ae4..00000000
--- a/api/src/presentation/http/public.rs
+++ /dev/null
@@ -1,265 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::{Path, State},
-    http::{HeaderMap, StatusCode},
-    routing::{get, post},
-};
-use serde::Serialize;
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::dto::public::PublicDocumentSummaryDto;
-use crate::application::services::errors::ServiceError;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::auth::Bearer;
-use crate::presentation::http::documents::Document;
-use crate::presentation::http::workspace_scope;
-
-// Uses AppContext as router state
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct PublishResponse {
-    pub slug: String,
-    pub public_url: String,
-}
-
-fn map_public_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            tracing::error!(error = ?inner, "public_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/public/documents/{id}",
-    tag = "Public Documents",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, description = "Published", body = PublishResponse))
-)]
-pub async fn publish_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<Json<PublishResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.public_service();
-    let out = service
-        .publish_document(workspace_id, &permissions, id)
-        .await
-        .map_err(map_public_error)?;
-    Ok(Json(PublishResponse {
-        slug: out.slug,
-        public_url: out.public_url,
-    }))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/public/documents/{id}",
-    tag = "Public Documents",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 204, description = "Unpublished"))
-)]
-pub async fn unpublish_document(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let ok = ctx
-        .public_service()
-        .unpublish_document(workspace_id, &permissions, id)
-        .await
-        .map_err(map_public_error)?;
-    if ok {
-        Ok(StatusCode::NO_CONTENT)
-    } else {
-        Err(StatusCode::FORBIDDEN)
-    }
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/public/documents/{id}",
-    tag = "Public Documents",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, description = "Published status", body = PublishResponse))
-)]
-pub async fn get_publish_status(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<Json<PublishResponse>, StatusCode> {
-    // Validate ownership
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let out = ctx
-        .public_service()
-        .get_publish_status(workspace_id, &permissions, id)
-        .await
-        .map_err(map_public_error)?;
-    Ok(Json(PublishResponse {
-        slug: out.slug,
-        public_url: out.public_url,
-    }))
-}
-
-// Slug-based endpoints are intentionally omitted to simplify routing and match legacy pattern strictly.
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct PublicDocumentSummary {
-    pub id: Uuid,
-    pub title: String,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-    pub published_at: chrono::DateTime<chrono::Utc>,
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/public/workspaces/{slug}",
-    tag = "Public Documents",
-    params(("slug" = String, Path, description = "Workspace slug")),
-    responses((status = 200, description = "Public documents for workspace", body = [PublicDocumentSummary]))
-)]
-pub async fn list_workspace_public_documents(
-    State(ctx): State<AppContext>,
-    Path(slug): Path<String>,
-) -> Result<Json<Vec<PublicDocumentSummary>>, StatusCode> {
-    let items = ctx
-        .public_service()
-        .list_workspace_public_documents(&slug)
-        .await
-        .map_err(map_public_error)?;
-    Ok(Json(
-        items
-            .into_iter()
-            .map(|d: PublicDocumentSummaryDto| PublicDocumentSummary {
-                id: d.id,
-                title: d.title,
-                updated_at: d.updated_at,
-                published_at: d.published_at,
-            })
-            .collect(),
-    ))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/public/workspaces/{slug}/{id}",
-    tag = "Public Documents",
-    params(("slug" = String, Path, description = "Workspace slug"), ("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, description = "Document metadata", body = Document))
-)]
-pub async fn get_public_by_workspace_and_id(
-    State(ctx): State<AppContext>,
-    Path((slug, id)): Path<(String, Uuid)>,
-) -> Result<Json<Document>, StatusCode> {
-    let d = ctx
-        .public_service()
-        .get_public_by_workspace_and_id(&slug, id)
-        .await
-        .map_err(map_public_error)?;
-    Ok(Json(Document {
-        id: d.id,
-        owner_id: d.owner_id,
-        workspace_id: d.workspace_id,
-        title: d.title,
-        parent_id: d.parent_id,
-        r#type: d.doc_type,
-        created_at: d.created_at,
-        updated_at: d.updated_at,
-        created_by_plugin: d.created_by_plugin,
-        slug: d.slug,
-        desired_path: d.desired_path,
-        path: d.path,
-        created_by: d.created_by,
-        archived_at: d.archived_at,
-        archived_by: d.archived_by,
-        archived_parent_id: d.archived_parent_id,
-    }))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/public/workspaces/{slug}/{id}/content",
-    tag = "Public Documents",
-    params(("slug" = String, Path, description = "Workspace slug"), ("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, description = "Document content"))
-)]
-pub async fn get_public_content_by_workspace_and_id(
-    State(ctx): State<AppContext>,
-    Path((slug, id)): Path<(String, Uuid)>,
-) -> Result<Json<serde_json::Value>, StatusCode> {
-    let content = ctx
-        .public_service()
-        .get_public_content_by_workspace_and_id(&slug, id)
-        .await
-        .map_err(map_public_error)?;
-    Ok(Json(serde_json::json!({"content": content, "id": id})))
-}
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route(
-            "/documents/:id",
-            post(publish_document)
-                .delete(unpublish_document)
-                .get(get_publish_status),
-        )
-        .route("/workspaces/:slug", get(list_workspace_public_documents))
-        .route("/workspaces/:slug/:id", get(get_public_by_workspace_and_id))
-        .route(
-            "/workspaces/:slug/:id/content",
-            get(get_public_content_by_workspace_and_id),
-        )
-        // legacy aliases
-        .route("/users/:slug", get(list_workspace_public_documents))
-        .route("/users/:slug/:id", get(get_public_by_workspace_and_id))
-        .route(
-            "/users/:slug/:id/content",
-            get(get_public_content_by_workspace_and_id),
-        )
-        .with_state(ctx)
-}
diff --git a/api/src/presentation/http/shares.rs b/api/src/presentation/http/shares.rs
deleted file mode 100644
index 1e47c400..00000000
--- a/api/src/presentation/http/shares.rs
+++ /dev/null
@@ -1,689 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::{Query, State},
-    http::{HeaderMap, StatusCode},
-    routing::{delete, get, post},
-};
-use serde::{Deserialize, Serialize};
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::access;
-use crate::application::dto::shares::{
-    ActiveShareItemDto, ApplicableShareDto, ShareBrowseResponseDto, ShareBrowseTreeItemDto,
-    ShareDocumentDto, ShareItemDto, ShareMountDto,
-};
-use crate::application::services::errors::ServiceError;
-use crate::presentation::context::{AppContext, PresentationConfig};
-use crate::presentation::http::auth;
-use crate::presentation::http::auth::Bearer;
-use crate::presentation::http::workspace_scope;
-
-fn frontend_base(cfg: &PresentationConfig) -> String {
-    cfg.frontend_url
-        .clone()
-        .unwrap_or_else(|| "http://localhost:3000".into())
-}
-
-fn build_share_url(base: &str, document_type: &str, document_id: Uuid, token: &str) -> String {
-    let base = base.trim_end_matches('/');
-    if document_type == "folder" {
-        format!("{}/share/{}", base, token)
-    } else {
-        format!("{}/document/{}?token={}", base, document_id, token)
-    }
-}
-
-fn share_scope(document_type: &str) -> String {
-    if document_type == "folder" {
-        "folder".to_string()
-    } else {
-        "document".to_string()
-    }
-}
-
-fn map_share_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            tracing::error!(error = ?inner, "share_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-// Uses AppContext as router state
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct CreateShareRequest {
-    pub document_id: Uuid,
-    pub permission: Option<String>,
-    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct CreateShareResponse {
-    pub token: String,
-    pub url: String,
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/shares",
-    tag = "Sharing",
-    request_body = CreateShareRequest,
-    responses((status = 200, description = "Share link created", body = CreateShareResponse))
-)]
-pub async fn create_share(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<CreateShareRequest>,
-) -> Result<Json<CreateShareResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let actor = access::Actor::User(user_id);
-    ctx.authorization()
-        .require_edit(&actor, req.document_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-    let permission = req.permission.as_deref().unwrap_or("view");
-    let service = ctx.share_service();
-    let res = service
-        .create_share(
-            workspace_id,
-            user_id,
-            &permissions,
-            req.document_id,
-            permission,
-            req.expires_at,
-        )
-        .await
-        .map_err(map_share_error)?;
-    let base = frontend_base(&ctx.cfg);
-    let url = build_share_url(&base, &res.document_type, res.document_id, &res.token);
-    Ok(Json(CreateShareResponse {
-        token: res.token,
-        url,
-    }))
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ShareItem {
-    pub id: Uuid,
-    pub token: String,
-    pub permission: String,
-    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
-    pub url: String,
-    /// document | folder
-    pub scope: String,
-    /// If present, this document share was materialized from a folder share
-    pub parent_share_id: Option<Uuid>,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct CreateShareMountRequest {
-    pub token: String,
-    pub parent_folder_id: Option<Uuid>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ShareMountItem {
-    pub id: Uuid,
-    pub token: String,
-    pub target_document_id: Uuid,
-    pub target_document_type: String,
-    pub target_title: String,
-    pub permission: String,
-    pub parent_folder_id: Option<Uuid>,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-}
-
-impl From<ShareMountDto> for ShareMountItem {
-    fn from(d: ShareMountDto) -> Self {
-        ShareMountItem {
-            id: d.id,
-            token: d.token,
-            target_document_id: d.target_document_id,
-            target_document_type: d.target_document_type,
-            target_title: d.target_title,
-            permission: d.permission,
-            parent_folder_id: d.parent_folder_id,
-            created_at: d.created_at,
-        }
-    }
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/shares/documents/{id}",
-    tag = "Sharing",
-    params(("id" = Uuid, Path, description = "Document ID")),
-    responses((status = 200, description = "OK", body = [ShareItem]))
-)]
-pub async fn list_document_shares(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<Uuid>,
-) -> Result<Json<Vec<ShareItem>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    // authorization: require edit on the document
-    let actor = access::Actor::User(user_id);
-    ctx.authorization()
-        .require_edit(&actor, id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-    let service = ctx.share_service();
-    let rows: Vec<ShareItemDto> = service
-        .list_document_shares(workspace_id, &permissions, id)
-        .await
-        .map_err(map_share_error)?;
-    let base = frontend_base(&ctx.cfg);
-    let items: Vec<ShareItem> = rows
-        .into_iter()
-        .map(|r| {
-            let ShareItemDto {
-                id,
-                token,
-                permission,
-                expires_at,
-                document_id,
-                document_type,
-                parent_share_id,
-                ..
-            } = r;
-            let url = build_share_url(&base, &document_type, document_id, &token);
-            ShareItem {
-                id,
-                token,
-                permission,
-                expires_at,
-                url,
-                scope: share_scope(&document_type),
-                parent_share_id,
-            }
-        })
-        .collect();
-    Ok(Json(items))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/shares/{token}",
-    tag = "Sharing",
-    params(("token" = String, Path, description = "Share token")),
-    responses((status = 204, description = "Share link deleted"))
-)]
-pub async fn delete_share(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(token): axum::extract::Path<String>,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.share_service();
-    let meta = service
-        .share_document_meta(&token)
-        .await
-        .map_err(map_share_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-    if meta.workspace_id != workspace_id {
-        return Err(StatusCode::FORBIDDEN);
-    }
-    let actor = access::Actor::User(user_id);
-    ctx.authorization()
-        .require_edit(&actor, meta.document_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-    let ok = service
-        .delete_share(workspace_id, &permissions, &token)
-        .await
-        .map_err(map_share_error)?;
-    if ok {
-        Ok(StatusCode::NO_CONTENT)
-    } else {
-        Err(StatusCode::NOT_FOUND)
-    }
-}
-
-#[derive(Debug, Deserialize)]
-pub struct ApplicableQuery {
-    pub doc_id: Uuid,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ApplicableShareItem {
-    pub token: String,
-    pub permission: String,
-    /// 'document' or 'folder'
-    pub scope: String,
-    pub excluded: bool,
-}
-
-impl From<ApplicableShareDto> for ApplicableShareItem {
-    fn from(d: ApplicableShareDto) -> Self {
-        ApplicableShareItem {
-            token: d.token,
-            permission: d.permission,
-            scope: d.scope,
-            excluded: d.excluded,
-        }
-    }
-}
-
-#[utoipa::path(get, path = "/api/shares/applicable", tag = "Sharing",
-    params(("doc_id" = Uuid, Query, description = "Document ID")),
-    responses((status = 200, description = "Shares that include the document", body = [ApplicableShareItem])))]
-pub async fn list_applicable_shares(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Query(q): Query<ApplicableQuery>,
-) -> Result<Json<Vec<ApplicableShareItem>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    // authorize: require view on the document
-    let actor = access::Actor::User(user_id);
-    ctx.authorization()
-        .require_view(&actor, q.doc_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-
-    let service = ctx.share_service();
-    let rows = service
-        .list_applicable(workspace_id, &permissions, q.doc_id)
-        .await
-        .map_err(map_share_error)?;
-    let items: Vec<ApplicableShareItem> = rows.into_iter().map(Into::into).collect();
-    Ok(Json(items))
-}
-
-// Share token validation for document access
-#[derive(Debug, Deserialize)]
-pub struct ShareTokenQuery {
-    pub token: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ShareDocumentResponse {
-    pub id: Uuid,
-    pub title: String,
-    pub permission: String,
-    pub content: Option<String>,
-}
-
-impl From<ShareDocumentDto> for ShareDocumentResponse {
-    fn from(d: ShareDocumentDto) -> Self {
-        ShareDocumentResponse {
-            id: d.id,
-            title: d.title,
-            permission: d.permission,
-            content: d.content,
-        }
-    }
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/shares/validate",
-    tag = "Sharing",
-    params(("token" = String, Query, description = "Share token")),
-    responses((status = 200, description = "Document info", body = ShareDocumentResponse))
-)]
-pub async fn validate_share_token(
-    State(ctx): State<AppContext>,
-    Query(query): Query<ShareTokenQuery>,
-) -> Result<Json<ShareDocumentResponse>, StatusCode> {
-    let service = ctx.share_service();
-    let res = service
-        .validate_token(&query.token)
-        .await
-        .map_err(map_share_error)?;
-    let out: ShareDocumentResponse = res.map(Into::into).ok_or(StatusCode::NOT_FOUND)?;
-    Ok(Json(out))
-}
-
-// ---- List active shares for current user ----
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ActiveShareItem {
-    pub id: Uuid,
-    pub token: String,
-    pub permission: String,
-    pub expires_at: Option<chrono::DateTime<chrono::Utc>>,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub document_id: Uuid,
-    pub document_title: String,
-    /// 'document' or 'folder'
-    pub document_type: String,
-    pub url: String,
-    pub parent_share_id: Option<Uuid>,
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/shares/active",
-    tag = "Sharing",
-    responses((status = 200, description = "Active shares", body = [ActiveShareItem]))
-)]
-pub async fn list_active_shares(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<Vec<ActiveShareItem>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = crate::presentation::http::auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.share_service();
-    let items: Vec<ActiveShareItemDto> = service
-        .list_active(workspace_id, &permissions)
-        .await
-        .map_err(map_share_error)?;
-    let base = frontend_base(&ctx.cfg);
-    let out: Vec<ActiveShareItem> = items
-        .into_iter()
-        .map(|r| {
-            let url = build_share_url(&base, &r.document_type, r.document_id, &r.token);
-            ActiveShareItem {
-                id: r.id,
-                token: r.token,
-                permission: r.permission,
-                expires_at: r.expires_at,
-                created_at: r.created_at,
-                document_id: r.document_id,
-                document_title: r.document_title,
-                document_type: r.document_type,
-                url,
-                parent_share_id: r.parent_share_id,
-            }
-        })
-        .collect();
-    Ok(Json(out))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/shares/mounts",
-    tag = "Sharing",
-    request_body = CreateShareMountRequest,
-    responses((status = 200, description = "Saved share mount", body = ShareMountItem))
-)]
-pub async fn create_share_mount(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(req): Json<CreateShareMountRequest>,
-) -> Result<Json<ShareMountItem>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.share_service();
-    let item = service
-        .save_share_mount(
-            workspace_id,
-            user_id,
-            &permissions,
-            &req.token,
-            req.parent_folder_id,
-        )
-        .await
-        .map_err(map_share_error)?;
-    Ok(Json(item.into()))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/shares/mounts",
-    tag = "Sharing",
-    responses((status = 200, description = "Share mounts", body = [ShareMountItem]))
-)]
-pub async fn list_share_mounts(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<Vec<ShareMountItem>>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.share_service();
-    let items: Vec<ShareMountDto> = service
-        .list_share_mounts(workspace_id, &permissions)
-        .await
-        .map_err(map_share_error)?;
-    Ok(Json(items.into_iter().map(Into::into).collect()))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/shares/mounts/{id}",
-    tag = "Sharing",
-    params(("id" = Uuid, Path, description = "Share mount ID")),
-    responses((status = 204, description = "Share mount removed"))
-)]
-pub async fn delete_share_mount(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(id): axum::extract::Path<Uuid>,
-) -> Result<StatusCode, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.share_service();
-    let deleted = service
-        .delete_share_mount(workspace_id, &permissions, id)
-        .await
-        .map_err(map_share_error)?;
-    if deleted {
-        Ok(StatusCode::NO_CONTENT)
-    } else {
-        Err(StatusCode::NOT_FOUND)
-    }
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ShareBrowseTreeItem {
-    pub id: Uuid,
-    pub title: String,
-    pub parent_id: Option<Uuid>,
-    #[schema(example = "document")]
-    pub r#type: String,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-    pub updated_at: chrono::DateTime<chrono::Utc>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct ShareBrowseResponse {
-    pub tree: Vec<ShareBrowseTreeItem>,
-}
-
-impl From<ShareBrowseTreeItemDto> for ShareBrowseTreeItem {
-    fn from(t: ShareBrowseTreeItemDto) -> Self {
-        ShareBrowseTreeItem {
-            id: t.id,
-            title: t.title,
-            parent_id: t.parent_id,
-            r#type: t.r#type,
-            created_at: t.created_at,
-            updated_at: t.updated_at,
-        }
-    }
-}
-
-impl From<ShareBrowseResponseDto> for ShareBrowseResponse {
-    fn from(d: ShareBrowseResponseDto) -> Self {
-        ShareBrowseResponse {
-            tree: d.tree.into_iter().map(Into::into).collect(),
-        }
-    }
-}
-
-#[utoipa::path(get, path = "/api/shares/browse", tag = "Sharing",
-    params(("token" = String, Query, description = "Share token")),
-    responses((status = 200, description = "Share tree", body = ShareBrowseResponse)))]
-pub async fn browse_share(
-    State(ctx): State<AppContext>,
-    Query(query): Query<ShareTokenQuery>,
-) -> Result<Json<ShareBrowseResponse>, StatusCode> {
-    let service = ctx.share_service();
-    let res = service
-        .browse_share(&query.token)
-        .await
-        .map_err(map_share_error)?;
-    let out: ShareBrowseResponse = res.map(Into::into).ok_or(StatusCode::NOT_FOUND)?;
-    Ok(Json(out))
-}
-
-// Helper function to validate share token (for WebSocket)
-// validate_share_token_for_doc was moved to `access` layer via ShareAccessPort; no local implementation here
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route("/shares", post(create_share))
-        .route(
-            "/shares/mounts",
-            post(create_share_mount).get(list_share_mounts),
-        )
-        .route("/shares/browse", get(browse_share))
-        .route("/shares/validate", get(validate_share_token))
-        .route("/shares/documents/:id", get(list_document_shares))
-        .route("/shares/applicable", get(list_applicable_shares))
-        .route(
-            "/shares/folders/:token/materialize",
-            post(materialize_folder_share),
-        )
-        .route("/shares/active", get(list_active_shares))
-        .route("/shares/mounts/:id", delete(delete_share_mount))
-        .route("/shares/:token", delete(delete_share))
-        .with_state(ctx)
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct MaterializeResponse {
-    pub created: i64,
-}
-
-#[utoipa::path(post, path = "/api/shares/folders/{token}/materialize", tag = "Sharing",
-    params(("token" = String, Path, description = "Folder share token")),
-    responses((status = 200, description = "Created doc shares", body = MaterializeResponse))
-)]
-pub async fn materialize_folder_share(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    axum::extract::Path(token): axum::extract::Path<String>,
-) -> Result<Json<MaterializeResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = uuid::Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.share_service();
-    let meta = service
-        .share_document_meta(&token)
-        .await
-        .map_err(map_share_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-    if meta.workspace_id != workspace_id {
-        return Err(StatusCode::FORBIDDEN);
-    }
-    let actor = access::Actor::User(user_id);
-    ctx.authorization()
-        .require_edit(&actor, meta.document_id)
-        .await
-        .map_err(|_| StatusCode::FORBIDDEN)?;
-    let created = service
-        .materialize_folder_share(workspace_id, user_id, &permissions, &token)
-        .await
-        .map_err(map_share_error)?;
-    Ok(Json(MaterializeResponse { created }))
-}
diff --git a/api/src/presentation/http/shortcuts.rs b/api/src/presentation/http/shortcuts.rs
deleted file mode 100644
index c9ed4a26..00000000
--- a/api/src/presentation/http/shortcuts.rs
+++ /dev/null
@@ -1,154 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::State,
-    http::{HeaderMap, StatusCode},
-    routing::get,
-};
-use chrono::{DateTime, Utc};
-use serde::{Deserialize, Serialize};
-use serde_json::{Map, Value};
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::dto::user_shortcuts::UserShortcutProfileDto;
-use crate::application::services::errors::ServiceError;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::{
-    auth::{self, Bearer},
-    workspace_scope,
-};
-use tracing::error;
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct UserShortcutResponse {
-    #[schema(value_type = Object)]
-    pub bindings: Value,
-    #[schema(example = "")]
-    pub leader_key: Option<String>,
-    pub updated_at: Option<DateTime<Utc>>,
-}
-
-impl UserShortcutResponse {
-    fn empty() -> Self {
-        Self {
-            bindings: Value::Object(Map::new()),
-            leader_key: None,
-            updated_at: None,
-        }
-    }
-}
-
-impl From<UserShortcutProfileDto> for UserShortcutResponse {
-    fn from(value: UserShortcutProfileDto) -> Self {
-        Self {
-            bindings: value.bindings,
-            leader_key: value.leader_key,
-            updated_at: Some(value.updated_at),
-        }
-    }
-}
-
-fn map_shortcut_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "user_shortcut_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateUserShortcutRequest {
-    #[schema(value_type = Object)]
-    #[serde(default = "Value::default")]
-    pub bindings: Value,
-    #[schema(example = "")]
-    pub leader_key: Option<String>,
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/me/shortcuts",
-    tag = "Auth",
-    responses((status = 200, body = UserShortcutResponse))
-)]
-pub async fn get_user_shortcuts(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-) -> Result<Json<UserShortcutResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer(&ctx, Bearer(bearer_token.clone())).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.user_shortcut_service();
-    let profile = service
-        .get_profile(workspace_id, user_id, &permissions)
-        .await
-        .map_err(map_shortcut_error)?;
-    let response = profile
-        .map(UserShortcutResponse::from)
-        .unwrap_or_else(UserShortcutResponse::empty);
-    Ok(Json(response))
-}
-
-#[utoipa::path(
-    put,
-    path = "/api/me/shortcuts",
-    tag = "Auth",
-    request_body = UpdateUserShortcutRequest,
-    responses((status = 200, body = UserShortcutResponse))
-)]
-pub async fn update_user_shortcuts(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(payload): Json<UpdateUserShortcutRequest>,
-) -> Result<Json<UserShortcutResponse>, StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer(&ctx, Bearer(bearer_token.clone())).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        user_id,
-    )
-    .await?;
-    let permissions =
-        workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, user_id).await?;
-    let service = ctx.user_shortcut_service();
-    let result = service
-        .update_profile(
-            workspace_id,
-            user_id,
-            &permissions,
-            payload.bindings,
-            payload.leader_key,
-        )
-        .await
-        .map_err(map_shortcut_error)?;
-    Ok(Json(UserShortcutResponse::from(result)))
-}
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route(
-            "/me/shortcuts",
-            get(get_user_shortcuts).put(update_user_shortcuts),
-        )
-        .with_state(ctx)
-}
diff --git a/api/src/presentation/http/storage_ingest.rs b/api/src/presentation/http/storage_ingest.rs
deleted file mode 100644
index 6a961a3d..00000000
--- a/api/src/presentation/http/storage_ingest.rs
+++ /dev/null
@@ -1,125 +0,0 @@
-use axum::{Json, Router, extract::State, http::HeaderMap, routing::post};
-use serde::{Deserialize, Serialize};
-use serde_json::Value;
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::ports::storage_ingest_queue::{StorageIngestKind, StorageIngestQueue};
-use crate::application::services::storage_ingest::normalize_repo_path;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::auth::{self, Bearer};
-use crate::presentation::http::workspace_scope;
-
-#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)]
-pub struct IngestBatchRequest {
-    pub events: Vec<IngestEventRequest>,
-}
-
-#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)]
-#[serde(rename_all = "lowercase")]
-pub enum IngestKindParam {
-    Upsert,
-    Delete,
-}
-
-impl From<IngestKindParam> for StorageIngestKind {
-    fn from(value: IngestKindParam) -> Self {
-        match value {
-            IngestKindParam::Upsert => StorageIngestKind::Upsert,
-            IngestKindParam::Delete => StorageIngestKind::Delete,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)]
-pub struct IngestEventRequest {
-    pub repo_path: String,
-    pub kind: IngestKindParam,
-    pub backend: Option<String>,
-    pub content_hash: Option<String>,
-    pub payload: Option<Value>,
-}
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route("/storage/ingest", post(enqueue_ingest_events))
-        .with_state(ctx)
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/storage/ingest",
-    tag = "Storage",
-    request_body = IngestBatchRequest,
-    responses((status = 202, description = "Events enqueued"), (status = 400, description = "Invalid request")),
-)]
-pub async fn enqueue_ingest_events(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Json(body): Json<IngestBatchRequest>,
-) -> Result<axum::http::StatusCode, axum::http::StatusCode> {
-    let bearer_token = bearer.0.clone();
-    let sub = auth::validate_bearer(&ctx, Bearer(bearer_token.clone()))
-        .await?
-        .parse::<Uuid>()
-        .map_err(|_| axum::http::StatusCode::UNAUTHORIZED)?;
-    let workspace_id = workspace_scope::resolve_active_workspace_id(
-        &ctx,
-        &headers,
-        Some(bearer_token.as_str()),
-        sub,
-    )
-    .await
-    .map_err(|_| axum::http::StatusCode::FORBIDDEN)?;
-    let permissions = workspace_scope::resolve_workspace_permissions(&ctx, workspace_id, sub)
-        .await
-        .map_err(|_| axum::http::StatusCode::FORBIDDEN)?;
-    let queue = ctx.storage_ingest_queue();
-    let snapshot = permissions.to_vec();
-    enqueue_batch(queue.as_ref(), workspace_id, sub, &snapshot, &body)
-        .await
-        .map(|count| {
-            tracing::info!(user_id = %sub, events = count, "storage_ingest_events_enqueued");
-            axum::http::StatusCode::ACCEPTED
-        })
-        .map_err(|err| {
-            tracing::error!(error = ?err, "storage_ingest_enqueue_failed");
-            axum::http::StatusCode::INTERNAL_SERVER_ERROR
-        })
-}
-
-async fn enqueue_batch(
-    queue: &dyn StorageIngestQueue,
-    workspace_id: Uuid,
-    actor_id: Uuid,
-    permission_snapshot: &[String],
-    body: &IngestBatchRequest,
-) -> anyhow::Result<usize> {
-    let mut processed = 0usize;
-    for event in &body.events {
-        let repo_path = event.repo_path.trim();
-        if repo_path.is_empty() {
-            continue;
-        }
-        let Some(clean_repo) = normalize_repo_path(repo_path) else {
-            tracing::warn!(repo_path, "storage_ingest_invalid_repo_path_request");
-            continue;
-        };
-        queue
-            .enqueue_event(
-                workspace_id,
-                actor_id,
-                Some(actor_id),
-                &clean_repo,
-                event.backend.as_deref().unwrap_or("api"),
-                event.kind.clone().into(),
-                event.content_hash.as_deref(),
-                event.payload.clone(),
-                permission_snapshot,
-            )
-            .await?;
-        processed += 1;
-    }
-    Ok(processed)
-}
diff --git a/api/src/presentation/http/tags.rs b/api/src/presentation/http/tags.rs
deleted file mode 100644
index b85e0899..00000000
--- a/api/src/presentation/http/tags.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::{Query, State},
-    http::StatusCode,
-    routing::get,
-};
-use serde::Serialize;
-use tracing::error;
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::dto::tags::TagItemDto;
-use crate::application::services::errors::ServiceError;
-use crate::domain::workspaces::permissions::PERM_DOC_VIEW;
-use crate::presentation::context::AppContext;
-use crate::presentation::http::{
-    auth::{self, Bearer},
-    workspace_scope,
-};
-
-#[derive(Serialize, ToSchema)]
-pub struct TagItem {
-    pub name: String,
-    pub count: i64,
-}
-
-impl From<TagItemDto> for TagItem {
-    fn from(d: TagItemDto) -> Self {
-        TagItem {
-            name: d.name,
-            count: d.count,
-        }
-    }
-}
-
-fn map_tag_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            error!(error = ?inner, "tag_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-#[utoipa::path(get, path = "/api/tags", tag = "Tags",
-    params(("q" = Option<String>, Query, description = "Filter contains")),
-    responses((status = 200, body = [TagItem])))]
-pub async fn list_tags(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: axum::http::HeaderMap,
-    q: Option<Query<std::collections::HashMap<String, String>>>,
-) -> Result<Json<Vec<TagItem>>, StatusCode> {
-    let sub = auth::validate_bearer_public(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace_id =
-        workspace_scope::resolve_active_workspace_id(&ctx, &headers, None, user_id).await?;
-    workspace_scope::ensure_workspace_permission(&ctx, workspace_id, user_id, PERM_DOC_VIEW)
-        .await?;
-    let filter = q.and_then(|Query(m)| m.get("q").cloned());
-    let service = ctx.tag_service();
-    let items: Vec<TagItemDto> = service
-        .list(workspace_id, filter)
-        .await
-        .map_err(map_tag_error)?;
-    let out: Vec<TagItem> = items.into_iter().map(Into::into).collect();
-    Ok(Json(out))
-}
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new().route("/tags", get(list_tags)).with_state(ctx)
-}
diff --git a/api/src/presentation/http/workspaces.rs b/api/src/presentation/http/workspaces.rs
deleted file mode 100644
index 9e2a6f7a..00000000
--- a/api/src/presentation/http/workspaces.rs
+++ /dev/null
@@ -1,1163 +0,0 @@
-use axum::routing::{delete, get, patch, post};
-use axum::{
-    Json, Router,
-    response::{IntoResponse, Response},
-};
-use axum::{
-    extract::{Path, Query, State},
-    http::{HeaderMap, HeaderValue, StatusCode},
-};
-use chrono::{DateTime, Utc};
-use serde::{Deserialize, Serialize};
-use serde_json::{Value, json};
-use utoipa::ToSchema;
-use uuid::Uuid;
-
-use crate::application::access;
-use crate::application::ports::workspace_repository::{
-    WorkspaceInvitationRecord, WorkspaceListItem, WorkspaceMemberDetail, WorkspaceRoleRecord,
-};
-use crate::application::services::auth::user_sessions::SessionMetadata;
-use crate::application::services::errors::ServiceError;
-use crate::domain::workspaces::permissions::{
-    PERM_DOC_VIEW, PERM_MEMBER_INVITE, PERM_MEMBER_REMOVE, PERM_MEMBER_UPDATE_ROLE,
-    PERM_MEMBER_VIEW, PERM_WORKSPACE_DELETE, PERM_WORKSPACE_UPDATE,
-};
-use crate::presentation::context::AppContext;
-use crate::presentation::http::auth::{
-    self, Bearer, apply_session_cookies, extract_client_ip, extract_refresh_token,
-    extract_user_agent,
-};
-#[allow(unused_imports)]
-use crate::presentation::http::documents::DocumentDownloadBinary;
-use crate::presentation::http::documents::DownloadFormat;
-
-pub fn routes(ctx: AppContext) -> Router {
-    Router::new()
-        .route("/workspaces", get(list_workspaces).post(create_workspace))
-        .route(
-            "/workspaces/:id",
-            get(get_workspace_detail)
-                .put(update_workspace)
-                .delete(delete_workspace),
-        )
-        .route("/workspaces/:id/leave", post(leave_workspace))
-        .route("/workspaces/:id/switch", post(switch_workspace))
-        .route("/workspaces/:id/members", get(list_members))
-        .route(
-            "/workspaces/:id/members/:user_id",
-            patch(update_member_role).delete(remove_member),
-        )
-        .route(
-            "/workspaces/:id/permissions",
-            get(get_workspace_permissions),
-        )
-        .route("/workspaces/:id/roles", get(list_roles).post(create_role))
-        .route(
-            "/workspaces/:id/roles/:role_id",
-            patch(update_role).delete(delete_role),
-        )
-        .route(
-            "/workspaces/:id/invitations",
-            get(list_invitations).post(create_invitation),
-        )
-        .route(
-            "/workspaces/:id/invitations/:invitation_id",
-            delete(revoke_invitation),
-        )
-        .route("/workspaces/:id/download", get(download_workspace_archive))
-        .route(
-            "/workspace-invitations/:token/accept",
-            post(accept_invitation),
-        )
-        .with_state(ctx)
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct WorkspaceResponse {
-    pub id: Uuid,
-    pub name: String,
-    pub slug: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub icon: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub description: Option<String>,
-    pub is_personal: bool,
-    pub role_kind: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub system_role: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub custom_role_id: Option<Uuid>,
-    pub is_default: bool,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct CreateWorkspaceRequest {
-    pub name: String,
-    pub icon: Option<String>,
-    pub description: Option<String>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct WorkspaceMemberResponse {
-    pub workspace_id: Uuid,
-    pub user_id: Uuid,
-    pub email: String,
-    pub name: String,
-    pub role_kind: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub system_role: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub custom_role_id: Option<Uuid>,
-    pub is_default: bool,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateMemberRoleRequest {
-    pub role_kind: String,
-    pub system_role: Option<String>,
-    pub custom_role_id: Option<Uuid>,
-}
-
-#[derive(Debug, Serialize, Deserialize, ToSchema, Clone)]
-pub struct PermissionOverridePayload {
-    pub permission: String,
-    pub allowed: bool,
-}
-
-#[derive(Debug, Deserialize, ToSchema, Default)]
-pub struct DownloadWorkspaceQuery {
-    #[serde(default)]
-    pub format: DownloadFormat,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct WorkspaceRoleResponse {
-    pub id: Uuid,
-    pub workspace_id: Uuid,
-    pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub description: Option<String>,
-    pub base_role: String,
-    pub priority: i32,
-    pub overrides: Vec<PermissionOverridePayload>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct WorkspaceInvitationResponse {
-    pub id: Uuid,
-    pub workspace_id: Uuid,
-    pub email: String,
-    pub role_kind: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub system_role: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub custom_role_id: Option<Uuid>,
-    pub invited_by: Uuid,
-    pub token: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub expires_at: Option<DateTime<Utc>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub accepted_by: Option<Uuid>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub accepted_at: Option<DateTime<Utc>>,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub revoked_at: Option<DateTime<Utc>>,
-    pub created_at: DateTime<Utc>,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct CreateWorkspaceRoleRequest {
-    pub name: String,
-    pub base_role: String,
-    pub description: Option<String>,
-    pub priority: Option<i32>,
-    pub overrides: Option<Vec<PermissionOverridePayload>>,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateWorkspaceRoleRequest {
-    pub name: Option<String>,
-    pub base_role: Option<String>,
-    pub description: Option<String>,
-    pub priority: Option<i32>,
-    pub overrides: Option<Vec<PermissionOverridePayload>>,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct UpdateWorkspaceRequest {
-    pub name: Option<String>,
-    pub icon: Option<String>,
-    pub description: Option<String>,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct SwitchWorkspaceResponse {
-    pub access_token: String,
-}
-
-#[derive(Debug, Serialize, ToSchema)]
-pub struct WorkspacePermissionsResponse {
-    pub workspace_id: Uuid,
-    pub permissions: Vec<String>,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-pub struct CreateWorkspaceInvitationRequest {
-    pub email: String,
-    pub role_kind: String,
-    pub system_role: Option<String>,
-    pub custom_role_id: Option<Uuid>,
-    pub expires_at: Option<DateTime<Utc>>,
-}
-
-fn to_response(
-    row: crate::application::ports::workspace_repository::WorkspaceListItem,
-) -> WorkspaceResponse {
-    WorkspaceResponse {
-        id: row.id,
-        name: row.name,
-        slug: row.slug,
-        icon: row.icon,
-        description: row.description,
-        is_personal: row.is_personal,
-        role_kind: row.role_kind,
-        system_role: row.system_role,
-        custom_role_id: row.custom_role_id,
-        is_default: row.is_default,
-    }
-}
-
-pub(crate) fn map_service_error(err: ServiceError) -> StatusCode {
-    match err {
-        ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-        ServiceError::Forbidden => StatusCode::FORBIDDEN,
-        ServiceError::Conflict => StatusCode::CONFLICT,
-        ServiceError::NotFound => StatusCode::NOT_FOUND,
-        ServiceError::BadRequest(_) => StatusCode::BAD_REQUEST,
-        ServiceError::Unexpected(inner) => {
-            tracing::error!(error = ?inner, "workspace_service_error");
-            StatusCode::INTERNAL_SERVER_ERROR
-        }
-    }
-}
-
-fn member_response_from(detail: WorkspaceMemberDetail) -> WorkspaceMemberResponse {
-    WorkspaceMemberResponse {
-        workspace_id: detail.workspace_id,
-        user_id: detail.user_id,
-        email: detail.user_email,
-        name: detail.user_name,
-        role_kind: detail.role_kind,
-        system_role: detail.system_role,
-        custom_role_id: detail.custom_role_id,
-        is_default: detail.is_default,
-    }
-}
-
-fn role_response_from(record: WorkspaceRoleRecord) -> WorkspaceRoleResponse {
-    WorkspaceRoleResponse {
-        id: record.id,
-        workspace_id: record.workspace_id,
-        name: record.name,
-        description: record.description,
-        base_role: record.base_role,
-        priority: record.priority,
-        overrides: record
-            .overrides
-            .into_iter()
-            .map(|(permission, allowed)| PermissionOverridePayload {
-                permission,
-                allowed,
-            })
-            .collect(),
-    }
-}
-
-fn invitation_response_from(record: WorkspaceInvitationRecord) -> WorkspaceInvitationResponse {
-    WorkspaceInvitationResponse {
-        id: record.id,
-        workspace_id: record.workspace_id,
-        email: record.email,
-        role_kind: record.role_kind,
-        system_role: record.system_role,
-        custom_role_id: record.custom_role_id,
-        invited_by: record.invited_by,
-        token: record.token,
-        expires_at: record.expires_at,
-        accepted_by: record.accepted_by,
-        accepted_at: record.accepted_at,
-        revoked_at: record.revoked_at,
-        created_at: record.created_at,
-    }
-}
-
-async fn require_permission(
-    ctx: &AppContext,
-    workspace_id: Uuid,
-    user_id: Uuid,
-    permission: &str,
-) -> Result<(), StatusCode> {
-    let set = ctx
-        .workspace_service()
-        .resolve_permission_set(workspace_id, user_id)
-        .await
-        .map_err(map_service_error)?
-        .ok_or(StatusCode::FORBIDDEN)?;
-    if set.allows(permission) {
-        Ok(())
-    } else {
-        Err(StatusCode::FORBIDDEN)
-    }
-}
-
-async fn require_any_permission(
-    ctx: &AppContext,
-    workspace_id: Uuid,
-    user_id: Uuid,
-    permissions: &[&str],
-) -> Result<(), StatusCode> {
-    if permissions.is_empty() {
-        return Err(StatusCode::FORBIDDEN);
-    }
-    let set = ctx
-        .workspace_service()
-        .resolve_permission_set(workspace_id, user_id)
-        .await
-        .map_err(map_service_error)?
-        .ok_or(StatusCode::FORBIDDEN)?;
-    if permissions.iter().any(|perm| set.allows(perm)) {
-        Ok(())
-    } else {
-        Err(StatusCode::FORBIDDEN)
-    }
-}
-
-fn validate_base_role(role: &str) -> bool {
-    matches!(role, "viewer" | "editor" | "admin")
-}
-
-fn normalize_overrides(
-    overrides: Option<Vec<PermissionOverridePayload>>,
-) -> Result<Vec<(String, bool)>, StatusCode> {
-    let mut out = Vec::new();
-    if let Some(items) = overrides {
-        for item in items {
-            let perm = item.permission.trim();
-            if perm.is_empty() {
-                return Err(StatusCode::BAD_REQUEST);
-            }
-            out.push((perm.to_string(), item.allowed));
-        }
-    }
-    Ok(out)
-}
-
-#[utoipa::path(get, path = "/api/workspaces", tag = "Workspaces", responses((status = 200, body = [WorkspaceResponse])))]
-pub async fn list_workspaces(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-) -> Result<Json<Vec<WorkspaceResponse>>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let items = ctx
-        .workspace_service()
-        .list_for_user(user_id)
-        .await
-        .map_err(map_service_error)?
-        .into_iter()
-        .map(to_response)
-        .collect();
-    Ok(Json(items))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/workspaces/{id}/members",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 200, body = [WorkspaceMemberResponse]))
-)]
-pub async fn list_members(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Vec<WorkspaceMemberResponse>>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, id, user_id, PERM_MEMBER_VIEW).await?;
-    let members = ctx
-        .workspace_service()
-        .list_members(id)
-        .await
-        .map_err(map_service_error)?
-        .into_iter()
-        .map(member_response_from)
-        .collect();
-    Ok(Json(members))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/workspaces/{id}/roles",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 200, body = [WorkspaceRoleResponse]))
-)]
-pub async fn list_roles(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Vec<WorkspaceRoleResponse>>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_any_permission(
-        &ctx,
-        id,
-        user_id,
-        &[
-            PERM_MEMBER_VIEW,
-            PERM_MEMBER_UPDATE_ROLE,
-            PERM_MEMBER_INVITE,
-        ],
-    )
-    .await?;
-    let roles = ctx
-        .workspace_service()
-        .list_roles(id)
-        .await
-        .map_err(map_service_error)?
-        .into_iter()
-        .map(role_response_from)
-        .collect();
-    Ok(Json(roles))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/workspaces/{id}/invitations",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 200, body = [WorkspaceInvitationResponse]))
-)]
-pub async fn list_invitations(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<Json<Vec<WorkspaceInvitationResponse>>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, id, user_id, PERM_MEMBER_INVITE).await?;
-    let invitations = ctx
-        .workspace_service()
-        .list_invitations(id)
-        .await
-        .map_err(map_service_error)?
-        .into_iter()
-        .map(invitation_response_from)
-        .collect();
-    Ok(Json(invitations))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/workspaces/{id}/invitations",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    request_body = CreateWorkspaceInvitationRequest,
-    responses((status = 200, body = WorkspaceInvitationResponse))
-)]
-pub async fn create_invitation(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-    Json(body): Json<CreateWorkspaceInvitationRequest>,
-) -> Result<Json<WorkspaceInvitationResponse>, StatusCode> {
-    if body.email.trim().is_empty() {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, id, user_id, PERM_MEMBER_INVITE).await?;
-    let record = ctx
-        .workspace_service()
-        .create_invitation(
-            id,
-            user_id,
-            &body.email,
-            body.role_kind.as_str(),
-            body.system_role.as_deref(),
-            body.custom_role_id,
-            body.expires_at,
-        )
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(invitation_response_from(record)))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/workspaces/{id}/invitations/{invitation_id}",
-    tag = "Workspaces",
-    params(
-        ("id" = Uuid, Path, description = "Workspace ID"),
-        ("invitation_id" = Uuid, Path, description = "Invitation ID"),
-    ),
-    responses((status = 200, body = WorkspaceInvitationResponse))
-)]
-pub async fn revoke_invitation(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path((workspace_id, invitation_id)): Path<(Uuid, Uuid)>,
-) -> Result<Json<WorkspaceInvitationResponse>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, workspace_id, user_id, PERM_MEMBER_INVITE).await?;
-    let record = ctx
-        .workspace_service()
-        .revoke_invitation(workspace_id, invitation_id)
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(invitation_response_from(record)))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/workspaces/{id}/download",
-    tag = "Workspaces",
-    params(
-        ("id" = Uuid, Path, description = "Workspace ID"),
-        ("format" = Option<DownloadFormat>, Query, description = "Download format (archive only)")
-    ),
-    responses(
-        (status = 200, description = "Workspace download", body = DocumentDownloadBinary, content_type = "application/octet-stream"),
-        (status = 401, description = "Unauthorized"),
-        (status = 404, description = "Workspace not found")
-    )
-)]
-pub async fn download_workspace_archive(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-    Query(params): Query<DownloadWorkspaceQuery>,
-) -> Result<Response, (StatusCode, Json<Value>)> {
-    let error_response = |status: StatusCode, code: &str, message: String| {
-        (
-            status,
-            Json(json!({
-                "error": code,
-                "message": message,
-            })),
-        )
-    };
-
-    let sub = auth::validate_bearer(&ctx, bearer)
-        .await
-        .map_err(|status| error_response(status, "unauthorized", "Unauthorized".to_string()))?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| {
-        error_response(
-            StatusCode::UNAUTHORIZED,
-            "unauthorized",
-            "Unauthorized".to_string(),
-        )
-    })?;
-
-    require_permission(&ctx, id, user_id, PERM_DOC_VIEW)
-        .await
-        .map_err(|status| error_response(status, "forbidden", "Forbidden".to_string()))?;
-
-    let workspace = ctx
-        .workspace_service()
-        .get_workspace(id)
-        .await
-        .map_err(|err| {
-            let status = map_service_error(err);
-            let (code, message) = if status == StatusCode::NOT_FOUND {
-                ("not_found", "Workspace not found".to_string())
-            } else {
-                ("internal", "Failed to load workspace".to_string())
-            };
-            error_response(status, code, message)
-        })?
-        .ok_or_else(|| {
-            error_response(
-                StatusCode::NOT_FOUND,
-                "not_found",
-                "Workspace not found".to_string(),
-            )
-        })?;
-
-    let actor = access::Actor::User(user_id);
-    let download = ctx
-        .document_service()
-        .download_workspace_root(&actor, id, &workspace.name, params.format.into())
-        .await
-        .map_err(|err| match err {
-            ServiceError::Unauthorized | ServiceError::TokenExpired | ServiceError::Forbidden => {
-                error_response(StatusCode::FORBIDDEN, "forbidden", "Forbidden".to_string())
-            }
-            ServiceError::Conflict | ServiceError::NotFound => error_response(
-                StatusCode::NOT_FOUND,
-                "not_found",
-                "Workspace not found".to_string(),
-            ),
-            ServiceError::BadRequest(_) => error_response(
-                StatusCode::BAD_REQUEST,
-                "bad_request",
-                "Invalid download request".to_string(),
-            ),
-            ServiceError::Unexpected(inner) => {
-                tracing::error!(error = ?inner, workspace_id = %id, "workspace_download_failed");
-                error_response(
-                    StatusCode::INTERNAL_SERVER_ERROR,
-                    "internal",
-                    "Failed to prepare download".to_string(),
-                )
-            }
-        })?;
-
-    let mut headers = HeaderMap::new();
-    let content_type = HeaderValue::from_str(&download.content_type).map_err(|_| {
-        error_response(
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "invalid_header",
-            "Failed to prepare download headers".to_string(),
-        )
-    })?;
-    headers.insert(axum::http::header::CONTENT_TYPE, content_type);
-    headers.insert(
-        axum::http::header::HeaderName::from_static("x-content-type-options"),
-        HeaderValue::from_static("nosniff"),
-    );
-    let disposition = format!("attachment; filename=\"{}\"", download.filename);
-    let content_disposition = HeaderValue::from_str(&disposition).map_err(|_| {
-        error_response(
-            StatusCode::INTERNAL_SERVER_ERROR,
-            "invalid_header",
-            "Failed to prepare download headers".to_string(),
-        )
-    })?;
-    headers.insert(axum::http::header::CONTENT_DISPOSITION, content_disposition);
-
-    Ok((headers, download.bytes).into_response())
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/workspaces/{id}/roles",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    request_body = CreateWorkspaceRoleRequest,
-    responses((status = 200, body = WorkspaceRoleResponse))
-)]
-pub async fn create_role(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-    Json(body): Json<CreateWorkspaceRoleRequest>,
-) -> Result<Json<WorkspaceRoleResponse>, StatusCode> {
-    if body.name.trim().is_empty() || !validate_base_role(body.base_role.as_str()) {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    let overrides = normalize_overrides(body.overrides)?;
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, id, user_id, PERM_MEMBER_UPDATE_ROLE).await?;
-    let record = ctx
-        .workspace_service()
-        .create_role(
-            id,
-            user_id,
-            body.name.trim(),
-            body.base_role.trim(),
-            body.description.as_deref(),
-            body.priority.unwrap_or(0),
-            &overrides,
-        )
-        .await
-        .map_err(map_service_error)?;
-    Ok(Json(role_response_from(record)))
-}
-
-#[utoipa::path(
-    patch,
-    path = "/api/workspaces/{id}/roles/{role_id}",
-    tag = "Workspaces",
-    params(
-        ("id" = Uuid, Path, description = "Workspace ID"),
-        ("role_id" = Uuid, Path, description = "Role ID"),
-    ),
-    request_body = UpdateWorkspaceRoleRequest,
-    responses((status = 200, body = WorkspaceRoleResponse))
-)]
-pub async fn update_role(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path((workspace_id, role_id)): Path<(Uuid, Uuid)>,
-    Json(body): Json<UpdateWorkspaceRoleRequest>,
-) -> Result<Json<WorkspaceRoleResponse>, StatusCode> {
-    if let Some(base) = body.base_role.as_deref() {
-        if !validate_base_role(base) {
-            return Err(StatusCode::BAD_REQUEST);
-        }
-    }
-    let overrides_vec = normalize_overrides(body.overrides.clone())?;
-    let overrides_opt = if body.overrides.is_some() {
-        Some(overrides_vec.as_slice())
-    } else {
-        None
-    };
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, workspace_id, user_id, PERM_MEMBER_UPDATE_ROLE).await?;
-    let mut record = ctx
-        .workspace_service()
-        .update_role(
-            workspace_id,
-            user_id,
-            role_id,
-            body.name.as_deref(),
-            body.base_role.as_deref(),
-            body.description.as_deref(),
-            body.priority,
-            overrides_opt,
-        )
-        .await
-        .map_err(map_service_error)?;
-    if body.overrides.is_some() {
-        record.overrides = overrides_vec;
-    }
-    Ok(Json(role_response_from(record)))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/workspaces/{id}/roles/{role_id}",
-    tag = "Workspaces",
-    params(
-        ("id" = Uuid, Path, description = "Workspace ID"),
-        ("role_id" = Uuid, Path, description = "Role ID"),
-    ),
-    responses((status = 204))
-)]
-pub async fn delete_role(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path((workspace_id, role_id)): Path<(Uuid, Uuid)>,
-) -> Result<StatusCode, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, workspace_id, user_id, PERM_MEMBER_UPDATE_ROLE).await?;
-    ctx.workspace_service()
-        .delete_role(workspace_id, role_id)
-        .await
-        .map_err(map_service_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-#[utoipa::path(post, path = "/api/workspaces", tag = "Workspaces", request_body = CreateWorkspaceRequest, responses((status = 200, body = WorkspaceResponse)))]
-pub async fn create_workspace(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Json(payload): Json<CreateWorkspaceRequest>,
-) -> Result<Json<WorkspaceResponse>, StatusCode> {
-    if payload.name.trim().is_empty() {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspace = ctx
-        .workspace_service()
-        .create_workspace(
-            user_id,
-            payload.name.trim(),
-            payload.icon.as_deref(),
-            payload.description.as_deref(),
-        )
-        .await
-        .map_err(map_service_error)?;
-    let memberships = ctx
-        .workspace_service()
-        .list_for_user(user_id)
-        .await
-        .map_err(map_service_error)?;
-    let created = memberships
-        .into_iter()
-        .find(|item| item.id == workspace.id)
-        .unwrap_or(WorkspaceListItem {
-            id: workspace.id,
-            name: workspace.name,
-            slug: workspace.slug,
-            icon: workspace.icon,
-            description: workspace.description,
-            is_personal: workspace.is_personal,
-            role_kind: "system".to_string(),
-            system_role: Some("owner".to_string()),
-            custom_role_id: None,
-            is_default: false,
-        });
-    Ok(Json(to_response(created)))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/workspaces/{id}",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 200, body = WorkspaceResponse))
-)]
-pub async fn get_workspace_detail(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<Json<WorkspaceResponse>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let workspaces = ctx
-        .workspace_service()
-        .list_for_user(user_id)
-        .await
-        .map_err(map_service_error)?;
-    let workspace = workspaces
-        .into_iter()
-        .find(|ws| ws.id == id)
-        .ok_or(StatusCode::NOT_FOUND)?;
-    Ok(Json(to_response(workspace)))
-}
-
-#[utoipa::path(
-    put,
-    path = "/api/workspaces/{id}",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    request_body = UpdateWorkspaceRequest,
-    responses((status = 200, body = WorkspaceResponse))
-)]
-pub async fn update_workspace(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-    Json(payload): Json<UpdateWorkspaceRequest>,
-) -> Result<Json<WorkspaceResponse>, StatusCode> {
-    if let Some(name) = payload.name.as_deref() {
-        if name.trim().is_empty() {
-            return Err(StatusCode::BAD_REQUEST);
-        }
-    }
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, id, user_id, PERM_WORKSPACE_UPDATE).await?;
-    let normalized_name = payload
-        .name
-        .as_ref()
-        .map(|value| value.trim())
-        .filter(|value| !value.is_empty())
-        .map(|value| value.to_string());
-    let normalized_icon = payload
-        .icon
-        .as_ref()
-        .map(|value| value.trim())
-        .map(|value| value.to_string());
-    let normalized_description = payload
-        .description
-        .as_ref()
-        .map(|value| value.trim())
-        .map(|value| value.to_string());
-    let updated = ctx
-        .workspace_service()
-        .update_workspace(
-            id,
-            normalized_name.as_deref(),
-            normalized_icon.as_deref(),
-            normalized_description.as_deref(),
-        )
-        .await
-        .map_err(map_service_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-
-    // Refresh membership info to include role and default flags
-    let memberships = ctx
-        .workspace_service()
-        .list_for_user(user_id)
-        .await
-        .map_err(map_service_error)?;
-    let mut membership = memberships
-        .into_iter()
-        .find(|ws| ws.id == id)
-        .ok_or(StatusCode::FORBIDDEN)?;
-    membership.name = updated.name;
-    membership.icon = updated.icon;
-    membership.description = updated.description;
-    membership.slug = updated.slug;
-    Ok(Json(to_response(membership)))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/workspaces/{id}",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 204))
-)]
-pub async fn delete_workspace(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<StatusCode, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, id, user_id, PERM_WORKSPACE_DELETE).await?;
-    let workspace = ctx
-        .workspace_service()
-        .get_workspace(id)
-        .await
-        .map_err(map_service_error)?
-        .ok_or(StatusCode::NOT_FOUND)?;
-    if workspace.is_personal {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    let members = ctx
-        .workspace_service()
-        .list_members(id)
-        .await
-        .map_err(map_service_error)?;
-    if members.iter().any(|member| member.is_default) {
-        return Err(StatusCode::CONFLICT);
-    }
-    ctx.workspace_service()
-        .delete_workspace(id)
-        .await
-        .map_err(map_service_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-#[utoipa::path(
-    patch,
-    path = "/api/workspaces/{id}/members/{user_id}",
-    tag = "Workspaces",
-    params(
-        ("id" = Uuid, Path, description = "Workspace ID"),
-        ("user_id" = Uuid, Path, description = "Target user ID"),
-    ),
-    request_body = UpdateMemberRoleRequest,
-    responses((status = 200, body = WorkspaceMemberResponse))
-)]
-pub async fn update_member_role(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path((workspace_id, member_id)): Path<(Uuid, Uuid)>,
-    Json(body): Json<UpdateMemberRoleRequest>,
-) -> Result<Json<WorkspaceMemberResponse>, StatusCode> {
-    if body.role_kind != "system" && body.role_kind != "custom" {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-    if body.role_kind == "system" {
-        match body.system_role.as_deref() {
-            Some("owner" | "admin" | "editor" | "viewer") => {}
-            _ => return Err(StatusCode::BAD_REQUEST),
-        }
-    }
-    if body.role_kind == "custom" && body.custom_role_id.is_none() {
-        return Err(StatusCode::BAD_REQUEST);
-    }
-
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, workspace_id, user_id, PERM_MEMBER_UPDATE_ROLE).await?;
-
-    ctx.workspace_service()
-        .update_member_role(
-            workspace_id,
-            member_id,
-            user_id,
-            &body.role_kind,
-            body.system_role.as_deref(),
-            body.custom_role_id,
-        )
-        .await
-        .map_err(map_service_error)?;
-
-    let updated = ctx
-        .workspace_service()
-        .list_members(workspace_id)
-        .await
-        .map_err(map_service_error)?
-        .into_iter()
-        .find(|m| m.user_id == member_id)
-        .ok_or(StatusCode::NOT_FOUND)?;
-
-    Ok(Json(member_response_from(updated)))
-}
-
-#[utoipa::path(
-    delete,
-    path = "/api/workspaces/{id}/members/{user_id}",
-    tag = "Workspaces",
-    params(
-        ("id" = Uuid, Path, description = "Workspace ID"),
-        ("user_id" = Uuid, Path, description = "Target user ID"),
-    ),
-    responses((status = 204))
-)]
-pub async fn remove_member(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path((workspace_id, member_id)): Path<(Uuid, Uuid)>,
-) -> Result<StatusCode, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    require_permission(&ctx, workspace_id, user_id, PERM_MEMBER_REMOVE).await?;
-    ctx.workspace_service()
-        .remove_member(workspace_id, member_id, Some(user_id))
-        .await
-        .map_err(map_service_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/workspaces/{id}/leave",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 204))
-)]
-pub async fn leave_workspace(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(workspace_id): Path<Uuid>,
-) -> Result<StatusCode, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    ctx.workspace_service()
-        .leave_workspace(workspace_id, user_id)
-        .await
-        .map_err(map_service_error)?;
-    Ok(StatusCode::NO_CONTENT)
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/workspaces/{id}/switch",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 200, body = SwitchWorkspaceResponse))
-)]
-pub async fn switch_workspace(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    headers: HeaderMap,
-    Path(id): Path<Uuid>,
-) -> Result<(HeaderMap, Json<SwitchWorkspaceResponse>), StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    ctx.workspace_service()
-        .set_default_workspace(user_id, id)
-        .await
-        .map_err(map_service_error)?;
-    let client_ip = extract_client_ip(&headers);
-    let user_agent = extract_user_agent(&headers);
-    let session_service = ctx.session_service();
-    let mut issued = None;
-    if let Some(refresh_token) = extract_refresh_token(&headers) {
-        match session_service
-            .refresh_session(
-                &refresh_token,
-                Some(id),
-                SessionMetadata {
-                    user_agent,
-                    ip_address: client_ip.as_deref(),
-                },
-            )
-            .await
-        {
-            Ok(bundle) => issued = Some(bundle),
-            Err(ServiceError::Unauthorized | ServiceError::TokenExpired) => {
-                issued = None;
-            }
-            Err(err) => return Err(auth::map_auth_error(err)),
-        }
-    }
-    let issued = match issued {
-        Some(bundle) => bundle,
-        None => session_service
-            .issue_new_session(
-                user_id,
-                id,
-                false,
-                SessionMetadata {
-                    user_agent,
-                    ip_address: client_ip.as_deref(),
-                },
-            )
-            .await
-            .map_err(auth::map_auth_error)?,
-    };
-    let mut response_headers = HeaderMap::new();
-    apply_session_cookies(&ctx, &mut response_headers, &issued);
-    Ok((
-        response_headers,
-        Json(SwitchWorkspaceResponse {
-            access_token: issued.access.token,
-        }),
-    ))
-}
-
-#[utoipa::path(
-    get,
-    path = "/api/workspaces/{id}/permissions",
-    tag = "Workspaces",
-    params(("id" = Uuid, Path, description = "Workspace ID")),
-    responses((status = 200, body = WorkspacePermissionsResponse))
-)]
-pub async fn get_workspace_permissions(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(id): Path<Uuid>,
-) -> Result<Json<WorkspacePermissionsResponse>, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let set = ctx
-        .workspace_service()
-        .resolve_permission_set(id, user_id)
-        .await
-        .map_err(map_service_error)?
-        .ok_or(StatusCode::FORBIDDEN)?;
-    Ok(Json(WorkspacePermissionsResponse {
-        workspace_id: id,
-        permissions: set.to_vec(),
-    }))
-}
-
-#[utoipa::path(
-    post,
-    path = "/api/workspace-invitations/{token}/accept",
-    tag = "Workspaces",
-    params(("token" = String, Path, description = "Invitation token")),
-    responses((status = 204))
-)]
-pub async fn accept_invitation(
-    State(ctx): State<AppContext>,
-    bearer: Bearer,
-    Path(token): Path<String>,
-) -> Result<StatusCode, StatusCode> {
-    let sub = auth::validate_bearer(&ctx, bearer).await?;
-    let user_id = Uuid::parse_str(&sub).map_err(|_| StatusCode::UNAUTHORIZED)?;
-    let user = ctx
-        .account_service()
-        .get_me(user_id)
-        .await
-        .map_err(|err| match err {
-            ServiceError::Unauthorized | ServiceError::TokenExpired => StatusCode::UNAUTHORIZED,
-            ServiceError::Forbidden => StatusCode::FORBIDDEN,
-            ServiceError::NotFound => StatusCode::UNAUTHORIZED,
-            _ => StatusCode::INTERNAL_SERVER_ERROR,
-        })?
-        .ok_or(StatusCode::UNAUTHORIZED)?;
-
-    ctx.workspace_service()
-        .accept_invitation(&token, user_id, &user.email)
-        .await
-        .map_err(map_service_error)?;
-
-    Ok(StatusCode::NO_CONTENT)
-}
diff --git a/api/src/presentation/ws/mod.rs b/api/src/presentation/ws/mod.rs
deleted file mode 100644
index 08241007..00000000
--- a/api/src/presentation/ws/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-mod ws;
-pub use ws::*;
diff --git a/app/package.json b/app/package.json
index 6cb1ad30..65c60166 100644
--- a/app/package.json
+++ b/app/package.json
@@ -10,8 +10,8 @@
     "serve": "vite preview",
     "test": "vitest run",
    "lint": "eslint --ext .ts,.tsx src",
-    "gen:openapi": "bash -lc 'cd ../api && mkdir -p openapi && cargo run --quiet --bin export-openapi > openapi/openapi.json'",
-    "gen:client": "openapi-ts -i ../api/openapi/openapi.json -o src/shared/api/client -c legacy/fetch",
+    "gen:openapi": "bash -lc 'cd ../api && cargo run --quiet --bin refmd -- openapi export > openapi.json'",
+    "gen:client": "openapi-ts -i ../api/openapi.json -o src/shared/api/client -c legacy/fetch",
     "gen:api": "npm run gen:openapi && npm run gen:client"
   },
   "dependencies": {
diff --git a/app/src/features/edit-document/hooks/useCollaborativeDocument.ts b/app/src/features/edit-document/hooks/useCollaborativeDocument.ts
index 0f113352..3279dece 100644
--- a/app/src/features/edit-document/hooks/useCollaborativeDocument.ts
+++ b/app/src/features/edit-document/hooks/useCollaborativeDocument.ts
@@ -56,10 +56,16 @@ export function useCollaborativeDocument(id: string, shareToken?: string) {
   }, [id, shareToken])
 
   React.useEffect(() => {
+    const token = resolveShareToken(shareToken)
+    if (token) {
+      setIsReadOnly(shareReadOnly || archived)
+      return
+    }
+
     if (authLoading) return
     const hasEditPermission = permissions.includes('doc:edit')
-    setIsReadOnly(shareReadOnly || archived || !hasEditPermission)
-  }, [authLoading, shareReadOnly, archived, permissions])
+    setIsReadOnly(archived || !hasEditPermission)
+  }, [authLoading, shareReadOnly, archived, permissions, shareToken])
 
   const loadMeta = React.useCallback(async () => {
     try {
diff --git a/app/src/shared/api/client/sdk.gen.ts b/app/src/shared/api/client/sdk.gen.ts
index 6b9970be..c69b54b9 100644
--- a/app/src/shared/api/client/sdk.gen.ts
+++ b/app/src/shared/api/client/sdk.gen.ts
@@ -3,7 +3,7 @@
 import type { CancelablePromise } from './core/CancelablePromise';
 import { OpenAPI } from './core/OpenAPI';
 import { request as __request } from './core/request';
-import type { LoginData, LoginResponse2, LogoutResponse, MeResponse, DeleteAccountResponse, OauthLoginData, OauthLoginResponse, OauthStateData, OauthStateResponse, ListOauthProvidersResponse, RefreshSessionResponse, RegisterData, RegisterResponse, ListSessionsResponse, RevokeSessionData, RevokeSessionResponse, ListDocumentsData, ListDocumentsResponse, CreateDocumentData, CreateDocumentResponse, SearchDocumentsData, SearchDocumentsResponse, GetDocumentData, GetDocumentResponse, DeleteDocumentData, DeleteDocumentResponse, UpdateDocumentData, UpdateDocumentResponse, ArchiveDocumentData, ArchiveDocumentResponse, GetBacklinksData, GetBacklinksResponse, GetDocumentContentData, GetDocumentContentResponse, UpdateDocumentContentData, UpdateDocumentContentResponse, PatchDocumentContentData, PatchDocumentContentResponse, DownloadDocumentData, DownloadDocumentResponse, DuplicateDocumentData, DuplicateDocumentResponse, GetOutgoingLinksData, GetOutgoingLinksResponse, ListDocumentSnapshotsData, ListDocumentSnapshotsResponse, GetDocumentSnapshotDiffData, GetDocumentSnapshotDiffResponse, DownloadDocumentSnapshotData, DownloadDocumentSnapshotResponse, RestoreDocumentSnapshotData, RestoreDocumentSnapshotResponse, UnarchiveDocumentData, UnarchiveDocumentResponse, UploadFileData, UploadFileResponse2, GetFileByNameData, GetFileByNameResponse, GetFileData, GetFileResponse, GetChangesResponse, GetConfigResponse, CreateOrUpdateConfigData, CreateOrUpdateConfigResponse, DeleteConfigResponse, DeinitRepositoryResponse, GetCommitDiffData, GetCommitDiffResponse, GetWorkingDiffResponse, CheckPathIgnoredData, CheckPathIgnoredResponse, GetGitignorePatternsResponse, AddGitignorePatternsData, AddGitignorePatternsResponse, GetHistoryResponse, IgnoreDocumentData, IgnoreDocumentResponse, IgnoreFolderData, IgnoreFolderResponse, ImportRepositoryData, ImportRepositoryResponse, InitRepositoryResponse, PullRepositoryData, PullRepositoryResponse, GetPullSessionData, GetPullSessionResponse, FinalizePullSessionData, FinalizePullSessionResponse, ResolvePullSessionData, ResolvePullSessionResponse, StartPullSessionResponse, GetStatusResponse, SyncNowData, SyncNowResponse, HealthResponse, RenderMarkdownData, RenderMarkdownResponse, RenderMarkdownManyData, RenderMarkdownManyResponse, ListApiTokensResponse, CreateApiTokenData, CreateApiTokenResponse, RevokeApiTokenData, RevokeApiTokenResponse, PluginsInstallFromUrlData, PluginsInstallFromUrlResponse, PluginsGetManifestResponse, PluginsUninstallData, PluginsUninstallResponse, SseUpdatesResponse, GetUserShortcutsResponse, UpdateUserShortcutsData, UpdateUserShortcutsResponse, PluginsGetKvData, PluginsGetKvResponse, ListRecordsData, ListRecordsResponse, PluginsCreateRecordData, PluginsCreateRecordResponse, PluginsExecActionData, PluginsExecActionResponse, PluginsDeleteRecordData, PluginsDeleteRecordResponse, PluginsUpdateRecordData, PluginsUpdateRecordResponse, GetPublishStatusData, GetPublishStatusResponse, PublishDocumentData, PublishDocumentResponse, UnpublishDocumentData, UnpublishDocumentResponse, ListWorkspacePublicDocumentsData, ListWorkspacePublicDocumentsResponse, GetPublicByWorkspaceAndIdData, GetPublicByWorkspaceAndIdResponse, GetPublicContentByWorkspaceAndIdData, GetPublicContentByWorkspaceAndIdResponse, CreateShareData, CreateShareResponse2, ListActiveSharesResponse, ListApplicableSharesData, ListApplicableSharesResponse, BrowseShareData, BrowseShareResponse, ListDocumentSharesData, ListDocumentSharesResponse, MaterializeFolderShareData, MaterializeFolderShareResponse, ListShareMountsResponse, CreateShareMountData, CreateShareMountResponse, DeleteShareMountData, DeleteShareMountResponse, ValidateShareTokenData, ValidateShareTokenResponse, DeleteShareData, DeleteShareResponse, ListTagsData, ListTagsResponse, AcceptInvitationData, AcceptInvitationResponse, ListWorkspacesResponse, CreateWorkspaceData, CreateWorkspaceResponse, GetWorkspaceDetailData, GetWorkspaceDetailResponse, UpdateWorkspaceData, UpdateWorkspaceResponse, DeleteWorkspaceData, DeleteWorkspaceResponse, DownloadWorkspaceArchiveData, DownloadWorkspaceArchiveResponse, ListInvitationsData, ListInvitationsResponse, CreateInvitationData, CreateInvitationResponse, RevokeInvitationData, RevokeInvitationResponse, ListMembersData, ListMembersResponse, RemoveMemberData, RemoveMemberResponse, UpdateMemberRoleData, UpdateMemberRoleResponse, GetWorkspacePermissionsData, GetWorkspacePermissionsResponse, ListRolesData, ListRolesResponse, CreateRoleData, CreateRoleResponse, DeleteRoleData, DeleteRoleResponse, UpdateRoleData, UpdateRoleResponse, SwitchWorkspaceData, SwitchWorkspaceResponse2, AxumWsEntryData } from './types.gen';
+import type { LoginData, LoginResponse2, LogoutResponse, MeResponse, DeleteAccountResponse, OauthLoginData, OauthLoginResponse, OauthStateData, OauthStateResponse, ListOauthProvidersResponse, RefreshSessionResponse, RegisterData, RegisterResponse, ListSessionsResponse, RevokeSessionData, RevokeSessionResponse, ListDocumentsData, ListDocumentsResponse, CreateDocumentData, CreateDocumentResponse, SearchDocumentsData, SearchDocumentsResponse, GetDocumentData, GetDocumentResponse, DeleteDocumentData, DeleteDocumentResponse, UpdateDocumentData, UpdateDocumentResponse, ArchiveDocumentData, ArchiveDocumentResponse, GetBacklinksData, GetBacklinksResponse, GetDocumentContentData, GetDocumentContentResponse, UpdateDocumentContentData, UpdateDocumentContentResponse, PatchDocumentContentData, PatchDocumentContentResponse, DownloadDocumentData, DownloadDocumentResponse, DuplicateDocumentData, DuplicateDocumentResponse, GetOutgoingLinksData, GetOutgoingLinksResponse, ListDocumentSnapshotsData, ListDocumentSnapshotsResponse, GetDocumentSnapshotDiffData, GetDocumentSnapshotDiffResponse, DownloadDocumentSnapshotData, DownloadDocumentSnapshotResponse, RestoreDocumentSnapshotData, RestoreDocumentSnapshotResponse, UnarchiveDocumentData, UnarchiveDocumentResponse, UploadFileData, UploadFileResponse2, GetFileByNameData, GetFileByNameResponse, GetFileData, GetFileResponse, GetChangesResponse, GetConfigResponse, CreateOrUpdateConfigData, CreateOrUpdateConfigResponse, DeleteConfigResponse, DeinitRepositoryResponse, GetCommitDiffData, GetCommitDiffResponse, GetWorkingDiffResponse, CheckPathIgnoredData, CheckPathIgnoredResponse, GetGitignorePatternsResponse, AddGitignorePatternsData, AddGitignorePatternsResponse, GetHistoryResponse, IgnoreDocumentData, IgnoreDocumentResponse, IgnoreFolderData, IgnoreFolderResponse, ImportRepositoryData, ImportRepositoryResponse, InitRepositoryResponse, PullRepositoryData, PullRepositoryResponse, GetPullSessionData, GetPullSessionResponse, FinalizePullSessionData, FinalizePullSessionResponse, ResolvePullSessionData, ResolvePullSessionResponse, StartPullSessionResponse, GetStatusResponse, SyncNowData, SyncNowResponse, HealthResponse, RenderMarkdownData, RenderMarkdownResponse, RenderMarkdownManyData, RenderMarkdownManyResponse, ListApiTokensResponse, CreateApiTokenData, CreateApiTokenResponse, RevokeApiTokenData, RevokeApiTokenResponse, PluginsInstallFromUrlData, PluginsInstallFromUrlResponse, PluginsGetManifestResponse, PluginsUninstallData, PluginsUninstallResponse, SseUpdatesResponse, GetUserShortcutsResponse, UpdateUserShortcutsData, UpdateUserShortcutsResponse, PluginsGetAssetData, PluginsGetAssetResponse, PluginsGetKvData, PluginsGetKvResponse, ListRecordsData, ListRecordsResponse, PluginsCreateRecordData, PluginsCreateRecordResponse, PluginsExecActionData, PluginsExecActionResponse, PluginsDeleteRecordData, PluginsDeleteRecordResponse, PluginsUpdateRecordData, PluginsUpdateRecordResponse, GetPublishStatusData, GetPublishStatusResponse, PublishDocumentData, PublishDocumentResponse, UnpublishDocumentData, UnpublishDocumentResponse, ListWorkspacePublicDocumentsData, ListWorkspacePublicDocumentsResponse, GetPublicByWorkspaceAndIdData, GetPublicByWorkspaceAndIdResponse, GetPublicContentByWorkspaceAndIdData, GetPublicContentByWorkspaceAndIdResponse, CreateShareData, CreateShareResponse2, ListActiveSharesResponse, ListApplicableSharesData, ListApplicableSharesResponse, BrowseShareData, BrowseShareResponse, ListDocumentSharesData, ListDocumentSharesResponse, MaterializeFolderShareData, MaterializeFolderShareResponse, ListShareMountsResponse, CreateShareMountData, CreateShareMountResponse, DeleteShareMountData, DeleteShareMountResponse, ValidateShareTokenData, ValidateShareTokenResponse, DeleteShareData, DeleteShareResponse, EnqueueIngestEventsData, EnqueueIngestEventsResponse, ListTagsData, ListTagsResponse, AcceptInvitationData, AcceptInvitationResponse, ListWorkspacesResponse, CreateWorkspaceData, CreateWorkspaceResponse, GetWorkspaceDetailData, GetWorkspaceDetailResponse, UpdateWorkspaceData, UpdateWorkspaceResponse, DeleteWorkspaceData, DeleteWorkspaceResponse, DownloadWorkspaceArchiveData, DownloadWorkspaceArchiveResponse, ListInvitationsData, ListInvitationsResponse, CreateInvitationData, CreateInvitationResponse, RevokeInvitationData, RevokeInvitationResponse, LeaveWorkspaceData, LeaveWorkspaceResponse, ListMembersData, ListMembersResponse, RemoveMemberData, RemoveMemberResponse, UpdateMemberRoleData, UpdateMemberRoleResponse, GetWorkspacePermissionsData, GetWorkspacePermissionsResponse, ListRolesData, ListRolesResponse, CreateRoleData, CreateRoleResponse, DeleteRoleData, DeleteRoleResponse, UpdateRoleData, UpdateRoleResponse, SwitchWorkspaceData, SwitchWorkspaceResponse2, AxumWsEntryData } from './types.gen';
 
 /**
  * @param data The data for the request.
@@ -536,10 +536,6 @@ export const unarchiveDocument = (data: UnarchiveDocumentData): CancelablePromis
 };
 
 /**
- * POST /api/files (multipart/form-data)
- * Fields:
- * - file: binary file (required)
- * - document_id: uuid (required by current schema)
  * @param data The data for the request.
  * @param data.formData
  * @returns UploadFileResponse File uploaded
@@ -555,7 +551,6 @@ export const uploadFile = (data: UploadFileData): CancelablePromise
 };
 
 /**
- * GET /api/uploads/{filename} -> bytes
  * @param data The data for the request.
 * @param data.filename File name
 * @param data.documentId Document ID
@@ -576,7 +571,6 @@ export const getFileByName = (data: GetFileByNameData): CancelablePromise
 };
 
 /**
- * GET /api/files/{id} -> bytes (fallback; primary is /uploads/{filename})
 * @param data The data for the request.
 * @param data.id File ID
 * @returns binary OK
@@ -1072,6 +1066,22 @@ export const updateUserShortcuts = (data: UpdateUserShortcutsData): CancelablePr
     });
 };
 
+/**
+ * @param data The data for the request.
+ * @param data.token Share token (optional)
+ * @returns unknown Plugin asset
+ * @throws ApiError
+ */
+export const pluginsGetAsset = (data: PluginsGetAssetData = {}): CancelablePromise<PluginsGetAssetResponse> => {
+    return __request(OpenAPI, {
+        method: 'GET',
+        url: '/api/plugin-assets',
+        query: {
+            token: data.token
+        }
+    });
+};
+
 /**
  * @param data The data for the request.
  * @param data.plugin Plugin ID
@@ -1488,6 +1498,24 @@ export const deleteShare = (data: DeleteShareData): CancelablePromise
     });
 };
 
+/**
+ * @param data The data for the request.
+ * @param data.requestBody
+ * @returns unknown Events accepted
+ * @throws ApiError
+ */
+export const enqueueIngestEvents = (data: EnqueueIngestEventsData): CancelablePromise<EnqueueIngestEventsResponse> => {
+    return __request(OpenAPI, {
+        method: 'POST',
+        url: '/api/storage/ingest',
+        body: data.requestBody,
+        mediaType: 'application/json',
+        errors: {
+            400: 'Invalid request'
+        }
+    });
+};
+
 /**
  * @param data The data for the request.
  * @param data.q Filter contains
@@ -1674,6 +1702,22 @@ export const revokeInvitation = (data: RevokeInvitationData): CancelablePromise<
     });
 };
 
+/**
+ * @param data The data for the request.
+ * @param data.id Workspace ID
+ * @returns void
+ * @throws ApiError
+ */
+export const leaveWorkspace = (data: LeaveWorkspaceData): CancelablePromise<LeaveWorkspaceResponse> => {
+    return __request(OpenAPI, {
+        method: 'POST',
+        url: '/api/workspaces/{id}/leave',
+        path: {
+            id: data.id
+        }
+    });
+};
+
 /**
  * @param data The data for the request.
  * @param data.id Workspace ID
diff --git a/app/src/shared/api/client/types.gen.ts b/app/src/shared/api/client/types.gen.ts
index 72317753..e8da0e9f 100644
--- a/app/src/shared/api/client/types.gen.ts
+++ b/app/src/shared/api/client/types.gen.ts
@@ -4,9 +4,6 @@ export type ActiveShareItem = {
     created_at: string;
     document_id: string;
     document_title: string;
-    /**
-     * 'document' or 'folder'
-     */
     document_type: string;
     expires_at?: (string) | null;
     id: string;
@@ -42,9 +39,6 @@ export type ApiTokenItem = {
 export type ApplicableShareItem = {
     excluded: boolean;
     permission: string;
-    /**
-     * 'document' or 'folder'
-     */
     scope: string;
     token: string;
 };
@@ -147,6 +141,9 @@ export type Document = {
     created_by_plugin?: (string) | null;
     desired_path: string;
     id: string;
+    /**
+     * Legacy alias for `workspace_id` kept for backward compatibility with older clients.
+     */
     owner_id: string;
     parent_id?: (string) | null;
     path?: (string) | null;
@@ -327,6 +324,20 @@ export type HealthResp = {
     status: string;
 };
 
+export type IngestBatchRequest = {
+    events: Array<IngestEventRequest>;
+};
+
+export type IngestEventRequest = {
+    backend?: (string) | null;
+    content_hash?: (string) | null;
+    kind: IngestKindParam;
+    payload?: unknown;
+    repo_path: string;
+};
+
+export type IngestKindParam = 'upsert' | 'delete';
+
 export type InstallFromUrlBody = {
     token?: (string) | null;
     url: string;
@@ -517,14 +528,8 @@ export type ShareDocumentResponse = {
 export type ShareItem = {
     expires_at?: (string) | null;
     id: string;
-    /**
-     * If present, this document share was materialized from a folder share
-     */
     parent_share_id?: (string) | null;
     permission: string;
-    /**
-     * document | folder
-     */
     scope: string;
     token: string;
     url: string;
@@ -655,13 +660,7 @@ export type UpdateWorkspaceRoleRequest = {
 };
 
 export type UploadFileMultipart = {
-    /**
-     * Target document ID
-     */
     document_id: string;
-    /**
-     * File to upload
-     */
     file: Blob | File;
 };
 
@@ -1244,6 +1243,15 @@ export type UpdateUserShortcutsData = {
 
 export type UpdateUserShortcutsResponse = (UserShortcutResponse);
 
+export type PluginsGetAssetData = {
+    /**
+     * Share token (optional)
+     */
+    token?: (string) | null;
+};
+
+export type PluginsGetAssetResponse = (unknown);
+
 export type PluginsGetKvData = {
     /**
     * Document ID
@@ -1504,6 +1512,12 @@ export type DeleteShareData = {
 
 export type DeleteShareResponse = (void);
 
+export type EnqueueIngestEventsData = {
+    requestBody: IngestBatchRequest;
+};
+
+export type EnqueueIngestEventsResponse = (unknown);
+
 export type ListTagsData = {
     /**
     * Filter contains
@@ -1603,6 +1617,15 @@ export type RevokeInvitationData = {
 
 export type RevokeInvitationResponse = (WorkspaceInvitationResponse);
 
+export type LeaveWorkspaceData = {
+    /**
+     * Workspace ID
+     */
+    id: string;
+};
+
+export type LeaveWorkspaceResponse = (void);
+
 export type ListMembersData = {
     /**
     * Workspace ID