# docker-compose.unified.yml
# Unified VisionFlow Docker Compose configuration (development + production
# profiles, shared Neo4j and Cloudflare tunnel services).
# Extension fields for DRY configuration.
# Anchors defined here are merged into services below via `<<:` merge keys.
x-common-environment: &common-environment
  NVIDIA_VISIBLE_DEVICES: ${NVIDIA_VISIBLE_DEVICES:-0}
  NVIDIA_DRIVER_CAPABILITIES: compute,utility
  CUDA_ARCH: ${CUDA_ARCH:-75}
  SYSTEM_NETWORK_PORT: ${SYSTEM_NETWORK_PORT:-4000}
  # MCP and Claude Flow configuration
  CLAUDE_FLOW_HOST: ${CLAUDE_FLOW_HOST:-agentic-workstation}
  MCP_HOST: ${MCP_HOST:-agentic-workstation}
  MCP_TCP_PORT: ${MCP_TCP_PORT:-9500}
  MCP_TRANSPORT: ${MCP_TRANSPORT:-tcp}
  MCP_RECONNECT_ATTEMPTS: ${MCP_RECONNECT_ATTEMPTS:-3}
  MCP_RECONNECT_DELAY: ${MCP_RECONNECT_DELAY:-1000}
  MCP_CONNECTION_TIMEOUT: ${MCP_CONNECTION_TIMEOUT:-30000}
  ORCHESTRATOR_WS_URL: ${ORCHESTRATOR_WS_URL:-ws://mcp-orchestrator:9001/ws}
  MCP_RELAY_FALLBACK_TO_MOCK: ${MCP_RELAY_FALLBACK_TO_MOCK:-true}
  BOTS_ORCHESTRATOR_URL: ${BOTS_ORCHESTRATOR_URL:-ws://agentic-workstation:3002}
  MANAGEMENT_API_HOST: ${MANAGEMENT_API_HOST:-agentic-workstation}
  MANAGEMENT_API_PORT: ${MANAGEMENT_API_PORT:-9090}
  # Solid Pod: served in-process by the external `solid-pod-rs` crate
  # (ADR-053/056). POD_BASE_URL / POD_DATA_ROOT replace the legacy JSS_* vars.

x-common-healthcheck: &common-healthcheck
  test: ["CMD", "curl", "-f", "http://localhost:4000/api/health"]
  interval: 30s
  timeout: 10s
  retries: 3
  start_period: 40s

x-common-logging: &common-logging
  driver: "json-file"
  options:
    max-size: "10m"
    max-file: "3"

# Merged under `deploy:`. NOTE: `<<:` is a shallow merge — a service that
# declares its own `deploy.resources` key replaces this one entirely and must
# restate the device reservation (see visionflow-production).
x-gpu-resources: &gpu-resources
  resources:
    reservations:
      devices:
        - driver: nvidia
          count: 1
          capabilities: [gpu, compute, utility]
services:
  # Neo4j Graph Database - Required for all profiles
  neo4j:
    container_name: visionflow-neo4j
    image: neo4j:5.13.0
    environment:
      # NEO4J_AUTH takes the form user/password; no default on purpose so a
      # missing .env fails fast instead of starting with empty credentials.
      - NEO4J_AUTH=${NEO4J_USER}/${NEO4J_PASSWORD}
      - NEO4J_server_memory_pagecache_size=512M
      - NEO4J_server_memory_heap_max__size=1G
      - NEO4J_dbms_security_procedures_unrestricted=apoc.*
      - NEO4J_dbms_security_procedures_allowlist=apoc.*
    ports:
      - "7474:7474"  # HTTP
      - "7687:7687"  # Bolt
    volumes:
      - neo4j-data:/data
      - neo4j-logs:/logs
      - neo4j-conf:/conf
      - neo4j-plugins:/plugins
    networks:
      - docker_ragflow
    healthcheck:
      test: ["CMD-SHELL", "wget --spider --quiet http://localhost:7474 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
    logging:
      <<: *common-logging

  # NOTE: The JSS (JavaScriptSolidServer) Node sidecar was retired
  # 2026-04-20. Solid Pod functionality is now served in-process by the
  # external `solid-pod-rs` crate pinned in Cargo.toml (ADR-053/056).
  # No separate service entry is required.
# Unified VisionFlow service with profile-based configuration
visionflow:
container_name: ${CONTAINER_NAME:-visionflow_container}
hostname: ${HOSTNAME:-webxr}
depends_on:
neo4j:
condition: service_healthy
build:
context: .
dockerfile: ${DOCKERFILE:-Dockerfile.unified}
target: development
args:
CUDA_ARCH: ${CUDA_ARCH:-75}
BUILD_TARGET: ${BUILD_TARGET:-development}
env_file:
- .env
environment:
<<: *common-environment
# Development-specific variables (overridden in production profile)
NODE_ENV: ${NODE_ENV:-development}
# ADR-055 H3 fail-closed: auth.rs treats missing APP_ENV as production
# and rejects the legacy X-Nostr-Pubkey path with "Legacy session auth
# not available in production. Use NIP-98." APP_ENV MUST be set to
# "development" explicitly here, otherwise /api/graph/data returns 401
# to browser-extension callers.
APP_ENV: ${APP_ENV:-development}
VITE_DEBUG: ${DEBUG_ENABLED:-true}
VITE_DEV_SERVER_PORT: ${VITE_DEV_SERVER_PORT:-5173}
VITE_API_PORT: ${VITE_API_PORT:-4000}
VITE_HMR_PORT: ${VITE_HMR_PORT:-24678}
# Default: quiet. Set RUST_LOG=debug (or module-specific) in .env when debugging.
RUST_LOG: ${RUST_LOG:-warn,webxr=info,webxr::adapters::neo4j_ontology_repository=warn,webxr::actors::graph_state_actor=warn,actix_web=warn}
RUST_LOG_REDIRECT: ${RUST_LOG_REDIRECT:-true}
DOCKER_ENV: ${DOCKER_ENV:-true}
# Dev-only: bypass Nostr auth for settings writes (never set in production)
SETTINGS_AUTH_BYPASS: "${SETTINGS_AUTH_BYPASS:-false}"
# Dev-only pubkey allowlist — add your NIP-07 browser-extension pubkey
# (hex, 64 chars) to POWER_USER_PUBKEYS via .env to unlock power-user
# affordances. APPROVED_PUBKEYS elevates to general "authenticated
# user". Feature gates (Perplexity/OpenAI/RAGFlow/Settings-sync) are
# separate allowlists. See src/config/feature_access.rs.
POWER_USER_PUBKEYS: ${POWER_USER_PUBKEYS:-}
APPROVED_PUBKEYS: ${APPROVED_PUBKEYS:-}
SETTINGS_SYNC_ENABLED_PUBKEYS: ${SETTINGS_SYNC_ENABLED_PUBKEYS:-}
OPENAI_ENABLED_PUBKEYS: ${OPENAI_ENABLED_PUBKEYS:-}
PERPLEXITY_ENABLED_PUBKEYS: ${PERPLEXITY_ENABLED_PUBKEYS:-}
RAGFLOW_ENABLED_PUBKEYS: ${RAGFLOW_ENABLED_PUBKEYS:-}
# Nostr bead provenance bridge — scoped to visionflow only (privkey must not leak to other containers)
# Bridge pubkey: eb47d8a792a4709329270a9f85f012326c61867a913791dc5f89dc7a0a760754
VISIONCLAW_NOSTR_PRIVKEY: ${VISIONCLAW_NOSTR_PRIVKEY:-}
# ServerIdentity (ADR-040) — server_identity.rs expects SERVER_NOSTR_PRIVKEY.
# Alias the existing bridge key so the server has a stable identity to sign
# kind 30023/30100/30200/30300 events with. Fallback to auto-generate in dev
# per `server_identity.rs:57-91` — APP_ENV must not be "production" for that.
SERVER_NOSTR_PRIVKEY: ${SERVER_NOSTR_PRIVKEY:-${VISIONCLAW_NOSTR_PRIVKEY:-}}
SERVER_NOSTR_AUTO_GENERATE: ${SERVER_NOSTR_AUTO_GENERATE:-true}
FORUM_RELAY_URL: ${FORUM_RELAY_URL:-}
# Neo4j Configuration
NEO4J_URI: ${NEO4J_URI:-bolt://neo4j:7687}
NEO4J_USER: ${NEO4J_USER:-neo4j}
NEO4J_PASSWORD: ${NEO4J_PASSWORD}
NEO4J_DATABASE: ${NEO4J_DATABASE:-neo4j}
volumes:
# Data volumes - ALWAYS mounted (both dev and prod)
- visionflow-data:/app/data
- visionflow-logs:/app/logs
# SOURCE CODE MOUNTS - Live sync from host for development hot-reload
# These override the COPY'd source in the image with current host files
# HOST_PROJECT_ROOT resolves DinD path translation (defaults to "." for native Docker)
- ${HOST_PROJECT_ROOT:-.}/src:/app/src:ro
- ${HOST_PROJECT_ROOT:-.}/Cargo.toml:/app/Cargo.toml:ro
- ${HOST_PROJECT_ROOT:-.}/Cargo.lock:/app/Cargo.lock:ro
- ${HOST_PROJECT_ROOT:-.}/build.rs:/app/build.rs:ro
- ${HOST_PROJECT_ROOT:-.}/whelk-rs:/app/whelk-rs:ro
- ${HOST_PROJECT_ROOT:-.}/client/src:/app/client/src:ro
- ${HOST_PROJECT_ROOT:-.}/client/public:/app/client/public:ro
- ${HOST_PROJECT_ROOT:-.}/client/index.html:/app/client/index.html:ro
- ${HOST_PROJECT_ROOT:-.}/client/vite.config.ts:/app/client/vite.config.ts:ro
- ${HOST_PROJECT_ROOT:-.}/client/tsconfig.json:/app/client/tsconfig.json:ro
# Cache volumes for build optimization
- npm-cache:/root/.npm
- cargo-cache:/root/.cargo/registry
- cargo-git-cache:/root/.cargo/git
- cargo-target-cache:/app/target
# Docker socket for controlled access (development only)
- /var/run/docker.sock:/var/run/docker.sock:ro
# Mount host data directory for graph content. Both markdown and metadata
# are rw so the GitHub sync can populate /workspace/ext/data/markdown
# with fetched .md files from GITHUB_OWNER/GITHUB_REPO. The `ro` flag
# on markdown was the cause of the "Failed to verify directory
# permissions: Read-only file system" error observed on startup.
- ${HOST_PROJECT_ROOT:-.}/data/markdown:/workspace/ext/data/markdown:rw
- ${HOST_PROJECT_ROOT:-.}/data/metadata:/workspace/ext/data/metadata:rw
ports:
# Development: Nginx entry point + API port
- "${DEV_NGINX_PORT:-3001}:3001"
- "${API_PORT:-4000}:4000"
networks:
docker_ragflow:
aliases:
- ${NETWORK_ALIAS:-webxr}
deploy:
<<: *gpu-resources
runtime: nvidia
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:4000/api/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 300s
logging:
<<: *common-logging
profiles:
- development
- dev
# Production profile configuration
visionflow-production:
container_name: ${CONTAINER_NAME:-visionflow_prod_container}
hostname: ${HOSTNAME:-webxr-prod}
depends_on:
neo4j:
condition: service_healthy
build:
context: .
dockerfile: Dockerfile.production
args:
CUDA_ARCH: ${CUDA_ARCH:-75}
BUILD_TARGET: production
REBUILD_PTX: ${REBUILD_PTX:-false}
env_file:
- .env
environment:
<<: *common-environment
# Production-specific overrides
NODE_ENV: production
RUST_LOG: ${RUST_LOG:-info}
GIT_HASH: ${GIT_HASH:-production}
VITE_DEBUG: "false"
# Neo4j Configuration
NEO4J_URI: ${NEO4J_URI:-bolt://neo4j:7687}
NEO4J_USER: ${NEO4J_USER:-neo4j}
NEO4J_PASSWORD: ${NEO4J_PASSWORD}
NEO4J_DATABASE: ${NEO4J_DATABASE:-neo4j}
# CORS — must include production domain so nginx can pass real Origin
CORS_ALLOWED_ORIGINS: ${CORS_ALLOWED_ORIGINS:-https://www.visionflow.info,https://visionflow.info,http://localhost:3000,http://localhost:3001}
# ComfyUI image generation
COMFYUI_URL: ${COMFYUI_URL:-http://comfyui:8188}
COMFYUI_SALAD_URL: ${COMFYUI_SALAD_URL:-http://comfyui:3000}
# Solid pod storage for agent pipeline
SOLID_PROXY_SECRET_KEY: ${SOLID_PROXY_SECRET_KEY}
VISIONFLOW_AGENT_KEY: ${VISIONFLOW_AGENT_KEY:-changeme-agent-key}
# Nostr bead provenance bridge — scoped here only, never in common-environment
VISIONCLAW_NOSTR_PRIVKEY: ${VISIONCLAW_NOSTR_PRIVKEY:-}
# ServerIdentity (ADR-040) — production REQUIRES an explicit key; no auto-generate here.
SERVER_NOSTR_PRIVKEY: ${SERVER_NOSTR_PRIVKEY:-${VISIONCLAW_NOSTR_PRIVKEY:-}}
FORUM_RELAY_URL: ${FORUM_RELAY_URL:-}
volumes:
# Production: ONLY data volumes (no source mounts, no Docker socket)
- visionflow-data:/app/data
- visionflow-logs:/app/logs
ports:
# Production: Only API port
- "${PROD_API_PORT:-3001}:3001"
networks:
docker_ragflow:
aliases:
- ${NETWORK_ALIAS:-webxr}
deploy:
<<: *gpu-resources
resources:
limits:
memory: 8G
cpus: '4'
reservations:
memory: 2G
cpus: '1'
runtime: nvidia
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
logging:
<<: *common-logging
profiles:
- production
- prod
# Cloudflare tunnel - works with both profiles
cloudflared:
container_name: ${CLOUDFLARED_CONTAINER:-cloudflared-tunnel}
image: cloudflare/cloudflared:latest
command: tunnel --no-autoupdate run
environment:
- TUNNEL_TOKEN=${CLOUDFLARE_TUNNEL_TOKEN}
volumes:
- ./config.yml:/etc/cloudflared/config.yml:ro
depends_on:
visionflow:
condition: service_started
required: false
visionflow-production:
condition: service_started
required: false
networks:
- docker_ragflow
restart: unless-stopped
logging:
<<: *common-logging
profiles:
- development
- dev
- production
- prod
networks:
  # Pre-existing shared network; create it out-of-band before `docker compose up`.
  docker_ragflow:
    external: true
    name: ${EXTERNAL_NETWORK:-docker_ragflow}
volumes:
  # Persistent data volumes
  visionflow-data:
    name: ${DATA_VOLUME_NAME:-visionflow-data}
    driver: local
  visionflow-logs:
    name: ${LOGS_VOLUME_NAME:-visionflow-logs}
    driver: local
  # Build cache volumes
  npm-cache:
    name: ${NPM_CACHE_VOLUME:-visionflow-npm-cache}
    driver: local
  cargo-cache:
    name: ${CARGO_CACHE_VOLUME:-visionflow-cargo-cache}
    driver: local
  cargo-git-cache:
    name: ${CARGO_GIT_CACHE_VOLUME:-visionflow-cargo-git-cache}
    driver: local
  cargo-target-cache:
    name: ${CARGO_TARGET_CACHE_VOLUME:-visionflow-cargo-target-cache}
    driver: local
  # Neo4j volumes
  neo4j-data:
    name: ${NEO4J_DATA_VOLUME:-visionflow-neo4j-data}
    driver: local
  neo4j-logs:
    name: ${NEO4J_LOGS_VOLUME:-visionflow-neo4j-logs}
    driver: local
  neo4j-conf:
    name: ${NEO4J_CONF_VOLUME:-visionflow-neo4j-conf}
    driver: local
  neo4j-plugins:
    name: ${NEO4J_PLUGINS_VOLUME:-visionflow-neo4j-plugins}
    driver: local