docker-compose.yml
services:
  frontend:
    build:
      context: ./frontend
      args:
        # URL the browser uses to reach the backend (injected at build time)
        NEXT_PUBLIC_GALLERY_API_URL: http://localhost:8800
    ports:
      - "3100:3000"
    env_file:
      - .env
    environment:
      # Members tab: dynamically loads GitHub Org People (server-side cache, 30 min)
      GITHUB_ORG: "${GITHUB_ORG:-PlateerLab}"
      GITHUB_TOKEN: "${GITHUB_TOKEN:-}"
      MEMBERS_REVALIDATE_SECONDS: "${MEMBERS_REVALIDATE_SECONDS:-1800}"
      MEMBERS_REVALIDATE_TOKEN: "${MEMBERS_REVALIDATE_TOKEN:-}"
    depends_on:
      backend:
        condition: service_healthy
    restart: unless-stopped
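  # Note: the frontend's NEXT_PUBLIC_GALLERY_API_URL above points at the
  # host-published backend port (8800, mapped below) rather than the container
  # port, because those requests originate in the user's browser, not inside
  # the frontend container.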
  backend:
    build:
      context: ./backend
    ports:
      - "8800:8000"
    env_file:
      - .env
    environment:
      # Synaptic Memory graph persistence (per-container unless a volume is mounted)
      SYNAPTIC_DB: /tmp/synaptic.db
      # Embeddings (Qwen vLLM by default, no auth).
      SYNAPTIC_EMBED_URL: "${SYNAPTIC_EMBED_URL:-}"
      SYNAPTIC_EMBED_MODEL: "${SYNAPTIC_EMBED_MODEL:-default}"
      # LLM chat completions (OpenAI-compatible upstream).
      LLM_BASE_URL: "${LLM_BASE_URL:-https://api.openai.com/v1}"
      LLM_MODEL: "${LLM_MODEL:-gpt-4o-mini}"
      LLM_API_KEY: "${LLM_API_KEY:-}"
      # Legacy fallback for the openai-proxy path.
      OPENAI_API_KEY: "${OPENAI_API_KEY:-}"
    extra_hosts:
      # Lets the container reach services on the Docker host (e.g. a locally
      # hosted embeddings server) via the host.docker.internal hostname.
      - "host.docker.internal:host-gateway"
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 10s
      timeout: 5s
      retries: 3
    restart: unless-stopped
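# ---------------------------------------------------------------------------
# Both services read a local .env file. A minimal sketch of what it might
# contain, matching the variables referenced above (every value below is a
# placeholder, not part of the original file):
#
#   GITHUB_ORG=PlateerLab
#   GITHUB_TOKEN=<github-token>
#   MEMBERS_REVALIDATE_SECONDS=1800
#   MEMBERS_REVALIDATE_TOKEN=<random-secret>
#   SYNAPTIC_EMBED_URL=<embeddings-endpoint-url>
#   SYNAPTIC_EMBED_MODEL=default
#   LLM_BASE_URL=https://api.openai.com/v1
#   LLM_MODEL=gpt-4o-mini
#   LLM_API_KEY=<llm-api-key>
#   OPENAI_API_KEY=<legacy-openai-key>
#
# Typical usage: `docker compose up --build`, then open the frontend at
# http://localhost:3100; the backend API is published at http://localhost:8800.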