# docker-compose.yml — 72 lines (67 loc) · 2.28 KB
# Compose file for the Consilium stack: a Node backend API plus a frontend web app,
# sharing a bridge network and a named volume for the SQLite database.
# NOTE: the top-level `version` key is ignored by Compose v2+; kept for
# backward compatibility with older docker-compose binaries.
version: '3.8'

services:
  # Backend API Server
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: consilium-backend
    restart: unless-stopped
    ports:
      - "3801:3801"
    environment:
      - NODE_ENV=production
      - PORT=3801
      - FRONTEND_URL=http://localhost:3800
      - DATABASE_PATH=/app/data/consilium.db
      - LOCAL_MODELS_SEQUENTIAL=true
      - DOCKER_RUNNING=true
      # Add your API keys here or use .env file
      # (OPENROUTER_API_KEY has no `:-` default: Compose warns if it is unset,
      # which is desirable for the one key the stack presumably requires.)
      - OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - GOOGLE_AI_API_KEY=${GOOGLE_AI_API_KEY:-}
      - XAI_API_KEY=${XAI_API_KEY:-}
      - MISTRAL_API_KEY=${MISTRAL_API_KEY:-}
      - GROQ_API_KEY=${GROQ_API_KEY:-}
      # Local model servers
      # Default uses host.docker.internal to reach Ollama/LM Studio on host machine
      # If running Ollama in Docker on same network, use: http://ollama:11434
      - OLLAMA_URL=${OLLAMA_URL:-http://host.docker.internal:11434}
      - LMSTUDIO_URL=${LMSTUDIO_URL:-http://host.docker.internal:1234}
      - LOCALAI_URL=${LOCALAI_URL:-http://host.docker.internal:8080}
      # VRAM Override - Set your GPU VRAM in MB for accurate context size calculation
      # Docker can't detect GPU, so set this to your actual VRAM (e.g., 24000 for 24GB)
      - VRAM_OVERRIDE_MB=${VRAM_OVERRIDE_MB:-}
      # SearXNG Integration (for Research Mode)
      # If SearXNG runs in Docker on same network, use: http://searxng:8080
      # If SearXNG runs on host, use: http://host.docker.internal:4000
      - SEARXNG_URL=${SEARXNG_URL:-http://host.docker.internal:4000}
      - SEARCH_RESULTS_LIMIT=${SEARCH_RESULTS_LIMIT:-10}
    volumes:
      # Named volume so the SQLite database survives container rebuilds
      - consilium-data:/app/data
    networks:
      - consilium-network
    # Access host services (Ollama, LM Studio).
    # `host-gateway` maps host.docker.internal to the host on Linux,
    # matching the behavior Docker Desktop provides on macOS/Windows.
    extra_hosts:
      - "host.docker.internal:host-gateway"

  # Frontend Web App
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    container_name: consilium-frontend
    restart: unless-stopped
    ports:
      - "3800:3800"
    depends_on:
      # Start order only — does not wait for the backend to be healthy
      - backend
    networks:
      - consilium-network

volumes:
  consilium-data:
    name: consilium-data

networks:
  consilium-network:
    name: consilium-network
    driver: bridge