# docker-compose.yml — Nova stack: ollama (LLM runtime), nova (app), frontend (Vite dev server), searxng (metasearch)
services:
  # Local LLM runtime. Published on the host loopback only, so it is not
  # reachable from other machines on the network.
  ollama:
    image: ollama/ollama:0.17.5
    container_name: nova-ollama
    ports:
      - "127.0.0.1:11434:11434"
    environment:
      # Values quoted so the YAML parser keeps them as strings for the env.
      OLLAMA_MAX_LOADED_MODELS: "3"
      OLLAMA_FLASH_ATTENTION: "1"
      OLLAMA_KEEP_ALIVE: "24h"
      OLLAMA_NUM_CTX: "32768"
      # Required models (pull manually or via entrypoint):
      #   ollama pull qwen3.5:27b   (main model)
      #   ollama pull qwen3.5:9b    (vision model)
      #   ollama pull qwen3.5:4b    (fast model for routing)
    deploy:
      resources:
        reservations:
          # GPU access requires the NVIDIA container toolkit on the host.
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
        limits:
          memory: 32g
    volumes:
      - ollama_data:/root/.ollama
      # - C:/data/finetune/merged:/finetune-model:ro  # Temp mount for model import
    healthcheck:
      # "ollama list" only succeeds once the server is accepting requests.
      test: ["CMD", "ollama", "list"]
      interval: 30s
      timeout: 10s
      retries: 3
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    restart: unless-stopped
nova:
build: .
image: nova-app:latest
container_name: nova-app
ports:
- "127.0.0.1:8000:8000"
depends_on:
ollama:
condition: service_healthy
searxng:
condition: service_healthy
# env_file passes LLM_PROVIDER, API keys, DISCORD_TOKEN, TELEGRAM_TOKEN, etc.
env_file: .env
environment:
OLLAMA_URL: http://ollama:11434
SEARXNG_URL: http://searxng:8080
DB_PATH: /data/nova.db
CHROMADB_PATH: /data/chromadb
ANONYMIZED_TELEMETRY: "false"
TRAINING_DATA_PATH: /data/training_data.jsonl
MCP_CONFIG_DIR: /data/mcp
ENABLE_SHELL_EXEC: "true"
XDG_CACHE_HOME: /tmp/.cache
DISPLAY: ":99" # Xvfb virtual display for desktop automation
XAUTHORITY: /tmp/.Xauthority
ENABLE_DESKTOP_AUTOMATION: "true"
volumes:
- nova_data:/data
- ./mcp:/data/mcp:ro
# Uncomment for desktop automation (requires X11 on host):
# - /tmp/.X11-unix:/tmp/.X11-unix:ro
read_only: true
tmpfs:
- /tmp
- /home/nova/.cache
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
mem_limit: 4g
cpus: 4.0
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/api/health"]
interval: 30s
timeout: 10s
retries: 3
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"
restart: unless-stopped
frontend:
build:
context: ./frontend
target: dev
image: nova-frontend:latest
container_name: nova-frontend
ports:
- "127.0.0.1:5173:5173"
depends_on:
nova:
condition: service_healthy
environment:
# Browser-accessible URL (not internal docker network name)
VITE_API_URL: http://localhost:8000
# Allow internal Docker hostnames for inter-container access
__VITE_ADDITIONAL_SERVER_ALLOWED_HOSTS: nova-frontend
volumes:
- ./frontend/src:/app/src # hot-reload source changes
- ./frontend/index.html:/app/index.html
mem_limit: 512m
cpus: 1.0
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"
restart: unless-stopped
searxng:
image: searxng/searxng:latest
container_name: nova-searxng
ports:
- "127.0.0.1:8888:8080"
volumes:
- ./searxng/settings.yml:/etc/searxng/settings.yml:ro
mem_limit: 384m
cpus: 1.0
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "http://localhost:8080/healthz"]
interval: 30s
timeout: 5s
retries: 3
logging:
driver: json-file
options:
max-size: "10m"
max-file: "3"
restart: unless-stopped
# Named volumes: model cache for ollama, persistent app state for nova.
volumes:
  ollama_data:
  nova_data: