-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: docker-compose.yml
More file actions
55 lines (52 loc) · 1.34 KB
/
docker-compose.yml
File metadata and controls
55 lines (52 loc) · 1.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
# Docker Compose stack: an Ollama model server plus a Node API front end,
# joined on a private bridge network. Indentation reconstructed — the pasted
# source had all leading whitespace stripped.
services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama-service
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_KEEP_ALIVE=60m  # Keep model in memory for 60 minutes
      - OLLAMA_NUM_PARALLEL=2  # Process multiple requests in parallel
      - OLLAMA_MAX_LOADED_MODELS=1  # Keep only 1 model loaded to save memory
    # NOTE(review): `deploy.resources` is honored by Swarm and by Compose v2;
    # legacy docker-compose v1 needs `--compatibility` — confirm the runtime.
    deploy:
      resources:
        limits:
          memory: 8G  # Maximum memory allocation
        reservations:
          memory: 6G  # Reserved memory allocation
    # `shm_size` is a service-level key in the Compose schema (not valid under
    # `deploy`), so it lives here rather than inside the resources stanza.
    shm_size: 2G  # Shared memory for faster processing
    networks:
      - ai-network
    restart: unless-stopped

  api:
    build: ./api
    container_name: government-ai-api
    ports:
      - "3000:3000"
    volumes:
      - ./api/config:/app/config
      - ./api/.env:/app/.env
    env_file:
      - ./api/.env
    environment:
      - NODE_ENV=production
    depends_on:
      - ollama
    deploy:
      resources:
        limits:
          memory: 1G  # API doesn't need much memory
        reservations:
          memory: 512M
    networks:
      - ai-network
    restart: unless-stopped

volumes:
  ollama_data:
    driver: local

networks:
  ai-network:
    driver: bridge