-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
150 lines (143 loc) · 4.75 KB
/
docker-compose.yml
File metadata and controls
150 lines (143 loc) · 4.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
# ============================================================================
# Docker Compose for CodeRAG — full stack
#
# Usage:
#   docker compose up                      # Core: coderag + ollama
#   docker compose --profile viewer up     # Core + viewer SPA
#   docker compose --profile qdrant up     # Core + Qdrant vector store
#   docker compose up -d                   # Detached mode
# ============================================================================
services:
  # --------------------------------------------------------------------------
  # Ollama — local LLM / embedding model server
  # --------------------------------------------------------------------------
  ollama:
    image: ollama/ollama:latest
    container_name: coderag-ollama
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama
    # GPU support (uncomment for NVIDIA GPU)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    restart: unless-stopped
    networks:
      - coderag-net
    healthcheck:
      test: ["CMD", "ollama", "list"]
      interval: 30s
      timeout: 10s
      # Give the server time to come up before failed probes count as retries
      # (consistent with the coderag healthcheck below).
      start_period: 15s
      retries: 3
  # --------------------------------------------------------------------------
  # Ollama Init — pull required models before CodeRAG starts.
  # Uses the same ollama image (has the CLI) as a *client*: OLLAMA_HOST points
  # at the ollama service, so pulled models land in that server's ollama-data
  # volume — no model volume needs to be mounted here.
  # --------------------------------------------------------------------------
  ollama-init:
    image: ollama/ollama:latest
    container_name: coderag-ollama-init
    volumes:
      - ./scripts/ollama-init.sh:/init.sh:ro
    entrypoint: ["/bin/sh", "/init.sh"]
    environment:
      OLLAMA_HOST: http://ollama:11434
      CODERAG_EMBEDDING_MODEL: ${CODERAG_EMBEDDING_MODEL:-nomic-embed-text}
      CODERAG_ENRICHMENT_MODEL: ${CODERAG_ENRICHMENT_MODEL:-qwen2.5-coder:1.5b}
    depends_on:
      ollama:
        condition: service_healthy
    networks:
      - coderag-net
    # "no" must stay quoted — unquoted it is YAML boolean false.
    restart: "no"
  # --------------------------------------------------------------------------
  # CodeRAG — MCP server + API server + CLI
  # --------------------------------------------------------------------------
  coderag:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: coderag
    ports:
      - "3000:3000"   # API server
      - "3001:3001"   # MCP server (SSE transport)
    volumes:
      # Source code to index (read-only)
      - ${CODERAG_SOURCE_DIR:-.}:/workspace:ro
      # Persistent data (LanceDB, BM25 index, graph)
      - coderag-data:/data/.coderag
    environment:
      # Env values are strings; quote numbers so YAML doesn't type them as ints
      # (matches the quoted QDRANT__SERVICE__GRPC_PORT convention below).
      CODERAG_PORT: "3000"
      CODERAG_MCP_PORT: "3001"
      CODERAG_ROOT_DIR: /workspace
      CODERAG_STORAGE_PATH: /data/.coderag
      CODERAG_EMBEDDING_PROVIDER: ${CODERAG_EMBEDDING_PROVIDER:-ollama}
      CODERAG_EMBEDDING_MODEL: ${CODERAG_EMBEDDING_MODEL:-nomic-embed-text}
      OLLAMA_HOST: http://ollama:11434
      NODE_ENV: production
    depends_on:
      ollama:
        condition: service_healthy
      ollama-init:
        condition: service_completed_successfully
    networks:
      - coderag-net
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/health"]
      interval: 30s
      timeout: 5s
      start_period: 15s
      retries: 3
    restart: unless-stopped
    # Default command: start the API server (override for other commands)
    command: ["serve", "--port", "3000"]
  # --------------------------------------------------------------------------
  # Viewer — CodeRAG visualization SPA (optional profile)
  # --------------------------------------------------------------------------
  viewer:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: coderag-viewer
    profiles:
      - viewer   # Only starts with: docker compose --profile viewer up
    ports:
      - "5173:5173"
    environment:
      CODERAG_VIEWER_PORT: "5173"
      CODERAG_API_URL: http://coderag:3000
      NODE_ENV: production
    depends_on:
      coderag:
        condition: service_healthy
    networks:
      - coderag-net
    command: ["viewer", "--port", "5173"]
  # --------------------------------------------------------------------------
  # Qdrant — alternative vector store (optional profile)
  # --------------------------------------------------------------------------
  qdrant:
    image: qdrant/qdrant:latest
    container_name: coderag-qdrant
    profiles:
      - qdrant   # Only starts with: docker compose --profile qdrant up
    ports:
      - "6333:6333"
      - "6334:6334"
    volumes:
      - qdrant-data:/qdrant/storage
    environment:
      QDRANT__SERVICE__GRPC_PORT: "6334"
    networks:
      - coderag-net
networks:
  coderag-net:
    driver: bridge
volumes:
  coderag-data:
  ollama-data:
  qdrant-data: