-
Notifications
You must be signed in to change notification settings - Fork 104
Expand file tree
/
Copy pathdocker-compose.yaml
More file actions
416 lines (388 loc) · 16.3 KB
/
docker-compose.yaml
File metadata and controls
416 lines (388 loc) · 16.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
# Single user-defined bridge network; every OpenIM service attaches to it so
# containers can reach each other by service name (mongo, redis, etcd, ...).
networks:
  openim:
    driver: bridge
services:
  # MongoDB document store. The inline script starts mongod with auth enabled,
  # waits until the root user can ping it, then idempotently creates the OpenIM
  # application user and keeps the container in the foreground via tail.
  mongo:
    image: "${MONGO_IMAGE}"
    container_name: mongo
    # $$VAR escapes compose interpolation so the container's shell expands the
    # variable at runtime instead of docker compose at parse time.
    command: >
      bash -c '
        docker-entrypoint.sh mongod --wiredTigerCacheSizeGB $$wiredTigerCacheSizeGB --auth &
        until mongosh -u $$MONGO_INITDB_ROOT_USERNAME -p $$MONGO_INITDB_ROOT_PASSWORD --authenticationDatabase admin --eval "db.runCommand({ ping: 1 })" &>/dev/null; do
          echo "Waiting for MongoDB to start..."
          sleep 1
        done &&
        mongosh -u $$MONGO_INITDB_ROOT_USERNAME -p $$MONGO_INITDB_ROOT_PASSWORD --authenticationDatabase admin --eval "
          db = db.getSiblingDB(\"$$MONGO_INITDB_DATABASE\");
          if (!db.getUser(\"$$MONGO_OPENIM_USERNAME\")) {
            db.createUser({
              user: \"$$MONGO_OPENIM_USERNAME\",
              pwd: \"$$MONGO_OPENIM_PASSWORD\",
              roles: [{role: \"readWrite\", db: \"$$MONGO_INITDB_DATABASE\"}]
            });
            print(\"User created successfully: \");
            print(\"Username: $$MONGO_OPENIM_USERNAME\");
            print(\"Password: $$MONGO_OPENIM_PASSWORD\");
            print(\"Database: $$MONGO_INITDB_DATABASE\");
          } else {
            print(\"User already exists in database: $$MONGO_INITDB_DATABASE, Username: $$MONGO_OPENIM_USERNAME\");
          }
        " &&
        tail -f /dev/null
      '
    volumes:
      - "${DATA_DIR}/components/mongodb/data/db:/data/db"
      - "${DATA_DIR}/components/mongodb/data/logs:/data/logs"
      - "${DATA_DIR}/components/mongodb/data/conf:/etc/mongo"
    environment:
      - TZ=Asia/Shanghai
      # Read by the command above and passed to --wiredTigerCacheSizeGB.
      - wiredTigerCacheSizeGB=1
      # NOTE(review): root credentials are hard-coded here while the app user
      # comes from .env — confirm this is intentional outside local development.
      - MONGO_INITDB_ROOT_USERNAME=root
      - MONGO_INITDB_ROOT_PASSWORD=openIM123
      - MONGO_INITDB_DATABASE=openim_v3
      - MONGO_OPENIM_USERNAME=${MONGO_USERNAME}
      - MONGO_OPENIM_PASSWORD=${MONGO_PASSWORD}
    restart: always
    networks:
      - openim
redis:
image: "${REDIS_IMAGE}"
container_name: redis
volumes:
- "${DATA_DIR}/components/redis/data:/data"
- "${DATA_DIR}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf"
environment:
TZ: Asia/Shanghai
restart: always
sysctls:
net.core.somaxconn: 1024
command:
[
"redis-server",
"/usr/local/redis/config/redis.conf",
"--requirepass",
"${REDIS_PASSWORD}",
"--appendonly",
"yes",
]
networks:
- openim
etcd:
image: "${ETCD_IMAGE}"
container_name: etcd
ports:
- "12379:2379"
- "12380:2380"
environment:
- ETCD_NAME=s1
- ETCD_DATA_DIR=/etcd-data
- ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
- ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379
- ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
- ETCD_INITIAL_ADVERTISE_PEER_URLS=http://0.0.0.0:2380
- ETCD_INITIAL_CLUSTER=s1=http://0.0.0.0:2380
- ETCD_INITIAL_CLUSTER_TOKEN=tkn
- ETCD_INITIAL_CLUSTER_STATE=new
- ALLOW_NONE_AUTHENTICATION=no
## Optional: Enable etcd authentication by setting the following credentials
# - ETCD_ROOT_USER=root
# - ETCD_ROOT_PASSWORD=openIM123
# - ETCD_USERNAME=openIM
# - ETCD_PASSWORD=openIM123
volumes:
- "${DATA_DIR}/components/etcd:/etcd-data"
command: >
/bin/sh -c '
etcd &
export ETCDCTL_API=3
echo "Waiting for etcd to become healthy..."
until etcdctl --endpoints=http://127.0.0.1:2379 endpoint health &>/dev/null; do
echo "Waiting for ETCD to start..."
sleep 1
done
echo "etcd is healthy."
if [ -n "$${ETCD_ROOT_USER}" ] && [ -n "$${ETCD_ROOT_PASSWORD}" ] && [ -n "$${ETCD_USERNAME}" ] && [ -n "$${ETCD_PASSWORD}" ]; then
echo "Authentication credentials provided. Setting up authentication..."
echo "Checking authentication status..."
if ! etcdctl --endpoints=http://127.0.0.1:2379 auth status | grep -q "Authentication Status: true"; then
echo "Authentication is disabled. Creating users and enabling..."
# Create users and setup permissions
etcdctl --endpoints=http://127.0.0.1:2379 user add $${ETCD_ROOT_USER} --new-user-password=$${ETCD_ROOT_PASSWORD} || true
etcdctl --endpoints=http://127.0.0.1:2379 user add $${ETCD_USERNAME} --new-user-password=$${ETCD_PASSWORD} || true
etcdctl --endpoints=http://127.0.0.1:2379 role add openim-role || true
etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission openim-role --prefix=true readwrite / || true
etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission openim-role --prefix=true readwrite "" || true
etcdctl --endpoints=http://127.0.0.1:2379 user grant-role $${ETCD_USERNAME} openim-role || true
etcdctl --endpoints=http://127.0.0.1:2379 user grant-role $${ETCD_ROOT_USER} $${ETCD_USERNAME} root || true
echo "Enabling authentication..."
etcdctl --endpoints=http://127.0.0.1:2379 auth enable
echo "Authentication enabled successfully"
else
echo "Authentication is already enabled. Checking OpenIM user..."
# Check if openIM user exists and can perform operations
if ! etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_USERNAME}:$${ETCD_PASSWORD} put /test/auth "auth-check" &>/dev/null; then
echo "OpenIM user test failed. Recreating user with root credentials..."
# Try to create/update the openIM user using root credentials
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_ROOT_USER}:$${ETCD_ROOT_PASSWORD} user add $${ETCD_USERNAME} --new-user-password=$${ETCD_PASSWORD} --no-password-file || true
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_ROOT_USER}:$${ETCD_ROOT_PASSWORD} role add openim-role || true
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_ROOT_USER}:$${ETCD_ROOT_PASSWORD} role grant-permission openim-role --prefix=true readwrite / || true
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_ROOT_USER}:$${ETCD_ROOT_PASSWORD} role grant-permission openim-role --prefix=true readwrite "" || true
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_ROOT_USER}:$${ETCD_ROOT_PASSWORD} user grant-role $${ETCD_USERNAME} openim-role || true
etcdctl --endpoints=http://127.0.0.1:2379 user grant-role $${ETCD_ROOT_USER} $${ETCD_USERNAME} root || true
echo "OpenIM user recreated with required permissions"
else
echo "OpenIM user exists and has correct permissions"
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_USERNAME}:$${ETCD_PASSWORD} del /test/auth &>/dev/null
fi
fi
echo "Testing authentication with OpenIM user..."
if etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_USERNAME}:$${ETCD_PASSWORD} put /test/auth "auth-works"; then
echo "Authentication working properly"
etcdctl --endpoints=http://127.0.0.1:2379 --user=$${ETCD_USERNAME}:$${ETCD_PASSWORD} del /test/auth
else
echo "WARNING: Authentication test failed"
fi
else
echo "No authentication credentials provided. Running in no-auth mode."
echo "To enable authentication, set ETCD_ROOT_USER, ETCD_ROOT_PASSWORD, ETCD_USERNAME, and ETCD_PASSWORD environment variables."
fi
tail -f /dev/null
'
restart: always
networks:
- openim
kafka:
image: "${KAFKA_IMAGE}"
container_name: kafka
user: root
restart: always
volumes:
- "${DATA_DIR}/components/kafka:/bitnami/kafka"
environment:
#KAFKA_HEAP_OPTS: "-Xms128m -Xmx256m"
TZ: Asia/Shanghai
# Unique identifier for the Kafka node (required in controller mode)
KAFKA_CFG_NODE_ID: 0
# Defines the roles this Kafka node plays: broker, controller, or both
KAFKA_CFG_PROCESS_ROLES: controller,broker
# Specifies which nodes are controller nodes for quorum voting.
# The syntax follows the KRaft mode (no ZooKeeper): node.id@host:port
# The controller listener endpoint here is kafka:9093
KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@kafka:9093
# Specifies which listener is used for controller-to-controller communication
KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
# Default number of partitions for new topics
KAFKA_NUM_PARTITIONS: 8
# Whether to enable automatic topic creation
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true"
# Kafka internal listeners; Kafka supports multiple ports with different protocols
# Each port is used for a specific purpose: INTERNAL for internal broker communication,
# CONTROLLER for controller communication, EXTERNAL for external client connections.
# These logical listener names are mapped to actual protocols via KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
# In short, Kafka is listening on three logical ports: 9092 for internal communication,
# 9093 for controller traffic, and 9094 for external access.
KAFKA_CFG_LISTENERS: "INTERNAL://:9092,CONTROLLER://:9093,EXTERNAL://:9094"
# Addresses advertised to clients. INTERNAL://kafka:9092 uses the internal Docker service name 'kafka',
# so other containers can access Kafka via kafka:9092.
# EXTERNAL://localhost:19094 is the address external clients (e.g., in the LAN) should use to connect.
# If Kafka is deployed on a different machine than IM, 'localhost'(or 'kafka') should be replaced with the LAN IP.
KAFKA_CFG_ADVERTISED_LISTENERS: "INTERNAL://kafka:9092,EXTERNAL://kafka:9094"
# Maps logical listener names to actual protocols.
# Supported protocols include: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT"
# Defines which listener is used for inter-broker communication within the Kafka cluster
KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL"
# Authentication configuration variables - comment out to disable auth
# KAFKA_USERNAME: "openIM"
# KAFKA_PASSWORD: "openIM123"
command: >
/bin/sh -c '
if [ -n "$${KAFKA_USERNAME}" ] && [ -n "$${KAFKA_PASSWORD}" ]; then
echo "=== Kafka SASL Authentication ENABLED ==="
echo "Username: $${KAFKA_USERNAME}"
# Set environment variables for SASL authentication
export KAFKA_CFG_LISTENERS="SASL_PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094"
export KAFKA_CFG_ADVERTISED_LISTENERS="SASL_PLAINTEXT://kafka:9092,EXTERNAL://kafka:9094"
export KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP="CONTROLLER:PLAINTEXT,EXTERNAL:SASL_PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT"
export KAFKA_CFG_SASL_ENABLED_MECHANISMS="PLAIN"
export KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL="PLAIN"
export KAFKA_CFG_INTER_BROKER_LISTENER_NAME="SASL_PLAINTEXT"
export KAFKA_CLIENT_USERS="$${KAFKA_USERNAME}"
export KAFKA_CLIENT_PASSWORDS="$${KAFKA_PASSWORD}"
fi
# Start Kafka with the configured environment
exec /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh
'
networks:
- openim
minio:
image: "${MINIO_IMAGE}"
ports:
- "${MINIO_PORT}:9000"
- "${MINIO_CONSOLE_PORT}:9090"
container_name: minio
volumes:
- "${DATA_DIR}/components/mnt/data:/data"
- "${DATA_DIR}/components/mnt/config:/root/.minio"
environment:
TZ: Asia/Shanghai
MINIO_ROOT_USER: "${MINIO_ACCESS_KEY_ID}"
MINIO_ROOT_PASSWORD: "${MINIO_SECRET_ACCESS_KEY}"
restart: always
command: minio server /data --console-address ':9090'
networks:
- openim
openim-web-front:
image: ${OPENIM_WEB_FRONT_IMAGE}
container_name: openim-web-front
restart: always
ports:
- "${OPENIM_WEB_FRONT_PORT}:80"
networks:
- openim
openim-admin-front:
image: ${OPENIM_ADMIN_FRONT_IMAGE}
container_name: openim-admin-front
restart: always
ports:
- "${OPENIM_ADMIN_FRONT_PORT}:80"
networks:
- openim
  # Prometheus metrics server. Only started with the "m" (monitoring) profile.
  prometheus:
    image: ${PROMETHEUS_IMAGE}
    container_name: prometheus
    restart: always
    # Runs as root so it can write to the bind-mounted data directory.
    user: root
    profiles:
      - m
    volumes:
      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
      - ${DATA_DIR}/components/prometheus/data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--web.listen-address=:${PROMETHEUS_PORT}"
    # Host networking: the listen address above binds directly on the host,
    # so no "ports:" mapping (and no "networks:") applies to this service.
    network_mode: host
  # Alertmanager for Prometheus alerts. Only started with the "m" profile.
  alertmanager:
    image: ${ALERTMANAGER_IMAGE}
    container_name: alertmanager
    restart: always
    profiles:
      - m
    volumes:
      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
      # Email notification template used by the alertmanager config.
      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
    command:
      - "--config.file=/etc/alertmanager/alertmanager.yml"
      - "--web.listen-address=:${ALERTMANAGER_PORT}"
    # Host networking: binds ${ALERTMANAGER_PORT} directly on the host.
    network_mode: host
  # Grafana dashboards. Only started with the "m" (monitoring) profile.
  grafana:
    image: ${GRAFANA_IMAGE}
    container_name: grafana
    # Runs as root so it can write to the bind-mounted data directory.
    user: root
    restart: always
    profiles:
      - m
    environment:
      # Allow embedding dashboards in iframes (used by the admin console).
      - GF_SECURITY_ALLOW_EMBEDDING=true
      - GF_SESSION_COOKIE_SAMESITE=none
      - GF_SESSION_COOKIE_SECURE=true
      # NOTE(review): anonymous access with the Admin role leaves Grafana wide
      # open — acceptable only on a trusted network; confirm before exposing.
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_SERVER_HTTP_PORT=${GRAFANA_PORT}
    volumes:
      # Falls back to ./ when DATA_DIR is unset.
      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
    # Host networking: binds ${GRAFANA_PORT} directly on the host.
    network_mode: host
  # Host metrics exporter for Prometheus. Only started with the "m" profile.
  node-exporter:
    image: ${NODE_EXPORTER_IMAGE}
    container_name: node-exporter
    restart: always
    profiles:
      - m
    # Read-only mounts of the host proc/sys/root filesystems so the exporter
    # reports host-level (not container-level) statistics.
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
      - "--path.rootfs=/rootfs"
      - "--web.listen-address=:${NODE_EXPORTER_PORT}"
    # Host networking: binds ${NODE_EXPORTER_PORT} directly on the host.
    network_mode: host
  # Core OpenIM server: message gateway on 10001, HTTP API on 10002.
  openim-server:
    image: ${OPENIM_SERVER_IMAGE}
    container_name: openim-server
    # Run a minimal PID-1 inside the container for signal/zombie handling.
    init: true
    ports:
      - "${OPENIM_MSG_GATEWAY_PORT}:10001"
      - "${OPENIM_API_PORT}:10002"
    healthcheck:
      # "mage check" is provided inside the server image; non-zero exit marks
      # the container unhealthy.
      test: ["CMD", "sh", "-c", "mage check"]
      interval: 5s
      timeout: 60s
      retries: 10
    # IMENV_* variables override the corresponding keys of the server's config;
    # values are supplied by the accompanying .env file.
    environment:
      - IMENV_MONGODB_ADDRESS=${MONGO_ADDRESS}
      - IMENV_MONGODB_USERNAME=${MONGO_USERNAME}
      - IMENV_MONGODB_PASSWORD=${MONGO_PASSWORD}
      - IMENV_KAFKA_ADDRESS=${KAFKA_ADDRESS}
      - IMENV_KAFKA_USERNAME=${KAFKA_USERNAME}
      - IMENV_KAFKA_PASSWORD=${KAFKA_PASSWORD}
      - IMENV_DISCOVERY_ETCD_ADDRESS=${ETCD_ADDRESS}
      - IMENV_REDIS_ADDRESS=${REDIS_ADDRESS}
      - IMENV_REDIS_PASSWORD=${REDIS_PASSWORD}
      - IMENV_DISCOVERY_ETCD_USERNAME=${ETCD_USERNAME}
      - IMENV_DISCOVERY_ETCD_PASSWORD=${ETCD_PASSWORD}
      - IMENV_MINIO_INTERNALADDRESS=${MINIO_INTERNAL_ADDRESS}
      - IMENV_MINIO_EXTERNALADDRESS=${MINIO_EXTERNAL_ADDRESS}
      - IMENV_MINIO_ACCESSKEYID=${MINIO_ACCESS_KEY_ID}
      - IMENV_MINIO_SECRETACCESSKEY=${MINIO_SECRET_ACCESS_KEY}
      - IMENV_SHARE_SECRET=${OPENIM_SECRET}
      - IMENV_LOG_ISSTDOUT=${LOG_IS_STDOUT}
      - IMENV_LOG_REMAINLOGLEVEL=${LOG_LEVEL}
      - IMENV_OPENIM_API_PROMETHEUS_GRAFANAURL=${GRAFANA_URL}
    restart: always
    # depends_on only orders container startup; it does not wait for readiness.
    depends_on:
      - mongo
      - redis
      - etcd
      - kafka
      - minio
    networks:
      - openim
  # OpenIM chat/admin business server: chat API on 10008, admin API on 10009.
  openim-chat:
    image: ${OPENIM_CHAT_IMAGE}
    container_name: openim-chat
    # Run a minimal PID-1 inside the container for signal/zombie handling.
    init: true
    healthcheck:
      # "mage check" is provided inside the chat image; non-zero exit marks
      # the container unhealthy.
      test: ["CMD", "sh", "-c", "mage check"]
      interval: 5s
      timeout: 60s
      retries: 10
    # CHATENV_* variables override the corresponding keys of the chat config;
    # values are supplied by the accompanying .env file.
    environment:
      - CHATENV_MONGODB_ADDRESS=${MONGO_ADDRESS}
      - CHATENV_MONGODB_USERNAME=${MONGO_USERNAME}
      - CHATENV_MONGODB_PASSWORD=${MONGO_PASSWORD}
      - CHATENV_DISCOVERY_ETCD_ADDRESS=${ETCD_ADDRESS}
      - CHATENV_REDIS_ADDRESS=${REDIS_ADDRESS}
      - CHATENV_REDIS_PASSWORD=${REDIS_PASSWORD}
      - CHATENV_SHARE_OPENIM_SECRET=${OPENIM_SECRET}
      - CHATENV_DISCOVERY_ETCD_USERNAME=${ETCD_USERNAME}
      - CHATENV_DISCOVERY_ETCD_PASSWORD=${ETCD_PASSWORD}
      - CHATENV_SHARE_OPENIM_APIURL=${API_URL}
      - CHATENV_LOG_ISSTDOUT=${LOG_IS_STDOUT}
      - CHATENV_LOG_REMAINLOGLEVEL=${LOG_LEVEL}
    ports:
      - "${CHAT_API_PORT}:10008"
      - "${ADMIN_API_PORT}:10009"
    restart: always
    # depends_on only orders container startup; it does not wait for readiness.
    depends_on:
      - mongo
      - redis
      - etcd
      - kafka
      - minio
      - openim-server
    networks:
      - openim