-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
144 lines (136 loc) · 3.64 KB
/
docker-compose.yml
File metadata and controls
144 lines (136 loc) · 3.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
# Compose file for a multi-vendor S3-compatible storage test matrix.
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification — modern `docker compose` warns and ignores it. Kept only
# for compatibility with legacy docker-compose binaries.
version: '3.8'
services:
# MinIO - High-performance S3 compatible storage
minio:
image: quay.io/minio/minio:latest
container_name: msst-minio
ports:
- "9000:9000"
- "9001:9001"
environment:
MINIO_ROOT_USER: minioadmin
MINIO_ROOT_PASSWORD: minioadmin
MINIO_DOMAIN: localhost
command: server /data --console-address ":9001"
volumes:
- minio-data:/data
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
networks:
- s3-network
# RustFS - High-performance Rust-based S3 compatible storage
rustfs:
image: rustfs/rustfs:latest
container_name: msst-rustfs
ports:
- "9002:9000" # S3 API (mapped to 9002 to avoid MinIO conflict)
- "9003:9001" # Console (mapped to 9003 to avoid MinIO conflict)
environment:
RUSTFS_ROOT_USER: rustfsadmin
RUSTFS_ROOT_PASSWORD: rustfsadmin
volumes:
- rustfs-data:/data
- rustfs-logs:/logs
user: "10001:10001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
networks:
- s3-network
# LocalStack - AWS services emulator
localstack:
image: localstack/localstack:latest
container_name: msst-localstack
ports:
- "4566:4566"
- "4510-4559:4510-4559"
environment:
- SERVICES=s3
- DEBUG=0
- DATA_DIR=/tmp/localstack/data
- DOCKER_HOST=unix:///var/run/docker.sock
- AWS_ACCESS_KEY_ID=test
- AWS_SECRET_ACCESS_KEY=test
- AWS_DEFAULT_REGION=us-east-1
volumes:
- localstack-data:/tmp/localstack
- /var/run/docker.sock:/var/run/docker.sock
networks:
- s3-network
# Garage - Distributed S3-compatible storage
garage:
image: dxflrs/garage:v0.9.1
container_name: msst-garage
ports:
- "3900:3900" # S3 API
- "3902:3902" # Admin API
environment:
GARAGE_RPC_SECRET: "$(openssl rand -hex 32)"
GARAGE_ADMIN_TOKEN: "admin-token"
GARAGE_METRICS_TOKEN: "metrics-token"
volumes:
- garage-meta:/var/lib/garage/meta
- garage-data:/var/lib/garage/data
- ./docker/garage.toml:/etc/garage.toml
command: ["garage", "server"]
networks:
- s3-network
# SeaweedFS - Distributed storage system
seaweedfs:
image: chrislusf/seaweedfs:latest
container_name: msst-seaweedfs
ports:
- "8333:8333" # S3 API
- "9333:9333" # Master server
- "8080:8080" # Volume server
- "8888:8888" # Filer
environment:
WEED_MASTER_PORT: 9333
WEED_VOLUME_PORT: 8080
WEED_FILER_PORT: 8888
WEED_S3_PORT: 8333
command: "server -s3 -s3.port=8333"
volumes:
- seaweedfs-data:/data
networks:
- s3-network
# Ceph with RadosGW (S3 interface)
# Note: This is a simplified single-node setup for testing
ceph:
image: quay.io/ceph/demo:latest
container_name: msst-ceph
environment:
CEPH_DEMO_UID: demo
CEPH_DEMO_ACCESS_KEY: demo-access-key
CEPH_DEMO_SECRET_KEY: demo-secret-key
CEPH_DEMO_BUCKET: demo-bucket
RGW_NAME: localhost
NETWORK_AUTO_DETECT: 4
CEPH_PUBLIC_NETWORK: 172.20.0.0/24
ports:
- "8082:8080" # RadosGW S3 API
volumes:
- ceph-data:/var/lib/ceph
networks:
s3-network:
ipv4_address: 172.20.0.10
# Named volumes persist each backend's data across container restarts.
volumes:
  minio-data:
  rustfs-data:
  rustfs-logs:
  localstack-data:
  garage-meta:
  garage-data:
  seaweedfs-data:
  ceph-data:

# Shared bridge network with a fixed subnet so the Ceph service can claim a
# static address (172.20.0.10) and CEPH_PUBLIC_NETWORK stays in sync.
networks:
  s3-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/24