Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
fff7431
Fix Docker image tag to use lowercase
ServerSideHannes Jan 14, 2026
7d4dffe
fix: wip
ServerSideHannes Jan 15, 2026
e62533c
fix: wip
ServerSideHannes Jan 15, 2026
8da9694
fix: wip
ServerSideHannes Jan 15, 2026
4eb73d7
Add gateway service for request-level load balancing via ingress
ServerSideHannes Jan 20, 2026
4163dcc
Clean up repo: remove flowchart script, gitignore helm deps
ServerSideHannes Jan 20, 2026
5ab0a2e
Remove tracked helm dependency (downloaded at build time)
ServerSideHannes Jan 20, 2026
70c3a35
Simplify Makefile and add auto-cleanup to cluster tests
ServerSideHannes Jan 20, 2026
f235402
Simplify e2e tests and add cluster-test CI workflow
ServerSideHannes Jan 20, 2026
3c3e32d
Remove MinIO from Helm chart, update README
ServerSideHannes Jan 20, 2026
3e4a9f8
Add S3 operations, internal prefix metadata, fix cluster test
ServerSideHannes Jan 20, 2026
e656566
Make Redis optional, improve README documentation
ServerSideHannes Jan 21, 2026
8c2f666
Improve gateway/ingress documentation in README
ServerSideHannes Jan 21, 2026
b1a9dd8
Improve gateway/ingress configuration table in README
ServerSideHannes Jan 21, 2026
9e85b9f
Fix gateway documentation - it's a DNS alias, not load balancer
ServerSideHannes Jan 21, 2026
8d3ddff
Add endpoint URL examples to production deployment docs
ServerSideHannes Jan 21, 2026
f3e34f9
Add recommendation note for gateway in endpoint examples
ServerSideHannes Jan 21, 2026
3d9f76e
Integrate Architecture section into How It Works
ServerSideHannes Jan 21, 2026
93f0ee6
Consolidate Configuration sections in README
ServerSideHannes Jan 21, 2026
bebbe7f
Add gateway recommendation to Production Deployment docs
ServerSideHannes Jan 21, 2026
3ba6a84
Clarify gateway recommendation is for internal access only
ServerSideHannes Jan 21, 2026
9308fe9
Add pod scheduling options to Helm chart (nodeSelector, affinity, tolerations)
ServerSideHannes Jan 21, 2026
13b4ddd
Add daily helm install smoke test and update Docker publish workflow
ServerSideHannes Jan 21, 2026
d04861c
Use dynamic repository owner in workflows
ServerSideHannes Jan 21, 2026
ada02d4
fix: updated timeout
ServerSideHannes Jan 21, 2026
28f9005
fix: updated timeout
ServerSideHannes Jan 21, 2026
5f1b214
fix: updated timeout
ServerSideHannes Jan 21, 2026
a35f445
fix: improve cluster test visibility and reduce timeout issues
ServerSideHannes Jan 21, 2026
68bad57
fix: increase timeout and reduce redis replicas for slow CI runners
ServerSideHannes Jan 21, 2026
3fb8c3c
fix: disable replica requirements for single-node redis in CI
ServerSideHannes Jan 22, 2026
1c871df
fix: replace redis-ha with simple Redis for faster CI
ServerSideHannes Jan 22, 2026
34c1932
fix: use simple Redis in CI, full redis-ha locally
ServerSideHannes Jan 22, 2026
589901a
fix: add Redis and 3 replicas to helm-install-test
ServerSideHannes Jan 22, 2026
bba52cb
fix: pass CI env var and reduce CPU for CI runners
ServerSideHannes Jan 22, 2026
eff5ceb
fix: remove TTY flag and add error checking in load test
ServerSideHannes Jan 22, 2026
99689ae
fix: reduce test file size from 512MB to 10MB for CI
ServerSideHannes Jan 22, 2026
bc78b31
refactor: separate CI cluster-test from local docker-compose
ServerSideHannes Jan 22, 2026
838f429
fix: add helm repo before dependency build in cluster-test
ServerSideHannes Jan 22, 2026
5833a7f
feat: publish helm chart on main push with 0.0.0-latest version
ServerSideHannes Jan 22, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
279 changes: 279 additions & 0 deletions .github/workflows/cluster-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,279 @@
# End-to-end cluster test: builds the s3proxy image, deploys it (plus MinIO
# and Redis backends) into a Kind cluster via the Helm chart, then exercises
# upload/download round-trips, at-rest encryption, and load balancing.
name: Cluster Test

on:
  pull_request:
    paths:
      - 'manifests/**'
      - 'src/**'
      - 'Dockerfile'
      - 'e2e/**'
  workflow_dispatch:

jobs:
  cluster-test:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # load: true keeps the image in the runner's local Docker daemon so it
      # can be side-loaded into Kind without pushing to a registry.
      - name: Build s3proxy image
        uses: docker/build-push-action@v6
        with:
          context: .
          load: true
          tags: s3proxy:latest

      - name: Create Kind cluster
        uses: helm/kind-action@v1
        with:
          node_image: kindest/node:v1.29.2
          cluster_name: cluster-test

      - name: Load image into Kind
        run: kind load docker-image s3proxy:latest --name cluster-test

      - name: Create namespace
        run: kubectl create namespace s3proxy

      # Single-replica MinIO is the S3 backend the proxy writes to.
      - name: Deploy MinIO
        run: |
          cat <<EOF | kubectl apply -n s3proxy -f -
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: minio
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: minio
            template:
              metadata:
                labels:
                  app: minio
              spec:
                containers:
                  - name: minio
                    image: minio/minio:latest
                    args: ["server", "/data"]
                    env:
                      - name: MINIO_ROOT_USER
                        value: minioadmin
                      - name: MINIO_ROOT_PASSWORD
                        value: minioadmin
                    ports:
                      - containerPort: 9000
          ---
          apiVersion: v1
          kind: Service
          metadata:
            name: minio
          spec:
            selector:
              app: minio
            ports:
              - port: 9000
          EOF
          kubectl wait --for=condition=ready pod -l app=minio -n s3proxy --timeout=120s

      # Plain single-node Redis; the chart's redis-ha dependency is disabled
      # below to keep CI fast on a single runner.
      - name: Deploy Redis
        run: |
          cat <<EOF | kubectl apply -n s3proxy -f -
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: redis
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: redis
            template:
              metadata:
                labels:
                  app: redis
              spec:
                containers:
                  - name: redis
                    image: redis:7-alpine
                    ports:
                      - containerPort: 6379
                    resources:
                      limits:
                        memory: 128Mi
                        cpu: 100m
          ---
          apiVersion: v1
          kind: Service
          metadata:
            name: redis
          spec:
            selector:
              app: redis
            ports:
              - port: 6379
          EOF
          kubectl wait --for=condition=ready pod -l app=redis -n s3proxy --timeout=120s

      - name: Build Helm dependencies
        run: |
          helm repo add dandydeveloper https://dandydeveloper.github.io/charts
          helm repo update
          helm dependency build manifests/

      # Low CPU requests/limits so three replicas fit on one CI node.
      - name: Install s3proxy chart
        run: |
          helm install s3proxy manifests/ \
            --namespace s3proxy \
            --set image.repository=s3proxy \
            --set image.tag=latest \
            --set image.pullPolicy=IfNotPresent \
            --set s3.host="http://minio:9000" \
            --set secrets.encryptKey="test-encryption-key-32chars!!" \
            --set secrets.awsAccessKeyId=minioadmin \
            --set secrets.awsSecretAccessKey=minioadmin \
            --set redis-ha.enabled=false \
            --set externalRedis.url="redis://redis:6379/0" \
            --set replicaCount=3 \
            --set resources.limits.cpu=100m \
            --set resources.requests.cpu=50m \
            --wait \
            --timeout 5m

      - name: Verify pods are running
        run: |
          kubectl get pods -n s3proxy
          # grep -c prints "0" (and exits 1) when nothing matches, so
          # "|| true" keeps POD_COUNT a single clean number.
          POD_COUNT=$(kubectl get pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python --no-headers | grep -c Running || true)
          if [ "$POD_COUNT" -lt 3 ]; then
            echo "Expected 3 s3proxy pods, got $POD_COUNT"
            exit 1
          fi
          echo "✓ All 3 s3proxy pods running"

      - name: Run load test
        run: |
          # Record per-pod log line counts so the load-balancing check can
          # count only requests generated by this test run.
          PODS=$(kubectl get pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python -o jsonpath='{.items[*].metadata.name}')
          echo "Found pods: $PODS"

          for pod in $PODS; do
            kubectl logs $pod -n s3proxy 2>/dev/null | wc -l > /tmp/$pod.start
          done

          # Run the whole S3 exercise from an aws-cli pod inside the cluster,
          # talking to the proxy service (not MinIO directly).
          kubectl run s3-load-test -n s3proxy --rm -i --restart=Never \
            --image=amazon/aws-cli:latest \
            --env="AWS_ACCESS_KEY_ID=minioadmin" \
            --env="AWS_SECRET_ACCESS_KEY=minioadmin" \
            --env="AWS_DEFAULT_REGION=us-east-1" \
            --command -- /bin/sh -c '
              set -e
              ENDPOINT="http://s3proxy-python:4433"

              echo "=== Creating test bucket ==="
              aws --endpoint-url $ENDPOINT s3 mb s3://load-test-bucket || true

              echo "=== Generating 10MB test files ==="
              mkdir -p /tmp/testfiles
              for i in 1 2 3; do
                dd if=/dev/urandom of=/tmp/testfiles/file-$i.bin bs=1M count=10 2>/dev/null &
              done
              wait
              ls -lh /tmp/testfiles/

              echo "=== Starting concurrent uploads ==="
              START=$(date +%s)
              for i in 1 2 3; do
                aws --endpoint-url $ENDPOINT s3 cp /tmp/testfiles/file-$i.bin s3://load-test-bucket/file-$i.bin &
              done
              wait
              END=$(date +%s)
              echo "=== Uploads complete in $((END - START))s ==="

              echo "=== Listing bucket ==="
              aws --endpoint-url $ENDPOINT s3 ls s3://load-test-bucket/

              echo "=== Downloading and verifying ==="
              mkdir -p /tmp/downloads
              for i in 1 2 3; do
                aws --endpoint-url $ENDPOINT s3 cp s3://load-test-bucket/file-$i.bin /tmp/downloads/file-$i.bin &
              done
              wait

              echo "=== Comparing checksums ==="
              ORIG_SUMS=$(md5sum /tmp/testfiles/*.bin | cut -d" " -f1 | sort)
              DOWN_SUMS=$(md5sum /tmp/downloads/*.bin | cut -d" " -f1 | sort)

              if [ "$ORIG_SUMS" = "$DOWN_SUMS" ]; then
                echo "✓ Checksums match - round-trip successful"
              else
                echo "✗ Checksum mismatch!"
                exit 1
              fi

              echo "=== Verifying encryption ==="
              dd if=/dev/urandom of=/tmp/encrypt-test.bin bs=1K count=100 2>/dev/null
              ORIG_SIZE=$(stat -c%s /tmp/encrypt-test.bin)
              ORIG_MD5=$(md5sum /tmp/encrypt-test.bin | cut -c1-32)

              aws --endpoint-url $ENDPOINT s3 cp /tmp/encrypt-test.bin s3://load-test-bucket/encrypt-test.bin
              # Read the same object straight from MinIO: ciphertext should be
              # +28 bytes and must differ from the plaintext.
              aws --endpoint-url http://minio:9000 s3 cp s3://load-test-bucket/encrypt-test.bin /tmp/raw.bin 2>/dev/null || true

              if [ -f /tmp/raw.bin ]; then
                RAW_SIZE=$(stat -c%s /tmp/raw.bin)
                RAW_MD5=$(md5sum /tmp/raw.bin | cut -c1-32)
                EXPECTED_SIZE=$((ORIG_SIZE + 28))

                if [ "$RAW_SIZE" = "$EXPECTED_SIZE" ] && [ "$ORIG_MD5" != "$RAW_MD5" ]; then
                  echo "✓ Encryption verified - size +28 bytes (GCM overhead), content differs"
                else
                  echo "✗ Encryption check failed"
                  exit 1
                fi
              else
                # Best effort: the direct MinIO read may be unavailable; warn
                # instead of silently passing.
                echo "⚠ Could not fetch raw object from MinIO - encryption check skipped"
              fi

              echo ""
              echo "✓ All tests passed!"
            '

      - name: Check load balancing
        run: |
          PODS=$(kubectl get pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python -o jsonpath='{.items[*].metadata.name}')
          PODS_HIT=0

          for pod in $PODS; do
            START_LINE=$(cat /tmp/$pod.start 2>/dev/null || echo "0")
            # grep -c already prints "0" on no match (with exit status 1), so
            # the fallback must be "|| true" - an "|| echo 0" fallback would
            # append a second line and break the numeric test below.
            REQUEST_COUNT=$(kubectl logs $pod -n s3proxy 2>/dev/null | tail -n +$((START_LINE + 1)) | grep -cE "GET|POST|PUT|HEAD" || true)
            if [ "$REQUEST_COUNT" -gt 0 ]; then
              PODS_HIT=$((PODS_HIT + 1))
              echo "✓ Pod $pod: received $REQUEST_COUNT requests"
            else
              echo "  Pod $pod: received 0 requests"
            fi
          done

          # Informational only: with 3 replicas behind one ClusterIP we expect
          # at least 2 pods to see traffic, but do not fail the job on skew.
          if [ "$PODS_HIT" -ge 2 ]; then
            echo "✓ Load balancing verified - traffic distributed across $PODS_HIT pods"
          else
            echo "⚠ Traffic went to only $PODS_HIT pod(s)"
          fi

      - name: Show logs on failure
        if: failure()
        run: |
          echo "=== Pod Status ==="
          kubectl get pods -n s3proxy -o wide
          echo ""
          echo "=== S3Proxy Logs ==="
          kubectl logs -l app.kubernetes.io/name=s3proxy-python -n s3proxy --tail=100
          echo ""
          echo "=== MinIO Logs ==="
          kubectl logs -l app=minio -n s3proxy --tail=50
          echo ""
          echo "=== Events ==="
          kubectl get events -n s3proxy --sort-by=.lastTimestamp
33 changes: 18 additions & 15 deletions .github/workflows/docker-publish.yml
Original file line number Diff line number Diff line change
@@ -1,16 +1,9 @@
name: Build and Push Docker Image

on:
workflow_dispatch:
inputs:
tag:
description: 'Docker image tag'
required: true
type: string

env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
push:
branches: [main]
tags: ['v*']

jobs:
build-and-push:
Expand All @@ -23,23 +16,33 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6

- name: Determine tags
id: tags
run: |
OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')
if [[ "$GITHUB_REF" == refs/tags/v* ]]; then
VERSION=${GITHUB_REF#refs/tags/v}
echo "tags=ghcr.io/${OWNER}/s3proxy-python:${VERSION}" >> $GITHUB_OUTPUT
else
echo "tags=ghcr.io/${OWNER}/s3proxy-python:latest" >> $GITHUB_OUTPUT
fi

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Log in to Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3.6.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and push Docker image
uses: docker/build-push-action@v6
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ inputs.tag }}
push: true
tags: ${{ steps.tags.outputs.tags }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
Loading