diff --git a/.github/workflows/helm-install-test.yml b/.github/workflows/helm-install-test.yml index 1ab296a..e4ef8c0 100644 --- a/.github/workflows/helm-install-test.yml +++ b/.github/workflows/helm-install-test.yml @@ -6,6 +6,11 @@ on: - cron: '0 6 * * *' workflow_dispatch: +# Explicit permissions are required for GITHUB_TOKEN to pull from GHCR +permissions: + contents: read + packages: read + jobs: helm-install: runs-on: ubuntu-latest @@ -105,6 +110,20 @@ jobs: EOF kubectl wait --for=condition=ready pod -l app=redis -n s3proxy --timeout=120s + - name: Create K8s Image Pull Secret & Patch Namespace + run: | + # 1. Create the secret using the workflow token + kubectl create secret docker-registry ghcr-login \ + --docker-server=ghcr.io \ + --docker-username=${{ github.actor }} \ + --docker-password=${{ secrets.GITHUB_TOKEN }} \ + --namespace s3proxy \ + --dry-run=client -o yaml | kubectl apply -f - + + # 2. Patch the default service account to automatically use this secret + # This acts as a fail-safe if the Helm 'imagePullSecrets' set doesn't propagate + kubectl patch serviceaccount default -n s3proxy -p '{"imagePullSecrets": [{"name": "ghcr-login"}]}' + - name: Install chart from GHCR run: | OWNER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') @@ -113,6 +132,7 @@ jobs: --set image.repository=ghcr.io/${OWNER}/s3proxy-python \ --set image.tag=latest \ --set image.pullPolicy=Always \ + --set "imagePullSecrets[0].name=ghcr-login" \ --set s3.host="http://minio:9000" \ --set secrets.encryptKey=test-encryption-key-for-ci \ --set secrets.awsAccessKeyId=minioadmin \ @@ -129,7 +149,6 @@ jobs: run: | kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=s3proxy-python -n s3proxy --timeout=120s kubectl get pods -n s3proxy - # Verify we have 3 s3proxy pods POD_COUNT=$(kubectl get pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python --no-headers | grep Running | wc -l) if [ "$POD_COUNT" -lt 3 ]; then echo "Expected 3 s3proxy pods, got 
$POD_COUNT" @@ -140,7 +159,7 @@ jobs: - name: Check health endpoint run: | kubectl port-forward svc/s3proxy-python 4433:4433 -n s3proxy & - sleep 3 + sleep 5 curl -sf http://localhost:4433/healthz && echo "Health check passed" - name: Run S3 smoke test @@ -202,6 +221,9 @@ jobs: echo "=== Pod Status ===" kubectl get pods -n s3proxy -o wide echo "" + echo "=== Describe Failed Pods ===" + kubectl describe pods -n s3proxy -l app.kubernetes.io/name=s3proxy-python + echo "" echo "=== S3Proxy Logs ===" kubectl logs -l app.kubernetes.io/name=s3proxy-python -n s3proxy --tail=100 echo "" @@ -209,4 +231,4 @@ jobs: kubectl logs -l app=minio -n s3proxy --tail=50 echo "" echo "=== Events ===" - kubectl get events -n s3proxy --sort-by=.lastTimestamp + kubectl get events -n s3proxy --sort-by=.lastTimestamp \ No newline at end of file diff --git a/manifests/values.yaml b/manifests/values.yaml index 8d0f01d..00968be 100644 --- a/manifests/values.yaml +++ b/manifests/values.yaml @@ -3,7 +3,7 @@ replicaCount: 3 image: - repository: ghcr.io/ServerSideHannes/sseproxy-python + repository: ghcr.io/serversidehannes/s3proxy-python tag: latest pullPolicy: IfNotPresent @@ -123,7 +123,7 @@ affinity: {} # podAffinityTerm: # labelSelector: # matchLabels: - # app: sseproxy-python + # app: s3proxy-python # topologyKey: kubernetes.io/hostname topologySpreadConstraints: [] @@ -133,7 +133,7 @@ topologySpreadConstraints: [] # whenUnsatisfiable: ScheduleAnyway # labelSelector: # matchLabels: - # app: sseproxy-python + # app: s3proxy-python ingress: enabled: false