Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 7 additions & 13 deletions .github/workflows/storagebox-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,6 @@ jobs:
# Operator repos (from ec.yaml) not in Chart.yaml dependencies
helm repo add jetstack https://charts.jetstack.io || true
helm repo add cnpg https://cloudnative-pg.github.io/charts || true
helm repo add minio-operator https://operator.min.io || true
helm repo add k8ssandra https://helm.k8ssandra.io/stable || true
helm repo update

Expand Down Expand Up @@ -144,17 +143,13 @@ jobs:
done
kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=10s

# Install remaining operators in parallel
# Install remaining operators in parallel (Garage needs no operator)
- name: Install operators
run: |
helm install cloudnative-pg cnpg/cloudnative-pg \
--namespace cnpg --create-namespace \
--version 0.27.0 &

helm install minio-operator minio-operator/operator \
--namespace minio --create-namespace \
--version 7.1.1 &

helm install k8ssandra-operator k8ssandra/k8ssandra-operator \
--namespace k8ssandra-operator --create-namespace \
--version 1.22.0 \
Expand All @@ -164,7 +159,7 @@ jobs:

- name: Wait for operators
run: |
NAMESPACES="cnpg minio k8ssandra-operator"
NAMESPACES="cnpg k8ssandra-operator"
TIMEOUT=300; ELAPSED=0; INTERVAL=15
while [ $ELAPSED -lt $TIMEOUT ]; do
echo ""
Expand Down Expand Up @@ -220,7 +215,7 @@ jobs:
echo ""
echo "Fully ready: ${READY_COUNT}/${TOTAL}"

# We need at least postgres, minio, rqlite, cassandra (4 pods minimum)
# We need at least postgres, garage, rqlite, cassandra (4 pods minimum)
if [ "$READY_COUNT" -ge 4 ] && [ -z "$NOT_READY" ]; then
echo ""
echo "All pods are ready!"
Expand All @@ -237,7 +232,7 @@ jobs:

# Fail if key components aren't ready
kubectl wait --for=condition=Ready pods -l cnpg.io/cluster=postgres -n $NS --timeout=30s
kubectl wait --for=condition=Ready pods -l v1.min.io/tenant=minio -n $NS --timeout=30s
kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=garage -n $NS --timeout=60s
# Cassandra has a sidecar that takes longer to become ready
kubectl wait --for=condition=Ready pods -l app.kubernetes.io/managed-by=cass-operator -n $NS --timeout=180s

Expand All @@ -246,7 +241,7 @@ jobs:
run: |
pip install -r tests/requirements.txt
python tests/smoke_test.py storagebox --timeout 120 \
--components postgres minio rqlite cassandra
--components postgres garage rqlite cassandra

- name: Debug output on failure
if: failure()
Expand All @@ -264,7 +259,6 @@ jobs:
echo "=== Operator pods ==="
kubectl get pods -n cert-manager
kubectl get pods -n cnpg
kubectl get pods -n minio
kubectl get pods -n k8ssandra-operator
echo ""
echo "=== PostgreSQL Cluster status ==="
Expand All @@ -273,8 +267,8 @@ jobs:
echo "=== K8ssandraCluster status ==="
kubectl get k8ssandraclusters -n $NS -o yaml || true
echo ""
echo "=== MinIO Tenant status ==="
kubectl get tenants.minio.min.io -n $NS -o yaml || true
echo "=== Garage StatefulSet status ==="
kubectl get statefulset -l app.kubernetes.io/name=garage -n $NS -o yaml || true
echo ""
echo "=== Pod logs (last 30 lines each) ==="
for pod in $(kubectl get pods -n $NS -o name 2>/dev/null); do
Expand Down
18 changes: 14 additions & 4 deletions applications/storagebox/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -637,6 +637,19 @@ vm-ec-install-headless: vm-copy-license vm-copy-config
# Helper Targets
# ============================================================================

# Default kubectl arguments for the vm-kubectl target; override per call:
#   make vm-kubectl CMD='get pods -n storagebox'
CMD ?= get pods -A
.PHONY: vm-kubectl
# Run a kubectl command on the embedded-cluster node-1 VM.
# Looks up the running "$(CLUSTER_PREFIX)-node-1" VM via the replicated CLI,
# resolves its SSH endpoint, then invokes the embedded-cluster kubectl binary
# over ssh with the k0s admin kubeconfig. The recipe is a single shell
# invocation (lines joined with "; \") so $$vm_id and $$ssh_endpoint stay in
# scope across lines.
# NOTE(review): $(CMD) is expanded inside a double-quoted remote command, so
# values containing quotes or shell metacharacters may need extra escaping.
vm-kubectl:
	@vm_id=$$(replicated vm ls --output json | jq -r '.[] | select(.name == "$(CLUSTER_PREFIX)-node-1" and .status == "running") | .id'); \
	if [ -z "$$vm_id" ]; then \
	echo "ERROR: $(CLUSTER_PREFIX)-node-1 not found or not running"; \
	exit 1; \
	fi; \
	ssh_endpoint=$$(replicated vm ssh-endpoint $$vm_id --app $(APP_SLUG)); \
	ssh -o StrictHostKeyChecking=no $$ssh_endpoint "\
	sudo KUBECONFIG=/var/lib/embedded-cluster/k0s/pki/admin.conf \
	/var/lib/embedded-cluster/bin/kubectl $(CMD)"

.PHONY: vm-ec-test-cycle
vm-ec-test-cycle:
@echo "=== Full Embedded Cluster Test Cycle ==="
Expand Down Expand Up @@ -738,6 +751,7 @@ help:
@echo " make vm-copy-config - Copy config values to node-1"
@echo " make vm-ec-install - Install EC on node-1 (UI mode)"
@echo " make vm-ec-install-headless - Install EC on node-1 (headless)"
@echo " make vm-kubectl CMD='...' - Run kubectl on node-1 (EC environment)"
@echo ""
@echo "Workflows:"
@echo " make test-cycle - Full KOTS test cycle: release + cluster + ready"
Expand Down Expand Up @@ -816,10 +830,6 @@ test-install-operators:
--namespace cnpg --create-namespace \
--version 0.27.0 \
--wait --timeout 5m
helm install minio-operator minio-operator/operator \
--namespace minio --create-namespace \
--version 7.1.1 \
--wait --timeout 5m
helm install k8ssandra-operator k8ssandra/k8ssandra-operator \
--namespace k8ssandra-operator --create-namespace \
--version 1.22.0 \
Expand Down
9 changes: 4 additions & 5 deletions applications/storagebox/charts/storagebox/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ apiVersion: v2
name: storagebox
description: A Helm chart for different storage options
type: application
version: 0.24.0
version: 0.26.8
appVersion: 1.0.0
icon: data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAzMiAzMiIgd2lkdGg9IjMyIiBoZWlnaHQ9IjMyIj48cmVjdCB4PSIyIiB5PSI2IiB3aWR0aD0iMjgiIGhlaWdodD0iMjIiIHJ4PSIzIiBmaWxsPSIjMjU2M2ViIi8+PHJlY3QgeD0iNSIgeT0iOSIgd2lkdGg9IjIyIiBoZWlnaHQ9IjUiIHJ4PSIxIiBmaWxsPSIjNjBhNWZhIi8+PHJlY3QgeD0iNSIgeT0iMTYiIHdpZHRoPSIyMiIgaGVpZ2h0PSI1IiByeD0iMSIgZmlsbD0iIzkzYzVmZCIvPjxyZWN0IHg9IjUiIHk9IjIzIiB3aWR0aD0iMjIiIGhlaWdodD0iMyIgcng9IjEiIGZpbGw9IiNiZmRiZmUiLz48Y2lyY2xlIGN4PSIyNCIgY3k9IjExLjUiIHI9IjEuNSIgZmlsbD0iIzIyYzU1ZSIvPjxjaXJjbGUgY3g9IjI0IiBjeT0iMTguNSIgcj0iMS41IiBmaWxsPSIjMjJjNTVlIi8+PC9zdmc+
dependencies:
Expand All @@ -14,10 +14,9 @@ dependencies:
version: "~1.1.2"
repository: https://charts.obeone.cloud
condition: nfs-server.enabled
- name: tenant
version: "7.1.1"
repository: https://operator.min.io
condition: tenant.enabled
- name: garage
version: "0.2.0"
condition: garage.enabled
- name: rqlite
version: "2.0.0"
repository: https://rqlite.github.io/helm-charts
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Chart metadata for the vendored Garage subchart. It lives inside the parent
# chart's charts/ directory, which is why the parent Chart.yaml dependency
# entry for "garage" carries no repository URL.
apiVersion: v2
name: garage
description: Vendored Garage S3-compatible object storage
type: application
# Subchart version — bump on any template/values change.
version: 0.2.0
# Upstream Garage release this chart targets.
appVersion: "v1.3.1"
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
{{/*
Garage fullname (scoped to parent release)
Renders "<release-name>-garage". Every Garage resource (ConfigMap, Secret,
Service, StatefulSet) is named with this helper so names stay unique per
Helm release.
*/}}
{{- define "garage.fullname" -}}
{{ .Release.Name }}-garage
{{- end -}}

{{/*
Garage labels
Full label set for resource metadata, including the mutable
app.kubernetes.io/managed-by label.
*/}}
{{- define "garage.labels" -}}
app.kubernetes.io/name: garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: object-storage
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Garage selector labels
Subset of "garage.labels" safe for immutable selectors (StatefulSet
spec.selector, Service spec.selector); deliberately omits managed-by.
*/}}
{{- define "garage.selectorLabels" -}}
app.kubernetes.io/name: garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: object-storage
{{- end -}}
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# Garage server configuration, mounted into the container at /etc/garage.toml
# (Garage's default config path). Credentials are referenced as *_file paths
# so no secret material appears in this ConfigMap.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "garage.fullname" . }}
  labels:
    {{- include "garage.labels" . | nindent 4 }}
data:
  garage.toml: |
    metadata_dir = "/var/lib/garage/meta"
    data_dir = "/var/lib/garage/data"
    db_engine = "lmdb"

    # Single-node deployment: replication_factor = 1 is the current spelling
    # of replication_mode = "none", which is deprecated since Garage v1.0
    # (this chart pins Garage v1.3.1).
    replication_factor = 1

    rpc_bind_addr = "[::]:3901"
    rpc_secret_file = "/etc/garage/secrets/rpc-secret"

    [s3_api]
    s3_region = "garage"
    api_bind_addr = "[::]:3900"
    root_domain = ".s3.garage.localhost"

    [s3_web]
    bind_addr = "[::]:3902"
    root_domain = ".web.garage.localhost"

    [admin]
    api_bind_addr = "0.0.0.0:3903"
    admin_token_file = "/etc/garage/secrets/admin-token"
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
{{- /*
Idempotent credentials: on upgrade, re-use the data from the Secret already
in the cluster (found via `lookup`) so the admin token and RPC secret stay
stable across `helm upgrade`. On first install — and under `helm template`
or `--dry-run`, where `lookup` returns nothing — fresh values are generated.
NOTE(review): with template/dry-run based tooling (e.g. GitOps diffing) the
generation branch runs every time, so diffs will always show this Secret
changing — confirm that is acceptable.
*/}}
{{- $existing := lookup "v1" "Secret" .Release.Namespace (printf "%s-garage" .Release.Name) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "garage.fullname" . }}
  labels:
    {{- include "garage.labels" . | nindent 4 }}
type: Opaque
data:
  {{- if $existing }}
  # Preserve the previously generated values verbatim (already base64).
  admin-token: {{ index $existing.data "admin-token" }}
  rpc-secret: {{ index $existing.data "rpc-secret" }}
  {{- else }}
  # admin-token: operator-supplied via .Values.adminToken, else random.
  # rpc-secret: sha256sum of a freshly generated ed25519 key yields a 64-char
  # hex string, the format Garage expects for rpc_secret.
  admin-token: {{ .Values.adminToken | default (randAlphaNum 32) | b64enc | quote }}
  rpc-secret: {{ genPrivateKey "ed25519" | sha256sum | b64enc | quote }}
  {{- end }}
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# In-cluster Service for Garage's S3 API (3900) and admin/health API (3903).
# The RPC port (3901) is not exposed here — presumably because it is only
# needed for Garage node-to-node traffic; confirm before adding peers.
# NOTE(review): the StatefulSet uses this Service as its serviceName, but the
# Service is a regular ClusterIP (not headless), so stable per-pod DNS records
# are not created. Harmless with a single replica; revisit before scaling out.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "garage.fullname" . }}
  labels:
    {{- include "garage.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  ports:
    # S3-compatible object API.
    - port: 3900
      targetPort: s3
      protocol: TCP
      name: s3
    # Admin API; also serves the /health endpoint used by the pod probes.
    - port: 3903
      targetPort: admin
      protocol: TCP
      name: admin
  selector:
    {{- include "garage.selectorLabels" . | nindent 4 }}
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
# Single-node Garage object store. A StatefulSet (rather than a Deployment)
# gives the pod a stable identity and per-pod PersistentVolumeClaims for the
# metadata and data directories.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "garage.fullname" . }}
  labels:
    {{- include "garage.labels" . | nindent 4 }}
spec:
  # Governing service for the pod's network identity.
  # NOTE(review): the referenced Service is not headless — acceptable for a
  # single replica, confirm before scaling out.
  serviceName: {{ include "garage.fullname" . }}
  # Fixed at one replica; this chart does not configure Garage peering.
  replicas: 1
  selector:
    matchLabels:
      {{- include "garage.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "garage.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      # Run everything as a fixed non-root user; fsGroup makes the PVC
      # volumes writable by that user.
      securityContext:
        runAsNonRoot: true
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
        seccompProfile:
          type: RuntimeDefault
      # Copy secrets to an emptyDir with mode 0600. Kubernetes fsGroup
      # processing adds group-read bits to secret volume mounts, but
      # Garage requires exactly mode 0600 on secret files.
      initContainers:
      - name: fix-secret-perms
        image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
        # cp dereferences the projected-secret symlinks, so the copies are
        # plain files whose mode chmod can safely change.
        command: ["sh", "-c", "cp /secrets-raw/* /etc/garage/secrets/ && chmod 0600 /etc/garage/secrets/*"]
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop: ["ALL"]
        volumeMounts:
        - name: secrets-raw
          mountPath: /secrets-raw
          readOnly: true
        - name: secrets
          mountPath: /etc/garage/secrets
      containers:
      - name: garage
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        securityContext:
          allowPrivilegeEscalation: false
          # Root FS is read-only; every writable path is an explicit volume.
          readOnlyRootFilesystem: true
          capabilities:
            drop: ["ALL"]
        ports:
        - name: s3
          containerPort: 3900
          protocol: TCP
        - name: rpc
          containerPort: 3901
          protocol: TCP
        - name: admin
          containerPort: 3903
          protocol: TCP
        # Both probes hit the admin API's /health endpoint.
        livenessProbe:
          httpGet:
            path: /health
            port: admin
          initialDelaySeconds: 10
          periodSeconds: 30
        readinessProbe:
          httpGet:
            path: /health
            port: admin
          initialDelaySeconds: 5
          periodSeconds: 10
        {{- with .Values.resources }}
        resources:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        volumeMounts:
        # subPath mount of just garage.toml; note subPath mounts do not pick
        # up later ConfigMap updates — a pod restart is required.
        - name: config
          mountPath: /etc/garage.toml
          subPath: garage.toml
          readOnly: true
        # Permission-fixed secrets prepared by the init container.
        - name: secrets
          mountPath: /etc/garage/secrets
          readOnly: true
        - name: meta
          mountPath: /var/lib/garage/meta
        - name: data
          mountPath: /var/lib/garage/data
      volumes:
      - name: config
        configMap:
          name: {{ include "garage.fullname" . }}
      # Raw secret as projected by Kubernetes (permissions widened by fsGroup;
      # see the init container comment above).
      - name: secrets-raw
        secret:
          secretName: {{ include "garage.fullname" . }}
      # Destination for the 0600 copies; in-memory so secret material never
      # touches node disk, and tiny since only two small files are stored.
      - name: secrets
        emptyDir:
          medium: Memory
          sizeLimit: 1Mi
  # One PVC each for Garage metadata and object data; storageClass is
  # optional (cluster default applies when unset).
  volumeClaimTemplates:
  - metadata:
      name: meta
    spec:
      accessModes: ["ReadWriteOnce"]
      {{- if .Values.persistence.meta.storageClass }}
      storageClassName: {{ .Values.persistence.meta.storageClass }}
      {{- end }}
      resources:
        requests:
          storage: {{ .Values.persistence.meta.size }}
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      {{- if .Values.persistence.data.storageClass }}
      storageClassName: {{ .Values.persistence.data.storageClass }}
      {{- end }}
      resources:
        requests:
          storage: {{ .Values.persistence.data.size }}
Loading