-
Notifications
You must be signed in to change notification settings - Fork 0
312 lines (273 loc) · 10.3 KB
/
integration.yml
File metadata and controls
312 lines (273 loc) · 10.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
---
# Integration test workflow: spins up a kind cluster + MinIO + kopia
# fixtures, runs the operator out-of-cluster on the runner host, and
# executes one cargo integration-test binary per matrix entry.
name: Integration

on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main

env:
  CARGO_TERM_COLOR: always
  CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse

# Cancel superseded runs for the same ref (run_id fallback keeps
# workflow_dispatch runs from cancelling each other).
concurrency:
  group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
  cancel-in-progress: true

jobs:
  integration:
    name: Integration (${{ matrix.name }})
    runs-on: ubuntu-24.04
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        # Each entry maps one cargo test binary (matrix.name) to the
        # namespaces its tests create, plus whether it needs the
        # non-postgres snapshot fixture.
        include:
          - name: lifecycle
            namespaces: >-
              test-lifecycle
              test-invalid-secret
              test-missing-secret
              test-rw-schema
              test-ro-schema
            needs_non_pg_snapshot: false
          - name: error_paths
            namespaces: >-
              test-non-pg-data
              test-wrong-bucket
            needs_non_pg_snapshot: true
          - name: ttls
            namespaces: >-
              test-min-ttl
            needs_non_pg_snapshot: true
          - name: switchover
            namespaces: >-
              test-switchover
            needs_non_pg_snapshot: false
          - name: consecutive_failures
            namespaces: >-
              test-consecutive-failures
              test-phase-stuck
              test-subresource-reset
            needs_non_pg_snapshot: true
          - name: persistent_schemas
            namespaces: >-
              test-persistent-schemas
              test-ps-conflict
              test-ps-skip-missing
            needs_non_pg_snapshot: false
    steps:
      - uses: actions/checkout@v6

      - name: Configure toolchain
        run: |
          rustup toolchain install --profile minimal --no-self-update stable
          rustup default stable

      - uses: Swatinem/rust-cache@v2

      - name: Build operator
        run: cargo build --bin operator

      - name: Generate CRDs
        run: cargo run --bin gen-crds > crds.yaml

      - name: Install crane
        run: |
          CRANE_VERSION=v0.20.3
          curl -fsSL "https://github.com/google/go-containerregistry/releases/download/${CRANE_VERSION}/go-containerregistry_Linux_x86_64.tar.gz" \
            | sudo tar -xzf - -C /usr/local/bin crane

      - name: Install kind
        run: |
          curl -fsSLo ./kind https://kind.sigs.k8s.io/dl/v0.27.0/kind-linux-amd64
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind

      - name: Create kind cluster
        run: |
          cat <<EOF | kind create cluster --config=-
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          nodes:
            - role: control-plane
          EOF
          kubectl cluster-info
          kubectl wait --for=condition=Ready nodes --all --timeout=120s

      - name: Apply CRDs
        run: kubectl apply -f crds.yaml

      - name: Create operator namespace and config
        run: |
          kubectl create namespace pgro-system
          # Expose the host-network operator to in-cluster jobs so the
          # snapshot-list callback can reach it.
          HOST_IP=$(docker inspect kind-control-plane \
            --format '{{ .NetworkSettings.Networks.kind.Gateway }}')
          echo "Host IP (from kind network gateway): $HOST_IP"
          kubectl apply -f - <<EOF
          apiVersion: v1
          kind: Service
          metadata:
            name: postgres-restore-operator
            namespace: pgro-system
          spec:
            ports:
              - port: 8080
                targetPort: 8080
          ---
          apiVersion: v1
          kind: Endpoints
          metadata:
            name: postgres-restore-operator
            namespace: pgro-system
          subsets:
            - addresses:
                - ip: "$HOST_IP"
              ports:
                - port: 8080
          ---
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: postgres-restore-operator-config
            namespace: pgro-system
          data:
            maxConcurrentRestores: "4"
            usePortForward: "true"
          EOF

      - name: Pre-pull images on kind node
        run: |
          # Pull via crane and load into the kind node so tests never
          # hit registry rate limits at runtime.
          load_image() {
            local image="$1"
            local archive="/tmp/$(echo "$image" | tr '/:' '_').tar"
            crane pull --platform linux/amd64 "$image" "$archive"
            kind load image-archive "$archive"
            rm -f "$archive"
          }
          load_image minio/minio:latest
          load_image minio/mc:latest
          load_image kopia/kopia:0.22.3
          load_image postgres:16
          load_image postgres:16-alpine
          load_image alpine:latest

      - name: Deploy MinIO
        run: |
          kubectl apply -f tests/fixtures/minio.yaml
          kubectl wait --namespace minio --for=condition=Available deployment/minio --timeout=120s
          echo "MinIO is ready"

      - name: Set up test kopia repository (postgres data)
        run: |
          kubectl apply -f tests/fixtures/setup-kopia-repo.yaml
          # Poll the Job's conditions: 60 iterations x 5s = 300s budget.
          for i in $(seq 1 60); do
            STATUS=$(kubectl get job/setup-kopia-repo -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}' 2>/dev/null)
            FAILED=$(kubectl get job/setup-kopia-repo -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}' 2>/dev/null)
            if [ "$STATUS" = "True" ]; then
              echo "Setup job completed successfully"
              break
            fi
            if [ "$FAILED" = "True" ]; then
              echo "Setup job failed!"
              kubectl describe job/setup-kopia-repo
              kubectl get pods -l job-name=setup-kopia-repo -o wide
              kubectl logs job/setup-kopia-repo --all-containers --prefix
              exit 1
            fi
            if [ "$i" = "60" ]; then
              echo "Setup job timed out after 300s"
              kubectl describe job/setup-kopia-repo
              kubectl get pods -l job-name=setup-kopia-repo -o wide
              kubectl describe pods -l job-name=setup-kopia-repo
              kubectl logs job/setup-kopia-repo --all-containers --prefix 2>/dev/null || true
              exit 1
            fi
            sleep 5
          done
          echo "--- Setup job logs ---"
          kubectl logs job/setup-kopia-repo --all-containers --prefix
          echo "--- Kopia repository ready ---"

      - name: Set up non-postgres kopia snapshot
        if: matrix.needs_non_pg_snapshot
        run: |
          kubectl apply -f tests/fixtures/setup-non-postgres-snapshot.yaml
          # Same polling scheme as the repo-setup job above.
          for i in $(seq 1 60); do
            STATUS=$(kubectl get job/setup-non-postgres-snapshot -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}' 2>/dev/null)
            FAILED=$(kubectl get job/setup-non-postgres-snapshot -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}' 2>/dev/null)
            if [ "$STATUS" = "True" ]; then
              echo "Non-postgres snapshot job completed successfully"
              break
            fi
            if [ "$FAILED" = "True" ]; then
              echo "Non-postgres snapshot job failed!"
              kubectl describe job/setup-non-postgres-snapshot
              kubectl logs job/setup-non-postgres-snapshot --all-containers --prefix
              exit 1
            fi
            if [ "$i" = "60" ]; then
              echo "Non-postgres snapshot job timed out after 300s"
              kubectl describe job/setup-non-postgres-snapshot
              kubectl logs job/setup-non-postgres-snapshot --all-containers --prefix 2>/dev/null || true
              exit 1
            fi
            sleep 5
          done
          echo "--- Non-postgres snapshot job logs ---"
          kubectl logs job/setup-non-postgres-snapshot --all-containers --prefix

      - name: Start operator (out-of-cluster)
        run: |
          RUST_LOG=info,tower_http=debug \
          OPERATOR_NAMESPACE=pgro-system \
          OPERATOR_SERVICE_NAME=postgres-restore-operator \
          KUBECONFIG="${HOME}/.kube/config" \
          ./target/debug/operator > /tmp/operator.log 2>&1 &
          echo $! > /tmp/operator.pid
          # Wait for operator to be ready
          for i in $(seq 1 30); do
            if curl -sf http://[::1]:8080/readyz > /dev/null 2>&1; then
              echo "Operator is ready"
              break
            fi
            if [ "$i" = "30" ]; then
              echo "Operator failed to become ready"
              cat /tmp/operator.log
              exit 1
            fi
            sleep 1
          done

      - name: Run integration tests
        run: |
          cargo test --test "${{ matrix.name }}" -- --include-ignored --nocapture
        env:
          RUST_LOG: info

      - name: Operator logs
        if: always()
        run: |
          echo "--- Operator logs ---"
          cat /tmp/operator.log || true

      - name: Cluster state on failure
        if: failure()
        run: |
          echo "=== Namespaces ==="
          kubectl get ns
          echo "=== All resources in test namespaces ==="
          for ns in ${{ matrix.namespaces }}; do
            echo "--- Namespace: $ns ---"
            kubectl get all -n "$ns" 2>/dev/null || true
          done
          echo "=== PostgresPhysicalReplicas ==="
          kubectl get postgresphysicalreplicas -A -o yaml 2>/dev/null || true
          echo "=== PostgresPhysicalRestores ==="
          kubectl get postgresphysicalrestores -A -o yaml 2>/dev/null || true
          echo "=== Jobs ==="
          kubectl get jobs -A -o wide 2>/dev/null || true
          echo "=== Pods ==="
          kubectl get pods -A -o wide 2>/dev/null || true
          echo "=== PVCs ==="
          kubectl get pvc -A -o wide 2>/dev/null || true
          echo "=== Events in test namespaces ==="
          for ns in ${{ matrix.namespaces }}; do
            echo "--- Events in $ns ---"
            kubectl get events -n "$ns" --sort-by=.lastTimestamp 2>/dev/null || true
          done
          echo "=== Failed pod logs ==="
          for ns in ${{ matrix.namespaces }}; do
            for pod in $(kubectl get pods -n "$ns" --no-headers -o custom-columns=":metadata.name" 2>/dev/null); do
              echo "--- Pod: $ns/$pod ---"
              kubectl logs -n "$ns" "$pod" --all-containers 2>/dev/null || true
            done
          done

      - name: Stop operator
        if: always()
        run: |
          if [ -f /tmp/operator.pid ]; then
            kill "$(cat /tmp/operator.pid)" 2>/dev/null || true
          fi