-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathJenkinsfile
More file actions
287 lines (238 loc) · 14.1 KB
/
Jenkinsfile
File metadata and controls
287 lines (238 loc) · 14.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
pipeline {
agent any
environment {
    // Prefix applied to every Docker image built by this pipeline
    REGISTRY_PREFIX = "cityzen"
    // Kubeconfig of the local (staging) K3s cluster
    KUBECONFIG = "/etc/rancher/k3s/k3s.yaml"
    // Remote production server connection details
    DEPLOY_HOST = "192.168.72.106"
    DEPLOY_USER = "saes2026"
    DEPLOY_SSH_PORT = "2222"
    // Pre-built ssh/scp command fragments. StrictHostKeyChecking is disabled;
    // acceptable only because the target is a fixed, known lab machine.
    DEPLOY_SSH = "ssh -p ${DEPLOY_SSH_PORT} -o StrictHostKeyChecking=no ${DEPLOY_USER}@${DEPLOY_HOST}"
    DEPLOY_SCP = "scp -P ${DEPLOY_SSH_PORT} -o StrictHostKeyChecking=no"
    // Remote directory for k8s manifests. Derived from DEPLOY_USER instead of
    // hard-coding /home/saes2026, so the two values cannot drift apart.
    DEPLOY_K8S_DIR = "/home/${DEPLOY_USER}/k8s-deploy"
    // Base path for the per-service Python virtual environments
    VENV_PATH = "${WORKSPACE}/venv"
}
stages {
// Stage 1: fetch the branch/commit that triggered this build.
stage('Checkout') {
    steps {
        echo 'Fetching source code from repository...'
        // 'scm' is the source-control configuration attached to this job
        checkout scm
    }
}
// Run repository-wide security scans and per-service unit tests concurrently.
stage('Security & Unit Tests') {
    steps {
        script {
            // Backend services are auto-discovered by the *-service naming convention.
            def serviceNames = sh(script: "ls -d backend/*-service | xargs -n1 basename", returnStdout: true).trim().split('\n')
            def branches = [:]
            // Static analysis (Semgrep) across both tiers; --error fails the build on findings.
            branches['Semgrep Scan'] = {
                sh 'semgrep scan --config p/ci --error backend/'
                sh 'semgrep scan --config p/ci --error frontend/'
            }
            // Filesystem CVE scan; --exit-code 0 makes it informational only.
            branches['Trivy Scan'] = {
                sh 'trivy fs --exit-code 0 --severity HIGH,CRITICAL --skip-dirs .venv --skip-dirs node_modules .'
            }
            // One unit-test branch and one dependency-audit branch per service.
            serviceNames.each { service ->
                def shortName = service.replace('-service', '')
                def venv = "${env.VENV_PATH}-${shortName}"
                branches["Unit: ${service}"] = {
                    // Isolated virtualenv per service so requirement sets never collide.
                    sh "python3 -m venv ${venv}"
                    sh "${venv}/bin/pip install -r backend/${service}/requirements.txt"
                    sh "cd backend/${service} && ${venv}/bin/coverage run --source=app -m unittest discover -s tests"
                    sh "cd backend/${service} && ${venv}/bin/coverage report"
                }
                branches["Safety: ${service}"] = {
                    // CVE audit of the service's pinned Python dependencies.
                    sh "safety check -r backend/${service}/requirements.txt"
                }
            }
            parallel branches
        }
    }
}
// Install frontend Node dependencies ahead of the Docker build.
stage('Build Frontend') {
    steps {
        echo 'Installing dependencies...'
        // npm ci installs exactly the versions pinned in package-lock.json
        sh "cd frontend && npm ci"
    }
}
// Build versioned Docker images for every backend service plus the frontend.
stage('Docker Build') {
    steps {
        script {
            def serviceNames = sh(script: "ls -d backend/*-service | xargs -n1 basename", returnStdout: true).trim().split('\n')
            def buildBranches = [:]
            // One parallel build per backend service: tag with the build number,
            // then alias the same image as :latest.
            serviceNames.each { service ->
                def image = "${env.REGISTRY_PREFIX}/${service}"
                buildBranches["Build ${service}"] = {
                    sh "docker build -t ${image}:${env.BUILD_NUMBER} -f backend/${service}/Dockerfile backend/${service}"
                    sh "docker tag ${image}:${env.BUILD_NUMBER} ${image}:latest"
                }
            }
            // The frontend bakes its API URL in at build time (Vite build arg).
            buildBranches['Build Frontend'] = {
                sh "docker build -t ${env.REGISTRY_PREFIX}/frontend:${env.BUILD_NUMBER} --build-arg VITE_API_URL=http://authentication-service.cityzen.svc.cluster.local:8000 -f frontend/Dockerfile frontend"
                sh "docker tag ${env.REGISTRY_PREFIX}/frontend:${env.BUILD_NUMBER} ${env.REGISTRY_PREFIX}/frontend:latest"
            }
            parallel buildBranches
        }
    }
}
// Gate the pipeline on HIGH/CRITICAL CVEs in the freshly built images.
stage('Image Security Scan') {
    steps {
        script {
            def serviceNames = sh(script: "ls -d backend/*-service | xargs -n1 basename", returnStdout: true).trim().split('\n')
            def scanBranches = [:]
            // A unique --cache-dir per branch prevents trivy DB lock conflicts
            // when the scans run in parallel. --exit-code 1 fails the build.
            serviceNames.each { service ->
                def image = "${env.REGISTRY_PREFIX}/${service}"
                scanBranches["Scan ${service}"] = {
                    sh "trivy image --cache-dir /tmp/trivy-cache-${service} --exit-code 1 --severity HIGH,CRITICAL ${image}:${env.BUILD_NUMBER}"
                }
            }
            // Scan the frontend image with its own cache directory.
            scanBranches['Scan Frontend'] = {
                sh "trivy image --cache-dir /tmp/trivy-cache-frontend --exit-code 1 --severity HIGH,CRITICAL ${env.REGISTRY_PREFIX}/frontend:${env.BUILD_NUMBER}"
            }
            parallel scanBranches
        }
    }
}
// Deploy the new images to the local K3s cluster used as staging.
stage('Deploy to Staging') {
    steps {
        script {
            def services = sh(script: "ls -d backend/*-service | xargs -n1 basename", returnStdout: true).trim().split('\n')
            // Import all backend service images into local K3s (containerd store).
            services.each { service ->
                def imageName = "${env.REGISTRY_PREFIX}/${service}"
                sh "docker save ${imageName}:${env.BUILD_NUMBER} | sudo k3s ctr images import -"
                sh "sudo k3s ctr images tag docker.io/${imageName}:${env.BUILD_NUMBER} docker.io/${imageName}:latest"
            }
            // Import frontend image.
            sh "docker save ${env.REGISTRY_PREFIX}/frontend:${env.BUILD_NUMBER} | sudo k3s ctr images import -"
            // Idempotent namespace creation (dry-run | apply).
            sh "kubectl create namespace cityzen --dry-run=client -o yaml | kubectl apply -f -"
            // Recreate per-service secrets from Jenkins file credentials.
            // Convention: '<name>-service' -> credential '<name>-env', secret '<name>-secret'.
            services.each { service ->
                def credId = service.replace('-service', '-env')
                def secretName = service.replace('-service', '-secret')
                withCredentials([file(credentialsId: credId, variable: 'ENV_FILE')]) {
                    // SECURITY FIX: \$ENV_FILE is expanded by the shell, not by Groovy.
                    // Groovy interpolation of a credential variable leaks it into the
                    // interpolated command line (Jenkins "insecure interpolation" warning).
                    sh "kubectl create secret generic ${secretName} --from-env-file=\"\$ENV_FILE\" -n cityzen --dry-run=client -o yaml | kubectl apply -f -"
                }
            }
            // Create minio secret (single-quoted Groovy string: shell expands the variable).
            withCredentials([file(credentialsId: 'minio-env', variable: 'MINIO_ENV_FILE')]) {
                sh 'kubectl create secret generic minio-secret --from-env-file="$MINIO_ENV_FILE" -n cityzen --dry-run=client -o yaml | kubectl apply -f -'
            }
            sh "kubectl apply -f k8s/"
            // Restart all deployments so pods pick up the re-tagged :latest images.
            sh "kubectl rollout restart deployments -n cityzen"
            // Wait for every rollout to converge before the API test stage starts.
            services.each { service ->
                sh "kubectl rollout status deployment/${service} -n cityzen --timeout=120s"
            }
            sh "kubectl rollout status deployment/frontend -n cityzen --timeout=120s"
        }
    }
}
// End-to-end API tests: run every Postman collection with newman against the
// staging cluster, reached through temporary kubectl port-forward tunnels.
stage('API Tests') {
    steps {
        script {
            // Wait for all pods to be ready before opening tunnels.
            sh 'kubectl wait --for=condition=ready pod --all -n cityzen --timeout=120s'
            // Install newman if not present on this agent.
            sh 'which newman || npm install -g newman'
            // Start a background port-forward per service so newman can reach each
            // ClusterIP service on localhost.
            // NOTE(review): this service:port list is maintained by hand — confirm it
            // stays in sync with the manifests in k8s/. The 5s sleep assumes the
            // tunnels are up by then; verify on slower agents.
            sh '''
            SERVICES="authentication-service:8000 ticket-service:8002 account-service:8003 parameter-configuration-service:8004 assignment-service:8005 media-service:8006 registration-service:8007 analytics-service:8008 feedback-service:8009 map-service:8030 geolocation-service:8040 report-service:8050 notification-service:8060"
            for svc in $SERVICES; do
            name=$(echo $svc | cut -d: -f1)
            port=$(echo $svc | cut -d: -f2)
            kubectl port-forward -n cityzen svc/$name $port:$port &
            done
            sleep 5
            '''
            try {
                // Find all Postman collections and run tests in parallel.
                def collections = sh(script: "ls -1 '${WORKSPACE}/postman'/*.postman_collection.json", returnStdout: true).trim().split('\n')
                def parallelTests = [:]
                collections.each { collection ->
                    // Branch label derived from the collection filename.
                    def collectionName = collection.split('/').last().replace('.postman_collection.json', '')
                    parallelTests["Test: ${collectionName}"] = {
                        sh "newman run '${collection}' --environment '${WORKSPACE}/postman/CityZen-Jenkins.postman_environment.json' --delay-request 100"
                    }
                }
                parallel parallelTests
            } finally {
                // Always tear down the port-forward tunnels, pass or fail.
                sh 'pkill -f "kubectl port-forward" || true'
            }
        }
    }
}
// Ship the verified images and manifests to the remote production K3s host over SSH.
stage('Deploy to Production') {
    steps {
        script {
            def services = sh(script: "ls -d backend/*-service | xargs -n1 basename", returnStdout: true).trim().split('\n')
            def sshCmd = "${env.DEPLOY_SSH}"
            // Stream each backend image straight into the remote containerd store.
            services.each { service ->
                def imageName = "${env.REGISTRY_PREFIX}/${service}"
                sh "docker save ${imageName}:${env.BUILD_NUMBER} | ${sshCmd} 'sudo k3s ctr images import -'"
                sh "${sshCmd} \"sudo k3s ctr images tag docker.io/${imageName}:${env.BUILD_NUMBER} docker.io/${imageName}:latest\""
            }
            // Transfer and import frontend image.
            sh "docker save ${env.REGISTRY_PREFIX}/frontend:${env.BUILD_NUMBER} | ${sshCmd} 'sudo k3s ctr images import -'"
            sh "${sshCmd} \"sudo k3s ctr images tag docker.io/${env.REGISTRY_PREFIX}/frontend:${env.BUILD_NUMBER} docker.io/${env.REGISTRY_PREFIX}/frontend:latest\""
            // Copy k8s manifests to the remote deploy directory.
            sh "${env.DEPLOY_SCP} -r ${WORKSPACE}/k8s/ ${env.DEPLOY_USER}@${env.DEPLOY_HOST}:${env.DEPLOY_K8S_DIR}/"
            // Create namespace (idempotent; stderr suppressed when it already exists).
            sh "${sshCmd} 'sudo k3s kubectl create namespace cityzen 2>/dev/null || true'"
            // Recreate per-service secrets from Jenkins file credentials.
            services.each { service ->
                def credId = service.replace('-service', '-env')
                def secretName = service.replace('-service', '-secret')
                withCredentials([file(credentialsId: credId, variable: 'ENV_FILE')]) {
                    sh "${sshCmd} 'rm -f /tmp/${secretName}.env'"
                    // SECURITY FIX: \$ENV_FILE is expanded by the shell, not by Groovy,
                    // so the credential variable is never interpolated into the Groovy
                    // string (Jenkins "insecure interpolation" warning).
                    sh "${env.DEPLOY_SCP} \"\$ENV_FILE\" ${env.DEPLOY_USER}@${env.DEPLOY_HOST}:/tmp/${secretName}.env"
                    sh "${sshCmd} 'sudo k3s kubectl delete secret ${secretName} -n cityzen --ignore-not-found'"
                    sh "${sshCmd} 'sudo k3s kubectl create secret generic ${secretName} --from-env-file=/tmp/${secretName}.env -n cityzen'"
                    // Remove the plaintext env file from the remote /tmp immediately.
                    sh "${sshCmd} 'rm -f /tmp/${secretName}.env'"
                }
            }
            // Create minio secret (same shell-expansion pattern as above).
            withCredentials([file(credentialsId: 'minio-env', variable: 'MINIO_ENV_FILE')]) {
                sh "${sshCmd} 'rm -f /tmp/minio-secret.env'"
                sh "${env.DEPLOY_SCP} \"\$MINIO_ENV_FILE\" ${env.DEPLOY_USER}@${env.DEPLOY_HOST}:/tmp/minio-secret.env"
                sh "${sshCmd} 'sudo k3s kubectl delete secret minio-secret -n cityzen --ignore-not-found'"
                sh "${sshCmd} 'sudo k3s kubectl create secret generic minio-secret --from-env-file=/tmp/minio-secret.env -n cityzen'"
                sh "${sshCmd} 'rm -f /tmp/minio-secret.env'"
            }
            sh "${sshCmd} 'sudo k3s kubectl apply -f ${env.DEPLOY_K8S_DIR}/k8s/'"
            // Traefik must be scaled down so the NGINX Ingress Controller can bind port 80.
            // '|| true' sits outside the remote quotes, so an SSH failure is tolerated
            // here as well as a failed scale — intentional best-effort step.
            sh "${sshCmd} 'sudo k3s kubectl scale deployment traefik -n kube-system --replicas=0 2>/dev/null' || true"
            // Ensure the NGINX Ingress Controller is exposed as a LoadBalancer on port 80.
            sh "${sshCmd} \"sudo k3s kubectl patch svc ingress-nginx-controller -n ingress-nginx -p '{\\\"spec\\\":{\\\"type\\\":\\\"LoadBalancer\\\"}}' || true\""
            // Restart every deployment and wait for each rollout to converge.
            sh "${sshCmd} 'sudo k3s kubectl rollout restart deployments -n cityzen'"
            services.each { service ->
                sh "${sshCmd} 'sudo k3s kubectl rollout status deployment/${service} -n cityzen --timeout=120s'"
            }
            sh "${sshCmd} 'sudo k3s kubectl rollout status deployment/frontend -n cityzen --timeout=120s'"
        }
    }
}
}
// Post-build reporting: runs after all stages regardless of where a failure occurred.
post {
    success {
        echo 'CI/CD Pipeline finished successfully. Application is up and running.'
    }
    failure {
        echo 'Pipeline failed. Please review the security scan or test logs.'
    }
}
}