Skip to content

Commit 962ded7

Browse files
authored
Merge pull request #13 from biersoeckli/canary
25-02-07 Merging features from canary to main branch
2 parents b120027 + dd08930 commit 962ded7

62 files changed

Lines changed: 2198 additions & 196 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.github/workflows/build-release.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ jobs:
5252
with:
5353
context: ./
5454
push: true
55-
platforms: linux/amd64 #,linux/arm64
55+
platforms: linux/amd64,linux/arm64
5656
build-args: |
5757
VERSION_ARG=${{ github.ref_name }}
5858
tags: |

.github/workflows/canary-release.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ jobs:
5252
with:
5353
context: ./
5454
push: true
55-
platforms: linux/amd64
55+
platforms: linux/amd64 #,linux/arm64
5656
build-args: |
5757
VERSION_ARG=canary-${{ github.run_number }}
5858
tags: |

Dockerfile

Lines changed: 11 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,31 +1,25 @@
11
FROM node:18-alpine AS base
22

33
ARG VERSION_ARG
4-
54
RUN apk add --no-cache openssl
65

7-
# Install dependencies only when needed
86
FROM base AS deps
9-
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
10-
RUN apk add --no-cache libc6-compat openssl
7+
8+
# Install necessary packages for building
9+
RUN apk add --no-cache libc6-compat python3 make g++
10+
1111
WORKDIR /app
1212

13-
# Install dependencies based on the preferred package manager
13+
# Install dependencies
1414
COPY yarn.lock package.json ./
1515
RUN yarn install
1616

17-
1817
# Rebuild the source code only when needed
1918
FROM base AS builder
2019
WORKDIR /app
2120
COPY --from=deps /app/node_modules ./node_modules
2221
COPY . .
2322

24-
# Next.js collects completely anonymous telemetry data about general usage.
25-
# Learn more here: https://nextjs.org/telemetry
26-
# Uncomment the following line in case you want to disable telemetry during the build.
27-
# ENV NEXT_TELEMETRY_DISABLED 1
28-
2923
RUN yarn run prisma-generate-build
3024
RUN yarn run build
3125
RUN rm -rf ./next/standalone
@@ -34,36 +28,27 @@ RUN rm -rf ./next/standalone
3428
FROM base AS runner
3529
WORKDIR /app
3630

37-
ENV NODE_ENV production
38-
# Uncomment the following line in case you want to disable telemetry during runtime.
39-
# ENV NEXT_TELEMETRY_DISABLED 1
31+
ENV NODE_ENV=production
32+
ENV PYTHON=/usr/bin/python3
33+
ENV QS_VERSION=$VERSION_ARG
4034

4135
RUN apk add --no-cache git
4236

4337
RUN addgroup --system --gid 1001 nodejs
4438
RUN adduser --system --uid 1001 nextjs
4539

46-
RUN mkdir storage
47-
RUN chown nextjs:nodejs storage
48-
49-
RUN mkdir tmp-storage
50-
RUN chown nextjs:nodejs tmp-storage
40+
RUN mkdir storage tmp-storage
41+
RUN chown nextjs:nodejs storage tmp-storage
5142

52-
# Automatically leverage output traces to reduce image size
53-
# https://nextjs.org/docs/advanced-features/output-file-tracing
5443
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
5544
COPY --from=builder --chown=nextjs:nodejs /app/prisma ./prisma
5645
COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next
5746
COPY --from=builder --chown=nextjs:nodejs /app/dist ./dist
58-
COPY --from=builder --chown=nextjs:nodejs /app/dist ./dist
5947
COPY --from=builder --chown=nextjs:nodejs /app/package.json ./package.json
6048
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
6149

6250
USER nextjs
6351

6452
ENV PORT=3000
65-
ENV QS_VERSION=$VERSION_ARG
6653

67-
# server.js is created by next build from the standalone output
68-
# https://nextjs.org/docs/pages/api-reference/next-config-js/output
69-
CMD HOSTNAME="0.0.0.0" npm run start-prod
54+
CMD HOSTNAME="0.0.0.0" npm run start-prod

setup/reset-password.sh

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
#!/bin/sh
2+
3+
# curl -sfL https://get.quickstack.dev/reset-password.sh | sh -
4+
5+
DEPLOYMENT="quickstack"
6+
NAMESPACE="quickstack"
7+
8+
# Get the first pod name of the deployment
9+
POD_NAME=$(kubectl get pods -n "$NAMESPACE" -l app="$DEPLOYMENT" -o jsonpath="{.items[0].metadata.name}")
10+
11+
if [ -z "$POD_NAME" ]; then
12+
echo "Could not find a running QuickStack instance on your server/cluster."
13+
exit 1
14+
fi
15+
16+
echo "Found QuickStack instance: $POD_NAME"
17+
echo "Initializing password change..."
18+
19+
# Patch the deployment to add or update START_MODE=reset-password
20+
kubectl patch deployment "$DEPLOYMENT" -n "$NAMESPACE" --type='json' -p='[
21+
{
22+
"op": "add",
23+
"path": "/spec/template/spec/containers/0/env/-",
24+
"value": { "name": "START_MODE", "value": "reset-password" }
25+
}
26+
]'
27+
28+
echo "Initialized password change successfully, please wait..."
29+
30+
sleep 2
31+
32+
echo "Waiting for the new pod to be in Running status..."
33+
kubectl wait --for=condition=Ready pod -l app="$DEPLOYMENT" -n "$NAMESPACE" --timeout=300s
34+
35+
# Retrieve the new pod name
36+
NEW_POD=""
37+
while [ -z "$NEW_POD" ] || [ "$NEW_POD" = "$OLD_POD" ]; do
38+
sleep 2
39+
NEW_POD=$(kubectl get pods -n "$NAMESPACE" -l app="$DEPLOYMENT" -o jsonpath="{.items[-1].metadata.name}")
40+
done
41+
42+
kubectl logs -f "$NEW_POD" -n "$NAMESPACE"

setup/setup-canary.sh

Lines changed: 178 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,178 @@
1+
#!/bin/bash
2+
3+
# curl -sfL https://get.quickstack.dev/setup-canary.sh | sh -
4+
5+
select_network_interface() {
6+
if [ -z "$INSTALL_K3S_INTERFACE" ]; then
7+
interfaces_with_ips=$(ip -o -4 addr show | awk '!/^[0-9]*: lo:/ {print $2, $4}' | cut -d'/' -f1)
8+
9+
echo "Available network interfaces:"
10+
echo "$interfaces_with_ips"
11+
echo ""
12+
echo "*******************************************************************************************************"
13+
echo ""
14+
echo "If you plan to use QuickStack in a cluster using multiple servers in multiple networks (private/public),"
15+
echo "choose the network interface you want to use for the communication between the servers."
16+
echo ""
17+
echo "If you plan to use QuickStack in a single server setup, choose the network interface with the public IP."
18+
echo ""
19+
20+
i=1
21+
echo "$interfaces_with_ips" | while read -r iface ip; do
22+
printf "%d) %s (%s)\n" "$i" "$iface" "$ip"
23+
i=$((i + 1))
24+
done
25+
26+
printf "Please enter the number of the interface to use: "
27+
# Change read to use /dev/tty explicitly
28+
read -r choice </dev/tty
29+
30+
selected=$(echo "$interfaces_with_ips" | sed -n "${choice}p")
31+
selected_iface=$(echo "$selected" | awk '{print $1}')
32+
selected_ip=$(echo "$selected" | awk '{print $2}')
33+
34+
if [ -n "$selected" ]; then
35+
echo "Selected interface: $selected_iface ($selected_ip)"
36+
else
37+
echo "Invalid selection. Exiting."
38+
exit 1
39+
fi
40+
fi
41+
42+
echo "Using network interface: $selected_iface with IP address: $selected_ip"
43+
}
44+
45+
wait_until_all_pods_running() {
46+
47+
# Waits another 5 seconds to make sure all pods are registered for the first time.
48+
sleep 5
49+
50+
while true; do
51+
OUTPUT=$(sudo k3s kubectl get pods -A --no-headers 2>&1)
52+
53+
# Checks if there are no resources found --> Kubernetes is still starting up
54+
if echo "$OUTPUT" | grep -q "No resources found"; then
55+
echo "Kubernetes is still starting up..."
56+
else
57+
# Extracts the STATUS column from the kubectl output and filters out the values "Running" and "Completed".
58+
STATUS=$(echo "$OUTPUT" | awk '{print $4}' | grep -vE '^(Running|Completed)$')
59+
60+
# If the STATUS variable is empty, all pods are running and the loop can be exited.
61+
if [ -z "$STATUS" ]; then
62+
echo "Pods started successfully."
63+
break
64+
else
65+
echo "Waiting for all pods to come online..."
66+
fi
67+
fi
68+
69+
# Waits for X seconds before checking the pod status again.
70+
sleep 10
71+
done
72+
73+
# Waits another 5 seconds to make sure all pods are ready.
74+
sleep 5
75+
76+
sudo kubectl get node
77+
sudo kubectl get pods -A
78+
}
79+
80+
# Prompt for network interface
81+
select_network_interface
82+
83+
# install nfs-common and open-iscsi
84+
echo "Installing nfs-common..."
85+
sudo apt-get update
86+
sudo apt-get install open-iscsi nfs-common -y
87+
88+
# Installation of k3s
89+
#curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--node-ip=192.168.1.2 --advertise-address=192.168.1.2 --node-external-ip=188.245.236.232 --flannel-iface=enp7s0" INSTALL_K3S_VERSION="v1.31.3+k3s1" sh -
90+
91+
echo "Installing k3s with --flannel-iface=$selected_iface"
92+
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-iface=$selected_iface" INSTALL_K3S_VERSION="v1.31.3+k3s1" sh -
93+
# Todo: Check for Ready node, takes ~30 seconds
94+
sudo k3s kubectl get node
95+
96+
echo "Waiting for Kubernetes to start..."
97+
wait_until_all_pods_running
98+
99+
# Installation of Longhorn
100+
sudo kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.7.2/deploy/longhorn.yaml
101+
echo "Waiting for Longhorn to start..."
102+
wait_until_all_pods_running
103+
104+
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
105+
# THIS MUST BE INSTALLED ON ALL NODES --> https://longhorn.io/docs/1.7.2/deploy/install/#installing-nfsv4-client
106+
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
107+
108+
#sudo kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/prerequisite/longhorn-nfs-installation.yaml
109+
#wait_until_all_pods_running
110+
111+
# Installation of Cert-Manager
112+
sudo kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
113+
echo "Waiting for Cert-Manager to start..."
114+
wait_until_all_pods_running
115+
sudo kubectl -n cert-manager get pod
116+
117+
# Checking installation of Longhorn
118+
sudo apt-get install jq -y
119+
sudo curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.7.2/scripts/environment_check.sh | bash
120+
121+
joinTokenForOtherNodes=$(sudo cat /var/lib/rancher/k3s/server/node-token)
122+
123+
# deploy QuickStack
124+
cat <<EOF >quickstack-setup-job.yaml
125+
apiVersion: v1
126+
kind: Namespace
127+
metadata:
128+
name: quickstack
129+
---
130+
apiVersion: v1
131+
kind: ServiceAccount
132+
metadata:
133+
name: qs-service-account
134+
namespace: quickstack
135+
---
136+
apiVersion: rbac.authorization.k8s.io/v1
137+
kind: ClusterRoleBinding
138+
metadata:
139+
name: qs-role-binding
140+
subjects:
141+
- kind: ServiceAccount
142+
name: qs-service-account
143+
namespace: quickstack
144+
roleRef:
145+
kind: ClusterRole
146+
name: cluster-admin
147+
apiGroup: rbac.authorization.k8s.io
148+
---
149+
apiVersion: batch/v1
150+
kind: Job
151+
metadata:
152+
name: quickstack-setup-job
153+
namespace: quickstack
154+
spec:
155+
ttlSecondsAfterFinished: 3600
156+
template:
157+
spec:
158+
serviceAccountName: qs-service-account
159+
containers:
160+
- name: quickstack-container
161+
image: quickstack/quickstack:canary
162+
env:
163+
- name: START_MODE
164+
value: "setup"
165+
- name: K3S_JOIN_TOKEN
166+
value: "$joinTokenForOtherNodes"
167+
imagePullPolicy: Always
168+
restartPolicy: Never
169+
backoffLimit: 0
170+
EOF
171+
sudo kubectl apply -f quickstack-setup-job.yaml
172+
rm quickstack-setup-job.yaml
173+
wait_until_all_pods_running
174+
sudo kubectl logs -f job/quickstack-setup-job -n quickstack
175+
176+
# evaluate url to add node to cluster
177+
# echo "To add an additional node to the cluster, run the following command on the worker node:"
178+
# echo "curl -sfL https://get.quickstack.dev/setup-worker.sh | K3S_URL=https://<IP-ADDRESS-OR-HOSTNAME-OF-MASTERNODE>:6443 JOIN_TOKEN=$joinTokenForOtherNodes sh -"

setup/setup-worker.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ select_network_interface() {
3535
i=$((i + 1))
3636
done
3737

38-
printf "Please enter the number of the interface to use (1-%d): " "$((i-1))"
38+
printf "Please enter the number of the interface to use: "
3939
# Change read to use /dev/tty explicitly
4040
read -r choice </dev/tty
4141

setup/setup.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ select_network_interface() {
2323
i=$((i + 1))
2424
done
2525

26-
printf "Please enter the number of the interface to use (1-%d): " "$((i - 1))"
26+
printf "Please enter the number of the interface to use: "
2727
# Change read to use /dev/tty explicitly
2828
read -r choice </dev/tty
2929

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
import { DateUtils } from '../../../shared/utils/date.utils';
2+
3+
describe('DateUtils', () => {
4+
5+
test('should return true for the same day', () => {
6+
const date1 = new Date(2023, 9, 10);
7+
const date2 = new Date(2023, 9, 10);
8+
expect(DateUtils.isSameDay(date1, date2)).toBe(true);
9+
});
10+
11+
test('should return false for different days', () => {
12+
const date1 = new Date(2023, 9, 10);
13+
const date2 = new Date(2023, 9, 11);
14+
expect(DateUtils.isSameDay(date1, date2)).toBe(false);
15+
});
16+
17+
test('should return false for different months', () => {
18+
const date1 = new Date(2023, 8, 10);
19+
const date2 = new Date(2023, 9, 10);
20+
expect(DateUtils.isSameDay(date1, date2)).toBe(false);
21+
});
22+
23+
test('should return false for different years', () => {
24+
const date1 = new Date(2022, 9, 10);
25+
const date2 = new Date(2023, 9, 10);
26+
expect(DateUtils.isSameDay(date1, date2)).toBe(false);
27+
});
28+
});
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
import { TraefikMeUtils } from '../../../shared/utils/traefik-me.utils';
2+
3+
describe('TraefikMeUtils', () => {
4+
describe('isValidTraefikMeDomain', () => {
5+
it('should return true for valid traefik.me domain', () => {
6+
expect(TraefikMeUtils.isValidTraefikMeDomain('example.traefik.me')).toBe(true);
7+
});
8+
9+
it('should return false for domain not ending with .traefik.me', () => {
10+
expect(TraefikMeUtils.isValidTraefikMeDomain('example.com')).toBe(false);
11+
});
12+
13+
it('should return false for domain with more than three parts', () => {
14+
expect(TraefikMeUtils.isValidTraefikMeDomain('sub.example.traefik.me')).toBe(false);
15+
});
16+
17+
it('should return false for domain with less than three parts', () => {
18+
expect(TraefikMeUtils.isValidTraefikMeDomain('traefik.me')).toBe(false);
19+
});
20+
21+
it('should return false for empty string', () => {
22+
expect(TraefikMeUtils.isValidTraefikMeDomain('')).toBe(false);
23+
});
24+
});
25+
});

0 commit comments

Comments
 (0)