From ec12ce2ea6974313ac917ac0381beaf95fec13e6 Mon Sep 17 00:00:00 2001 From: Thomas Cooper Date: Thu, 9 Apr 2026 14:37:30 +0100 Subject: [PATCH 1/5] Add resource requests and limits to all components Set CPU and memory requests equal to limits for every operator deployment and custom resource across the core and metrics overlays. * Patch Strimzi, Apicurio Registry, and Console operator deployments via kustomize strategic-merge patches * Set resource specs on Kafka node pools and entity operator (topic + user operator) in the Kafka CR * Set resource specs on Apicurio Registry app and UI containers * Set resource specs on Console API and UI containers * Set resource specs on Prometheus Operator deployment and Prometheus CR Signed-off-by: Thomas Cooper --- .../kustomization.yaml | 7 ++++ .../kustomization.yaml | 20 ++++++++++ .../base/strimzi-operator/kustomization.yaml | 7 ++++ .../stack/apicurio-registry/registry.yaml | 25 +++++++++++- .../core/stack/kafka/kustomization.yaml | 40 +++++++++++++++++++ .../stack/streamshub-console/console.yaml | 19 +++++++++ .../prometheus-operator/kustomization.yaml | 20 ++++++++++ .../metrics/stack/prometheus/prometheus.yaml | 4 ++ 8 files changed, 141 insertions(+), 1 deletion(-) diff --git a/components/core/base/apicurio-registry-operator/kustomization.yaml b/components/core/base/apicurio-registry-operator/kustomization.yaml index 80bb6d3..6f5b7b9 100644 --- a/components/core/base/apicurio-registry-operator/kustomization.yaml +++ b/components/core/base/apicurio-registry-operator/kustomization.yaml @@ -41,6 +41,13 @@ patches: spec: containers: - name: apicurio-registry-operator + resources: + requests: + cpu: 200m + memory: 256Mi + limits: + cpu: 200m + memory: 256Mi env: - name: APICURIO_OPERATOR_WATCHED_NAMESPACES value: '' diff --git a/components/core/base/streamshub-console-operator/kustomization.yaml b/components/core/base/streamshub-console-operator/kustomization.yaml index 4112aaa..2e44f7f 100644 --- 
a/components/core/base/streamshub-console-operator/kustomization.yaml +++ b/components/core/base/streamshub-console-operator/kustomization.yaml @@ -25,3 +25,23 @@ patches: - op: replace path: /subjects/0/namespace value: streamshub-console + - target: + kind: Deployment + name: streamshub-console-operator + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: streamshub-console-operator + spec: + template: + spec: + containers: + - name: streamshub-console-operator + resources: + requests: + cpu: 500m + memory: 256Mi + limits: + cpu: 500m + memory: 256Mi diff --git a/components/core/base/strimzi-operator/kustomization.yaml b/components/core/base/strimzi-operator/kustomization.yaml index b98890b..b0b0b81 100644 --- a/components/core/base/strimzi-operator/kustomization.yaml +++ b/components/core/base/strimzi-operator/kustomization.yaml @@ -40,6 +40,13 @@ patches: spec: containers: - name: strimzi-cluster-operator + resources: + requests: + cpu: 200m + memory: 384Mi + limits: + cpu: 200m + memory: 384Mi env: - name: STRIMZI_NAMESPACE value: '*' diff --git a/components/core/stack/apicurio-registry/registry.yaml b/components/core/stack/apicurio-registry/registry.yaml index b78a598..3672535 100644 --- a/components/core/stack/apicurio-registry/registry.yaml +++ b/components/core/stack/apicurio-registry/registry.yaml @@ -3,4 +3,27 @@ kind: ApicurioRegistry3 metadata: name: apicurio-registry spec: - app: {} + app: + podTemplateSpec: + spec: + containers: + - name: apicurio-registry-app + resources: + requests: + cpu: 500m + memory: 512Mi + limits: + cpu: 500m + memory: 512Mi + ui: + podTemplateSpec: + spec: + containers: + - name: apicurio-registry-ui + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 100m + memory: 256Mi diff --git a/components/core/stack/kafka/kustomization.yaml b/components/core/stack/kafka/kustomization.yaml index d451826..9cf519b 100644 --- a/components/core/stack/kafka/kustomization.yaml +++ 
b/components/core/stack/kafka/kustomization.yaml @@ -25,3 +25,43 @@ patches: - op: replace path: /metadata/labels/strimzi.io~1cluster value: dev-cluster + - target: + kind: KafkaNodePool + patch: |- + apiVersion: kafka.strimzi.io/v1beta2 + kind: KafkaNodePool + metadata: + name: dual-role + spec: + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 500m + memory: 1Gi + - target: + kind: Kafka + patch: |- + apiVersion: kafka.strimzi.io/v1beta2 + kind: Kafka + metadata: + name: dev-cluster + spec: + entityOperator: + topicOperator: + resources: + requests: + cpu: 250m + memory: 384Mi + limits: + cpu: 250m + memory: 384Mi + userOperator: + resources: + requests: + cpu: 250m + memory: 384Mi + limits: + cpu: 250m + memory: 384Mi diff --git a/components/core/stack/streamshub-console/console.yaml b/components/core/stack/streamshub-console/console.yaml index eaa1f6c..c02250a 100644 --- a/components/core/stack/streamshub-console/console.yaml +++ b/components/core/stack/streamshub-console/console.yaml @@ -4,6 +4,25 @@ metadata: name: streamshub-console spec: hostname: console.streamshub.local + containers: + api: + spec: + resources: + requests: + cpu: 500m + memory: 512Mi + limits: + cpu: 500m + memory: 512Mi + ui: + spec: + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 250m + memory: 256Mi kafkaClusters: - name: dev-cluster namespace: kafka diff --git a/components/metrics/base/prometheus-operator/kustomization.yaml b/components/metrics/base/prometheus-operator/kustomization.yaml index 4281632..855473c 100644 --- a/components/metrics/base/prometheus-operator/kustomization.yaml +++ b/components/metrics/base/prometheus-operator/kustomization.yaml @@ -20,3 +20,23 @@ patches: - op: replace path: /subjects/0/namespace value: monitoring + - target: + kind: Deployment + name: prometheus-operator + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: prometheus-operator + spec: + template: + spec: + containers: + - name: 
prometheus-operator + resources: + requests: + cpu: 200m + memory: 256Mi + limits: + cpu: 200m + memory: 256Mi diff --git a/components/metrics/stack/prometheus/prometheus.yaml b/components/metrics/stack/prometheus/prometheus.yaml index be2b52e..ea8311d 100644 --- a/components/metrics/stack/prometheus/prometheus.yaml +++ b/components/metrics/stack/prometheus/prometheus.yaml @@ -6,6 +6,10 @@ spec: replicas: 1 resources: requests: + cpu: 200m + memory: 400Mi + limits: + cpu: 200m memory: 400Mi serviceAccountName: prometheus-server podMonitorSelector: From 3b23a307dfc3e54814e04b639d6e3eb902602d08 Mon Sep 17 00:00:00 2001 From: Thomas Cooper Date: Thu, 9 Apr 2026 15:29:58 +0100 Subject: [PATCH 2/5] =?UTF-8?q?Add=20resource-limit=20verification,=20over?= =?UTF-8?q?lay=20docs,=20and=20CI=20improvements=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=E2=94=82=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=E2=94=82=20Add=20CI=20scripts=20that=20verify?= 
=?UTF-8?q?=20every=20container=20in=20an=20overlay=20has=20resource=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=E2=94=82=20requests=20and=20limits,=20?= =?UTF-8?q?and=20that=20overlay=20documentation=20pages=20declare=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20accurate=20resourc?= =?UTF-8?q?e=20totals.=20Add=20documentation=20for=20the=20core=20overlay?= =?UTF-8?q?=20and=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20a=20guide=20?= =?UTF-8?q?for=20overlay=20contributors.=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= 
=?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20*=20Add=20Ve?= =?UTF-8?q?rifyResourceLimits=20script=20to=20check=20all=20containers=20a?= =?UTF-8?q?nd=20CR=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82?= =?UTF-8?q?=20=20=20resource=20fields=20have=20requests=20and=20limits=20s?= =?UTF-8?q?et=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= 
=?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20*=20Add=20Ve?= =?UTF-8?q?rifyDocumentedResources=20script=20to=20check=20documented=20cp?= =?UTF-8?q?u=5Ftotal=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20=20=20and?= =?UTF-8?q?=20memory=5Ftotal=20match=20kustomize=20build=20output=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=E2=94=82=20*=20Add=20CrdSchemaUtils=20?= =?UTF-8?q?shared=20utility=20for=20CRD=20schema=20introspection=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20*=20Add=20unit?= =?UTF-8?q?=20tests=20for=20both=20verification=20scripts=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= 
=?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=E2=94=82=20*=20Move=20existing=20scritp?= =?UTF-8?q?=20tests=20into=20tests=20subdirectory=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=E2=94=82=20*=20Add=20script-tests.yaml=20workfl?= =?UTF-8?q?ow=20to=20run=20script=20unit=20tests=20in=20CI=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82?= =?UTF-8?q?=20*=20Add=20docs/overlays/core.md=20with=20install=20instructi?= =?UTF-8?q?ons,=20components=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= 
=?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=E2=94=82=20=20=20table,=20and=20resource=20requirements=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=E2=94=82=20*=20Add=20docs/overlays/developing.md=20guide=20?= =?UTF-8?q?covering=20resource=20limit=20and=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=E2=94=82=20=20=20documentation=20requirements=20for=20ov?= =?UTF-8?q?erlay=20contributors=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20*=20Add=20resou?= =?UTF-8?q?rce=20requirements=20frontmatter=20and=20section=20to=20metrics?= =?UTF-8?q?=20overlay=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= 
=?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20=20=20docs?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=E2=94=82=20*=20Refactor=20valida?= =?UTF-8?q?te.yaml=20to=20discover=20overlays=20dynamically=20instead=20of?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=E2=94=82=20=20=20a=20hardcode?= =?UTF-8?q?d=20list=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= 
=?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20=20?= =?UTF-8?q?=20=E2=94=82=20*=20Update=20README=20with=20new=20script=20desc?= =?UTF-8?q?riptions=20and=20test=20instruction?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Thomas Cooper --- .github/scripts/CrdSchemaUtils.java | 274 ++++++++ .../scripts/VerifyDocumentedResources.java | 484 ++++++++++++++ .github/scripts/VerifyInstall.java | 60 ++ .github/scripts/VerifyResourceLimits.java | 247 +++++++ .../{ => tests}/ComputeTestMatrixTest.java | 2 +- .../tests/VerifyDocumentedResourcesTest.java | 587 +++++++++++++++++ .../tests/VerifyResourceLimitsTest.java | 612 ++++++++++++++++++ .github/workflows/script-tests.yaml | 28 + .github/workflows/validate.yaml | 82 ++- README.md | 10 +- docs/overlays/_index.md | 1 + docs/overlays/core.md | 55 ++ docs/overlays/developing.md | 91 +++ docs/overlays/metrics.md | 7 + 14 files changed, 2520 insertions(+), 20 deletions(-) create mode 100644 .github/scripts/CrdSchemaUtils.java create mode 100644 .github/scripts/VerifyDocumentedResources.java create mode 100644 .github/scripts/VerifyResourceLimits.java rename .github/scripts/{ => tests}/ComputeTestMatrixTest.java (99%) create mode 100644 .github/scripts/tests/VerifyDocumentedResourcesTest.java create mode 100644 .github/scripts/tests/VerifyResourceLimitsTest.java create mode 100644 .github/workflows/script-tests.yaml create mode 100644 docs/overlays/core.md create mode 100644 docs/overlays/developing.md diff --git a/.github/scripts/CrdSchemaUtils.java b/.github/scripts/CrdSchemaUtils.java new file mode 100644 index 0000000..01814fd --- /dev/null +++ b/.github/scripts/CrdSchemaUtils.java @@ -0,0 +1,274 @@ +import io.fabric8.kubernetes.api.model.Quantity; + +import java.math.BigDecimal; +import 
java.math.RoundingMode; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Shared utilities for CRD schema introspection and resource path resolution. + * + *

Used by both {@code VerifyResourceLimits} and {@code VerifyDocumentedResources} + * to discover {@code ResourceRequirements} fields in CRD OpenAPI v3 schemas + * and resolve those paths against CR instances. + */ +public class CrdSchemaUtils { + + private CrdSchemaUtils() { } + + /** + * Extract the kind name from a CRD document. + */ + static String extractCrdKind(Map crd) { + Map names = getNestedMap(crd, "spec", "names"); + return names != null ? (String) names.get("kind") : null; + } + + /** + * Extract the OpenAPI v3 schema from the first version of a CRD. + */ + @SuppressWarnings("unchecked") + static Map extractCrdSchema(Map crd) { + Map spec = getMap(crd, "spec"); + if (spec == null) return null; + + Object versionsObj = spec.get("versions"); + if (!(versionsObj instanceof List)) return null; + + List versions = (List) versionsObj; + if (versions.isEmpty()) return null; + + Object firstVersion = versions.get(0); + if (!(firstVersion instanceof Map)) return null; + + return getNestedMap((Map) firstVersion, "schema", "openAPIV3Schema"); + } + + /** + * Recursively walk a CRD schema to find all ResourceRequirements fields. + * Records the JSON path for each field found. + * + *

This method finds ALL ResourceRequirements fields without filtering. + * Callers that need to skip certain paths (e.g., pod-level overhead fields + * inside embedded PodTemplateSpecs) should filter the results using + * {@link #isPodSpecOverheadPath(String)}. + */ + @SuppressWarnings("unchecked") + static void walkSchema(Map schemaNode, String currentPath, List result) { + if (schemaNode == null) return; + + Map properties = getMap(schemaNode, "properties"); + if (properties == null) return; + + if (isResourceRequirements(properties)) { + result.add(currentPath); + return; + } + + for (Map.Entry entry : properties.entrySet()) { + if (!(entry.getValue() instanceof Map)) continue; + + Map childSchema = (Map) entry.getValue(); + String childPath = currentPath + "." + entry.getKey(); + String type = (String) childSchema.get("type"); + + if ("array".equals(type)) { + Map items = getMap(childSchema, "items"); + if (items != null) { + walkSchema(items, childPath + "[]", result); + } + } else { + walkSchema(childSchema, childPath, result); + } + } + } + + /** + * Detect a ResourceRequirements field by its OpenAPI schema signature. + * Must have "limits" and "requests" properties where both have + * additionalProperties with x-kubernetes-int-or-string: true. 
+ */ + static boolean isResourceRequirements(Map properties) { + if (!properties.containsKey("limits") || !properties.containsKey("requests")) { + return false; + } + + return hasIntOrStringAdditionalProperties(properties.get("limits")) + && hasIntOrStringAdditionalProperties(properties.get("requests")); + } + + @SuppressWarnings("unchecked") + private static boolean hasIntOrStringAdditionalProperties(Object fieldObj) { + if (!(fieldObj instanceof Map)) return false; + Map field = (Map) fieldObj; + Object addProps = field.get("additionalProperties"); + if (!(addProps instanceof Map)) return false; + return Boolean.TRUE.equals(((Map) addProps).get("x-kubernetes-int-or-string")); + } + + /** + * Check whether a ResourceRequirements path represents a pod-level + * overhead field embedded in a PodTemplateSpec, rather than a + * component-level resource requirement. + * + *

Pod-level resources (added in k8s 1.30) appear as siblings of + * {@code containers} inside embedded PodSpec structures like + * {@code template.spec} or {@code podTemplateSpec.spec}. These are + * infrastructure overhead and should not be required in CR configs. + * + *

CRD-level resources (e.g., Prometheus {@code spec.resources}) that + * happen to be siblings of {@code containers} are NOT filtered by this + * method — they appear at the CRD spec level, not inside an embedded + * PodTemplateSpec. + * + * @param path the dot-separated path (e.g., ".spec.app.podTemplateSpec.spec.resources") + * @return true if this is a pod-level overhead path that should be skipped + */ + static boolean isPodSpecOverheadPath(String path) { + return path.matches(".*\\.template\\.spec\\.resources$") + || path.matches(".*\\.podTemplateSpec\\.spec\\.resources$"); + } + + /** + * Resolve a path through a document, handling array segments (ending with []). + * Returns all leaf values reached along with their resolved paths. + */ + @SuppressWarnings("unchecked") + static List resolvePath(Object current, String[] segments, int index, String pathSoFar) { + if (index >= segments.length) { + return List.of(new ResolvedNode(pathSoFar, current)); + } + + String segment = segments[index]; + + if (segment.endsWith("[]")) { + String key = segment.substring(0, segment.length() - 2); + if (!(current instanceof Map)) return List.of(); + Object listObj = ((Map) current).get(key); + if (!(listObj instanceof List)) return List.of(); + + List list = (List) listObj; + List results = new ArrayList<>(); + for (int i = 0; i < list.size(); i++) { + results.addAll(resolvePath(list.get(i), segments, index + 1, + pathSoFar + "." + key + "[" + i + "]")); + } + return results; + } else { + if (!(current instanceof Map)) return List.of(); + Object child = ((Map) current).get(segment); + if (child == null) return List.of(); + return resolvePath(child, segments, index + 1, pathSoFar + "." 
+ segment); + } + } + + // --- Kubernetes quantity parsing (delegates to Fabric8 Quantity) --- + + private static final BigDecimal MILLIS_PER_CORE = BigDecimal.valueOf(1000); + private static final BigDecimal BYTES_PER_MIB = BigDecimal.valueOf(1_048_576); + + /** + * Parse a Kubernetes CPU quantity to millicores. + * + *

Handles all Kubernetes quantity formats via Fabric8 {@link Quantity}, + * including millicore suffixes ({@code "500m"}), whole/fractional cores + * ({@code "1"}, {@code "0.5"}), and values parsed by SnakeYAML as + * {@link Integer} or {@link Double}. + * + * @param value the CPU quantity (String, Integer, or Double) + * @return the value in millicores + */ + static long parseCpuMillis(Object value) { + Quantity q = Quantity.parse(String.valueOf(value)); + return q.getNumericalAmount().multiply(MILLIS_PER_CORE).longValue(); + } + + /** + * Parse a Kubernetes memory quantity to MiB. + * + *

Handles all Kubernetes quantity formats via Fabric8 {@link Quantity}, + * including binary suffixes ({@code Ki}, {@code Mi}, {@code Gi}, {@code Ti}, + * {@code Pi}, {@code Ei}), decimal suffixes ({@code k}, {@code M}, {@code G}, + * {@code T}, {@code P}, {@code E}), exponent notation, and plain byte counts. + * + * @param value the memory quantity (String, Integer, or Double) + * @return the value in MiB (rounded half-up) + */ + static long parseMemoryMiB(Object value) { + Quantity q = Quantity.parse(String.valueOf(value)); + BigDecimal bytes = Quantity.getAmountInBytes(q); + return bytes.divide(BYTES_PER_MIB, 0, RoundingMode.HALF_UP).longValue(); + } + + /** + * Check that {@code resources.requests} equals {@code resources.limits} + * for both CPU and memory (Guaranteed QoS invariant). + * + *

Uses numeric comparison via Fabric8 {@link Quantity} so semantically + * equal values in different formats (e.g., {@code "1"} vs {@code "1000m"}) + * are treated as equal. + * + * @param resources the resources map (with "requests" and "limits" sub-maps) + * @param prefix a human-readable prefix for error messages + * @return list of invariant violation messages (empty if requests == limits) + */ + @SuppressWarnings("unchecked") + static List checkRequestsEqualsLimits(Map resources, String prefix) { + List errors = new ArrayList<>(); + if (resources == null) return errors; + + Object requestsObj = resources.get("requests"); + Object limitsObj = resources.get("limits"); + if (!(requestsObj instanceof Map) || !(limitsObj instanceof Map)) return errors; + + Map requests = (Map) requestsObj; + Map limits = (Map) limitsObj; + + if (requests.containsKey("cpu") && limits.containsKey("cpu")) { + long reqCpu = parseCpuMillis(requests.get("cpu")); + long limCpu = parseCpuMillis(limits.get("cpu")); + if (reqCpu != limCpu) { + errors.add(prefix + " requests.cpu (" + reqCpu + + "m) != limits.cpu (" + limCpu + "m)"); + } + } + if (requests.containsKey("memory") && limits.containsKey("memory")) { + long reqMem = parseMemoryMiB(requests.get("memory")); + long limMem = parseMemoryMiB(limits.get("memory")); + if (reqMem != limMem) { + errors.add(prefix + " requests.memory (" + reqMem + + "Mi) != limits.memory (" + limMem + "Mi)"); + } + } + + return errors; + } + + // --- Utilities --- + + @SuppressWarnings("unchecked") + static Map getMap(Map parent, String key) { + Object value = parent.get(key); + return value instanceof Map ? (Map) value : null; + } + + static Map getNestedMap(Map root, String... 
keys) { + Map current = root; + for (String key : keys) { + current = getMap(current, key); + if (current == null) return null; + } + return current; + } + + static class ResolvedNode { + final String path; + final Object value; + + ResolvedNode(String path, Object value) { + this.path = path; + this.value = value; + } + } +} diff --git a/.github/scripts/VerifyDocumentedResources.java b/.github/scripts/VerifyDocumentedResources.java new file mode 100644 index 0000000..e3a8805 --- /dev/null +++ b/.github/scripts/VerifyDocumentedResources.java @@ -0,0 +1,484 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS org.yaml:snakeyaml:2.6 +//DEPS org.tomlj:tomlj:1.1.1 +//DEPS io.fabric8:kubernetes-model-core:7.6.1 +//SOURCES ScriptUtils.java +//SOURCES CrdSchemaUtils.java + +import org.tomlj.Toml; +import org.tomlj.TomlParseResult; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * Verify that documented resource totals in overlay doc pages match the + * actual resource requirements computed from {@code kustomize build}. + * + *

Each overlay doc stores {@code cpu_total} and {@code memory_total} + * in its TOML frontmatter. This script sums the resource requests from + * the kustomize output and asserts that documented values are greater + * than or equal to the actual values. + * + *

Also verifies the invariant that resource requests equal limits + * for every container. + */ +public class VerifyDocumentedResources { + + private static final String OVERLAYS_DIR = "overlays"; + private static final String DOCS_DIR = "docs/overlays"; + private static final Pattern CPU_DOC_PATTERN = Pattern.compile("(\\d+(?:\\.\\d+)?)\\s+CPU\\s+cores?", Pattern.CASE_INSENSITIVE); + private static final Pattern MEMORY_DOC_PATTERN = Pattern.compile("(\\d+(?:\\.\\d+)?)\\s+GiB", Pattern.CASE_INSENSITIVE); + + public static void main(String[] args) { + try { + int result = run(); + if (result != 0) { + System.exit(result); + } + } catch (UncheckedIOException e) { + System.err.println("ERROR: " + e.getMessage()); + System.exit(1); + } + } + + /** + * Main verification logic, separated from {@code main()} for testability. + * + * @return 0 on success, 1 on verification failure + */ + static int run() { + Path repoRoot = ScriptUtils.findRepoRoot(); + Path overlaysDir = repoRoot.resolve(OVERLAYS_DIR); + Path docsDir = repoRoot.resolve(DOCS_DIR); + + System.out.println("=== Verifying documented resource totals ==="); + System.out.println(); + + // Check that every overlay directory has a matching doc page + List coverageErrors = verifyAllOverlaysDocumented(overlaysDir, docsDir); + if (!coverageErrors.isEmpty()) { + coverageErrors.forEach(e -> System.err.println("ERROR: " + e)); + System.err.println(); + System.err.println("FAILED: " + coverageErrors.size() + " overlay documentation error(s) found"); + return 1; + } + + List overlayDocs = discoverOverlayDocs(docsDir); + if (overlayDocs.isEmpty()) { + System.err.println("ERROR: No overlay docs with cpu_total/memory_total found in " + docsDir); + return 1; + } + + List allErrors = new ArrayList<>(); + + for (OverlayDoc doc : overlayDocs) { + System.out.println("--- " + DOCS_DIR + "/" + doc.fileName + " ---"); + + // Verify overlay directory exists + Path overlayDir = repoRoot.resolve("overlays/" + doc.overlayName); + if 
(!Files.isDirectory(overlayDir)) { + allErrors.add(doc.fileName + ": overlay directory 'overlays/" + doc.overlayName + "' not found"); + continue; + } + + // Parse documented values + long docCpuMillis; + long docMemoryMiB; + try { + docCpuMillis = parseDocumentedCpu(doc.cpuTotal); + docMemoryMiB = parseDocumentedMemory(doc.memoryTotal); + } catch (IllegalArgumentException e) { + allErrors.add(doc.fileName + ": " + e.getMessage()); + continue; + } + + System.out.println(" Documented: cpu=" + docCpuMillis + "m, memory=" + docMemoryMiB + "Mi"); + + // Build kustomize output + List> baseDocs = + ScriptUtils.runKustomize(repoRoot, "overlays/" + doc.overlayName + "/base", true); + List> stackDocs = + ScriptUtils.runKustomize(repoRoot, "overlays/" + doc.overlayName + "/stack", true); + + // Sum resources + ResourceTotals totals = sumAllResources(baseDocs, stackDocs); + + System.out.println(" Actual: cpu=" + totals.cpuMillis + "m, memory=" + totals.memoryMiB + "Mi"); + + // Check documented >= actual + if (docCpuMillis < totals.cpuMillis) { + allErrors.add(doc.fileName + ": documented cpu (" + docCpuMillis + + "m) is less than actual (" + totals.cpuMillis + "m)"); + System.out.println(" cpu: " + docCpuMillis + "m < " + totals.cpuMillis + "m - FAILED"); + } else { + System.out.println(" cpu: " + docCpuMillis + "m >= " + totals.cpuMillis + "m - OK"); + } + + if (docMemoryMiB < totals.memoryMiB) { + allErrors.add(doc.fileName + ": documented memory (" + docMemoryMiB + + "Mi) is less than actual (" + totals.memoryMiB + "Mi)"); + System.out.println(" memory: " + docMemoryMiB + "Mi < " + totals.memoryMiB + "Mi - FAILED"); + } else { + System.out.println(" memory: " + docMemoryMiB + "Mi >= " + totals.memoryMiB + "Mi - OK"); + } + + // Check requests == limits invariant + if (totals.invariantErrors.isEmpty()) { + System.out.println(" requests == limits invariant: OK"); + } else { + allErrors.addAll(totals.invariantErrors); + System.out.println(" requests == limits invariant: 
FAILED (" + + totals.invariantErrors.size() + " violation(s))"); + } + + System.out.println(); + } + + if (allErrors.isEmpty()) { + System.out.println("All documented resource totals verified successfully"); + return 0; + } else { + allErrors.forEach(e -> System.err.println("ERROR: " + e)); + System.err.println(); + System.err.println("FAILED: " + allErrors.size() + " error(s) found"); + return 1; + } + } + + // --- Overlay doc discovery --- + + /** + * Scan the docs/overlays directory for .md files that have + * cpu_total and memory_total in their TOML frontmatter. + */ + static List discoverOverlayDocs(Path docsDir) { + List results = new ArrayList<>(); + try (DirectoryStream stream = Files.newDirectoryStream(docsDir, "*.md")) { + for (Path file : stream) { + String fileName = file.getFileName().toString(); + if (fileName.startsWith("_")) continue; + + String content = Files.readString(file); + TomlParseResult toml = extractTomlFrontmatter(content); + if (toml == null) continue; + + String cpuTotal = toml.getString("cpu_total"); + String memoryTotal = toml.getString("memory_total"); + if (cpuTotal == null || memoryTotal == null) continue; + + String overlayName = fileName.replace(".md", ""); + results.add(new OverlayDoc(fileName, overlayName, cpuTotal, memoryTotal)); + } + } catch (IOException e) { + throw new UncheckedIOException("Failed to scan " + docsDir, e); + } + return results; + } + + // --- Overlay documentation coverage --- + + /** + * Verify that every overlay directory has a corresponding documentation + * page with {@code cpu_total} and {@code memory_total} in its TOML + * frontmatter. 
+ * + * @param overlaysDir the overlays directory (e.g., {@code overlays/}) + * @param docsDir the overlay docs directory (e.g., {@code docs/overlays/}) + * @return list of error messages (empty if all overlays are documented) + */ + static List verifyAllOverlaysDocumented(Path overlaysDir, Path docsDir) { + List errors = new ArrayList<>(); + + try (DirectoryStream stream = Files.newDirectoryStream(overlaysDir, Files::isDirectory)) { + for (Path overlayDir : stream) { + String overlayName = overlayDir.getFileName().toString(); + if (overlayName.startsWith(".")) continue; + + Path docFile = docsDir.resolve(overlayName + ".md"); + if (!Files.exists(docFile)) { + errors.add("overlay '" + overlayName + "' has no documentation page at " + + DOCS_DIR + "/" + overlayName + ".md"); + continue; + } + + String content = Files.readString(docFile); + TomlParseResult toml = extractTomlFrontmatter(content); + if (toml == null) { + errors.add(overlayName + ".md: missing TOML frontmatter (+++...+++ block)"); + continue; + } + + if (toml.getString("cpu_total") == null) { + errors.add(overlayName + ".md: missing 'cpu_total' in frontmatter"); + } + if (toml.getString("memory_total") == null) { + errors.add(overlayName + ".md: missing 'memory_total' in frontmatter"); + } + } + } catch (IOException e) { + throw new UncheckedIOException("Failed to scan " + overlaysDir, e); + } + + if (errors.isEmpty()) { + System.out.println("All overlays have documentation pages with resource totals"); + } + + return errors; + } + + // --- TOML frontmatter extraction --- + + /** + * Extract and parse the TOML frontmatter from a markdown file. + * Frontmatter is delimited by {@code +++} lines. 
+ * + * @return the parsed TOML, or null if no frontmatter found + */ + static TomlParseResult extractTomlFrontmatter(String content) { + if (!content.startsWith("+++")) return null; + + int endIndex = content.indexOf("+++", 3); + if (endIndex < 0) return null; + + String tomlContent = content.substring(3, endIndex).trim(); + return Toml.parse(tomlContent); + } + + // --- Documented value parsing --- + + /** + * Parse a documented CPU value like "4 CPU cores" to millicores. + */ + static long parseDocumentedCpu(String value) { + Matcher matcher = CPU_DOC_PATTERN.matcher(value); + if (!matcher.find()) { + throw new IllegalArgumentException("Cannot parse CPU value: '" + value + + "' (expected format: ' CPU cores')"); + } + double cores = Double.parseDouble(matcher.group(1)); + return Math.round(cores * 1000); + } + + /** + * Parse a documented memory value like "4.5 GiB" to MiB. + */ + static long parseDocumentedMemory(String value) { + Matcher matcher = MEMORY_DOC_PATTERN.matcher(value); + if (!matcher.find()) { + throw new IllegalArgumentException("Cannot parse memory value: '" + value + + "' (expected format: ' GiB')"); + } + double gib = Double.parseDouble(matcher.group(1)); + return Math.round(gib * 1024); + } + + // --- Resource summing --- + + /** + * Sum all resource requests across Deployments (from base) and + * custom resources (from stack), using CRD schema introspection. 
+ */ + static ResourceTotals sumAllResources(List> baseDocs, + List> stackDocs) { + long totalCpu = 0; + long totalMemory = 0; + List invariantErrors = new ArrayList<>(); + + // --- Deployments (from base and stack) --- + List> allDocs = new ArrayList<>(baseDocs); + allDocs.addAll(stackDocs); + + List> deployments = allDocs.stream() + .filter(doc -> "Deployment".equals(doc.get("kind"))) + .collect(Collectors.toList()); + + for (Map deployment : deployments) { + ResourceTotals dt = sumDeploymentResources(deployment); + totalCpu += dt.cpuMillis; + totalMemory += dt.memoryMiB; + invariantErrors.addAll(dt.invariantErrors); + } + + // --- CRD schema analysis (from base) --- + Map> crdResourcePaths = new LinkedHashMap<>(); + + List> crds = baseDocs.stream() + .filter(doc -> "CustomResourceDefinition".equals(doc.get("kind"))) + .collect(Collectors.toList()); + + for (Map crd : crds) { + String kind = CrdSchemaUtils.extractCrdKind(crd); + if (kind == null) continue; + + Map schema = CrdSchemaUtils.extractCrdSchema(crd); + if (schema == null) continue; + + Map specSchema = CrdSchemaUtils.getNestedMap(schema, "properties", "spec"); + if (specSchema == null) continue; + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(specSchema, ".spec", paths); + + // Filter out pod-level overhead paths (e.g., resources inside embedded PodTemplateSpec) + paths = paths.stream() + .filter(p -> !CrdSchemaUtils.isPodSpecOverheadPath(p)) + .collect(Collectors.toList()); + + if (!paths.isEmpty()) { + crdResourcePaths.put(kind, paths); + } + } + + // --- CR instances (from stack) --- + for (Map doc : stackDocs) { + String kind = (String) doc.get("kind"); + if (kind == null || !crdResourcePaths.containsKey(kind)) continue; + + List paths = crdResourcePaths.get(kind); + for (String path : paths) { + ResourceTotals rt = sumCrResourcePath(doc, path); + totalCpu += rt.cpuMillis; + totalMemory += rt.memoryMiB; + invariantErrors.addAll(rt.invariantErrors); + } + } + + return new 
ResourceTotals(totalCpu, totalMemory, invariantErrors); + } + + /** + * Sum resource requests from all containers in a Deployment. + */ + @SuppressWarnings("unchecked") + static ResourceTotals sumDeploymentResources(Map deployment) { + long cpu = 0; + long memory = 0; + List invariantErrors = new ArrayList<>(); + + Map templateSpec = CrdSchemaUtils.getNestedMap(deployment, "spec", "template", "spec"); + if (templateSpec == null) return new ResourceTotals(0, 0, List.of()); + + Object containersObj = templateSpec.get("containers"); + if (!(containersObj instanceof List)) return new ResourceTotals(0, 0, List.of()); + + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(deployment); + + for (Object item : (List) containersObj) { + if (!(item instanceof Map)) continue; + Map container = (Map) item; + String containerName = (String) container.getOrDefault("name", ""); + String prefix = ref.namespace + ":" + ref.name + "/" + containerName; + + ResourceTotals rt = extractResourceValues(container.get("resources"), prefix); + cpu += rt.cpuMillis; + memory += rt.memoryMiB; + invariantErrors.addAll(rt.invariantErrors); + } + + return new ResourceTotals(cpu, memory, invariantErrors); + } + + /** + * Sum resource requests from a single ResourceRequirements path in a CR. 
+ */ + @SuppressWarnings("unchecked") + static ResourceTotals sumCrResourcePath(Map cr, String path) { + String[] segments = path.substring(1).split("\\."); + if (segments.length == 0) return new ResourceTotals(0, 0, List.of()); + + String resourcesKey = segments[segments.length - 1]; + String[] parentSegments = new String[segments.length - 1]; + System.arraycopy(segments, 0, parentSegments, 0, segments.length - 1); + + List parents = CrdSchemaUtils.resolvePath(cr, parentSegments, 0, ""); + if (parents.isEmpty()) return new ResourceTotals(0, 0, List.of()); + + long cpu = 0; + long memory = 0; + List invariantErrors = new ArrayList<>(); + + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(cr); + + for (CrdSchemaUtils.ResolvedNode parent : parents) { + if (!(parent.value instanceof Map)) continue; + Map parentMap = (Map) parent.value; + Object resources = parentMap.get(resourcesKey); + String fullPath = ref.kind + "/" + ref.name + " " + parent.path + "." + resourcesKey; + + ResourceTotals rt = extractResourceValues(resources, fullPath); + cpu += rt.cpuMillis; + memory += rt.memoryMiB; + invariantErrors.addAll(rt.invariantErrors); + } + + return new ResourceTotals(cpu, memory, invariantErrors); + } + + /** + * Extract CPU and memory request values from a resources object, + * and verify the requests == limits invariant. 
+ */ + @SuppressWarnings("unchecked") + static ResourceTotals extractResourceValues(Object resourcesObj, String prefix) { + if (!(resourcesObj instanceof Map)) return new ResourceTotals(0, 0, List.of()); + + Map resources = (Map) resourcesObj; + long cpu = 0; + long memory = 0; + + Object requestsObj = resources.get("requests"); + + if (requestsObj instanceof Map) { + Map requests = (Map) requestsObj; + if (requests.containsKey("cpu")) { + cpu = CrdSchemaUtils.parseCpuMillis(requests.get("cpu")); + } + if (requests.containsKey("memory")) { + memory = CrdSchemaUtils.parseMemoryMiB(requests.get("memory")); + } + } + + List invariantErrors = CrdSchemaUtils.checkRequestsEqualsLimits(resources, prefix); + + return new ResourceTotals(cpu, memory, invariantErrors); + } + + // --- Data classes --- + + static class OverlayDoc { + final String fileName; + final String overlayName; + final String cpuTotal; + final String memoryTotal; + + OverlayDoc(String fileName, String overlayName, String cpuTotal, String memoryTotal) { + this.fileName = fileName; + this.overlayName = overlayName; + this.cpuTotal = cpuTotal; + this.memoryTotal = memoryTotal; + } + } + + static class ResourceTotals { + final long cpuMillis; + final long memoryMiB; + final List invariantErrors; + + ResourceTotals(long cpuMillis, long memoryMiB, List invariantErrors) { + this.cpuMillis = cpuMillis; + this.memoryMiB = memoryMiB; + this.invariantErrors = invariantErrors; + } + } +} diff --git a/.github/scripts/VerifyInstall.java b/.github/scripts/VerifyInstall.java index 22dd846..6dd78f6 100644 --- a/.github/scripts/VerifyInstall.java +++ b/.github/scripts/VerifyInstall.java @@ -3,16 +3,22 @@ //DEPS org.yaml:snakeyaml:2.6 //SOURCES ScriptUtils.java +import io.fabric8.kubernetes.api.model.Container; import io.fabric8.kubernetes.api.model.GenericKubernetesResource; +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.api.model.ResourceRequirements; import 
io.fabric8.kubernetes.api.model.apps.Deployment; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.KubernetesClientBuilder; import io.fabric8.kubernetes.client.dsl.Resource; import java.nio.file.Path; +import java.util.ArrayList; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -121,6 +127,60 @@ public static void main(String[] args) { allPassed = false; } } + + // Step 5: Verify resource limits on all quickstart pods + System.out.println(); + System.out.println("--- Verifying pod resource limits ---"); + + Set namespaces = allDocs.stream() + .map(ScriptUtils::extractNamespace) + .filter(ns -> ns != null && !ns.isEmpty()) + .collect(Collectors.toCollection(LinkedHashSet::new)); + + for (String ns : namespaces) { + List pods = client.pods().inNamespace(ns) + .withLabel("app.kubernetes.io/part-of", "streamshub-developer-quickstart") + .list().getItems(); + + for (Pod pod : pods) { + String podName = pod.getMetadata().getName(); + for (Container container : pod.getSpec().getContainers()) { + String containerName = container.getName(); + ResourceRequirements resources = container.getResources(); + List missing = new ArrayList<>(); + + if (resources == null || resources.getRequests() == null + || resources.getRequests().isEmpty()) { + missing.add("requests"); + } else { + if (!resources.getRequests().containsKey("cpu")) + missing.add("requests.cpu"); + if (!resources.getRequests().containsKey("memory")) + missing.add("requests.memory"); + } + + if (resources == null || resources.getLimits() == null + || resources.getLimits().isEmpty()) { + missing.add("limits"); + } else { + if (!resources.getLimits().containsKey("cpu")) + missing.add("limits.cpu"); + if (!resources.getLimits().containsKey("memory")) + missing.add("limits.memory"); + } + + if (!missing.isEmpty()) { + 
System.err.println("ERROR: Pod " + ns + "/" + podName + + " container " + containerName + + " missing: " + String.join(", ", missing)); + allPassed = false; + } else { + System.out.println(" Pod " + ns + "/" + podName + + " container " + containerName + " - OK"); + } + } + } + } } if (!allPassed) { diff --git a/.github/scripts/VerifyResourceLimits.java b/.github/scripts/VerifyResourceLimits.java new file mode 100644 index 0000000..85d86f0 --- /dev/null +++ b/.github/scripts/VerifyResourceLimits.java @@ -0,0 +1,247 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS org.yaml:snakeyaml:2.6 +//DEPS io.fabric8:kubernetes-model-core:7.6.1 +//SOURCES ScriptUtils.java +//SOURCES CrdSchemaUtils.java + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Verify that all Deployments and custom resources in the quickstart + * have resource limits configured. + * + *

For Deployments, checks that every container has + * {@code resources.requests} and {@code resources.limits} with both + * {@code cpu} and {@code memory}. + * + *

For custom resources, introspects CRD OpenAPI v3 schemas to + * discover {@code ResourceRequirements} fields, then checks that + * the corresponding CR instances have those fields populated. + * Optional features (where the parent path doesn't exist in the CR) + * are skipped. + * + *

Also verifies that resource requests equal limits (Guaranteed QoS). + * + *

Environment variables: + *

    + *
  <li>{@code OVERLAY} — overlay name (default: "core")</li>
  • + *
+ */ +public class VerifyResourceLimits { + + private static final String DEFAULT_OVERLAY = "core"; + + public static void main(String[] args) { + String overlay = System.getenv().getOrDefault("OVERLAY", DEFAULT_OVERLAY); + + Path repoRoot = ScriptUtils.findRepoRoot(); + + System.out.println("=== Verifying resource limits (overlay: " + overlay + ") ==="); + System.out.println(); + + // Render both layers + List> baseDocs = + ScriptUtils.runKustomize(repoRoot, "overlays/" + overlay + "/base", true); + List> stackDocs = + ScriptUtils.runKustomize(repoRoot, "overlays/" + overlay + "/stack", true); + + List errors = new ArrayList<>(); + + // --- Check Deployments --- + System.out.println("--- Checking Deployments ---"); + List> deployments = baseDocs.stream() + .filter(doc -> "Deployment".equals(doc.get("kind"))) + .collect(Collectors.toList()); + + for (Map deployment : deployments) { + errors.addAll(checkDeploymentResources(deployment)); + } + System.out.println(); + + // --- Walk CRD schemas --- + System.out.println("--- CRD schema analysis ---"); + Map> crdResourcePaths = new LinkedHashMap<>(); + + List> crds = baseDocs.stream() + .filter(doc -> "CustomResourceDefinition".equals(doc.get("kind"))) + .collect(Collectors.toList()); + + for (Map crd : crds) { + String kind = CrdSchemaUtils.extractCrdKind(crd); + if (kind == null) continue; + + Map schema = CrdSchemaUtils.extractCrdSchema(crd); + if (schema == null) continue; + + // Walk from spec level + Map specSchema = CrdSchemaUtils.getNestedMap(schema, "properties", "spec"); + if (specSchema == null) continue; + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(specSchema, ".spec", paths); + + // Filter out pod-level overhead paths (e.g., resources inside embedded PodTemplateSpec) + paths = paths.stream() + .filter(p -> !CrdSchemaUtils.isPodSpecOverheadPath(p)) + .collect(Collectors.toList()); + + if (!paths.isEmpty()) { + crdResourcePaths.put(kind, paths); + System.out.println(" " + kind + ": " + 
paths.size() + + " ResourceRequirements path(s) found"); + paths.forEach(p -> System.out.println(" " + p)); + } + } + System.out.println(); + + // --- Check CR instances --- + System.out.println("--- Checking CR instances ---"); + + for (Map doc : stackDocs) { + String kind = (String) doc.get("kind"); + if (kind == null || !crdResourcePaths.containsKey(kind)) continue; + + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(doc); + List paths = crdResourcePaths.get(kind); + + for (String path : paths) { + errors.addAll(validateCrResourcePath(doc, ref, path)); + } + } + + // --- Results --- + System.out.println(); + if (errors.isEmpty()) { + System.out.println("All resource limits verified successfully"); + } else { + errors.forEach(e -> System.err.println("ERROR: " + e)); + System.err.println(); + System.err.println("FAILED: " + errors.size() + " resource limit violation(s) found"); + System.exit(1); + } + } + + // --- Deployment checking --- + + @SuppressWarnings("unchecked") + static List checkDeploymentResources(Map deployment) { + List errors = new ArrayList<>(); + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(deployment); + + Map templateSpec = CrdSchemaUtils.getNestedMap(deployment, "spec", "template", "spec"); + if (templateSpec == null) return errors; + + Object containersObj = templateSpec.get("containers"); + if (!(containersObj instanceof List)) return errors; + + for (Object item : (List) containersObj) { + if (!(item instanceof Map)) continue; + Map container = (Map) item; + String containerName = (String) container.getOrDefault("name", ""); + String prefix = ref.namespace + ":" + ref.name + "/" + containerName; + + List containerErrors = checkResourcesObject( + container.get("resources"), prefix); + + if (containerErrors.isEmpty()) { + System.out.println(" " + prefix + " - OK"); + } else { + errors.addAll(containerErrors); + } + } + return errors; + } + + // --- CR instance validation --- + + /** + * Validate a 
single ResourceRequirements path on a CR instance. + * If the parent path doesn't exist, the path is skipped (optional feature). + * If the parent exists but resources are missing/incomplete, an error is reported. + */ + @SuppressWarnings("unchecked") + static List validateCrResourcePath(Map cr, + ScriptUtils.ResourceRef ref, + String path) { + // Split path like ".spec.entityOperator.topicOperator.resources" + // into segments: ["spec", "entityOperator", "topicOperator", "resources"] + String[] segments = path.substring(1).split("\\."); + if (segments.length == 0) return List.of(); + + // The last segment should be "resources" (the ResourceRequirements field itself) + // Navigate to the parent and check if the resources field exists + String resourcesKey = segments[segments.length - 1]; + String[] parentSegments = new String[segments.length - 1]; + System.arraycopy(segments, 0, parentSegments, 0, segments.length - 1); + + List parents = CrdSchemaUtils.resolvePath(cr, parentSegments, 0, ""); + if (parents.isEmpty()) { + System.out.println(" " + ref.kind + "/" + ref.name + " " + path + + " - SKIPPED (not configured)"); + return List.of(); + } + + List errors = new ArrayList<>(); + for (CrdSchemaUtils.ResolvedNode parent : parents) { + if (!(parent.value instanceof Map)) continue; + Map parentMap = (Map) parent.value; + Object resources = parentMap.get(resourcesKey); + String fullPath = ref.kind + "/" + ref.name + " " + parent.path + "." + resourcesKey; + + List fieldErrors = checkResourcesObject(resources, fullPath); + if (fieldErrors.isEmpty()) { + System.out.println(" " + fullPath + " - OK"); + } else { + errors.addAll(fieldErrors); + } + } + return errors; + } + + // --- Resource object validation --- + + /** + * Check that a resources object has limits and requests with cpu and memory, + * and that requests equal limits (Guaranteed QoS). 
+ */ + @SuppressWarnings("unchecked") + static List checkResourcesObject(Object resourcesObj, String prefix) { + List errors = new ArrayList<>(); + + if (!(resourcesObj instanceof Map)) { + errors.add(prefix + " missing resources"); + return errors; + } + + Map resources = (Map) resourcesObj; + + Object requestsObj = resources.get("requests"); + Object limitsObj = resources.get("limits"); + + if (!(requestsObj instanceof Map)) { + errors.add(prefix + " missing resources.requests"); + } else { + Map reqMap = (Map) requestsObj; + if (!reqMap.containsKey("cpu")) errors.add(prefix + " missing resources.requests.cpu"); + if (!reqMap.containsKey("memory")) errors.add(prefix + " missing resources.requests.memory"); + } + + if (!(limitsObj instanceof Map)) { + errors.add(prefix + " missing resources.limits"); + } else { + Map limMap = (Map) limitsObj; + if (!limMap.containsKey("cpu")) errors.add(prefix + " missing resources.limits.cpu"); + if (!limMap.containsKey("memory")) errors.add(prefix + " missing resources.limits.memory"); + } + + // Verify requests == limits invariant (Guaranteed QoS) + errors.addAll(CrdSchemaUtils.checkRequestsEqualsLimits(resources, prefix)); + + return errors; + } +} diff --git a/.github/scripts/ComputeTestMatrixTest.java b/.github/scripts/tests/ComputeTestMatrixTest.java similarity index 99% rename from .github/scripts/ComputeTestMatrixTest.java rename to .github/scripts/tests/ComputeTestMatrixTest.java index e7f697a..4e692be 100644 --- a/.github/scripts/ComputeTestMatrixTest.java +++ b/.github/scripts/tests/ComputeTestMatrixTest.java @@ -1,7 +1,7 @@ ///usr/bin/env jbang "$0" "$@" ; exit $? 
//DEPS org.junit.jupiter:junit-jupiter:6.0.3 //DEPS org.junit.platform:junit-platform-launcher:6.0.3 -//SOURCES ComputeTestMatrix.java +//SOURCES ../ComputeTestMatrix.java import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; diff --git a/.github/scripts/tests/VerifyDocumentedResourcesTest.java b/.github/scripts/tests/VerifyDocumentedResourcesTest.java new file mode 100644 index 0000000..1c7b3be --- /dev/null +++ b/.github/scripts/tests/VerifyDocumentedResourcesTest.java @@ -0,0 +1,587 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS org.junit.jupiter:junit-jupiter:6.0.3 +//DEPS org.junit.platform:junit-platform-launcher:6.0.3 +//DEPS org.tomlj:tomlj:1.1.1 +//DEPS io.fabric8:kubernetes-model-core:7.6.1 +//SOURCES ../VerifyDocumentedResources.java + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.platform.launcher.Launcher; +import org.junit.platform.launcher.LauncherDiscoveryRequest; +import org.junit.platform.launcher.core.LauncherFactory; +import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder; +import org.junit.platform.launcher.listeners.SummaryGeneratingListener; +import org.junit.platform.launcher.listeners.TestExecutionSummary; +import org.tomlj.TomlParseResult; + +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.platform.engine.discovery.DiscoverySelectors.selectClass; + +public class VerifyDocumentedResourcesTest { + + private static final String CORE_FRONTMATTER = + "+++\ntitle = 'Core'\ncpu_total = '4 CPU cores'\nmemory_total = '4 GiB'\n+++\n"; + + public static void main(String[] args) { + LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request() + 
.selectors(selectClass(VerifyDocumentedResourcesTest.class)) + .build(); + + SummaryGeneratingListener listener = new SummaryGeneratingListener(); + Launcher launcher = LauncherFactory.create(); + launcher.execute(request, listener); + + TestExecutionSummary summary = listener.getSummary(); + summary.printTo(new PrintWriter(System.out)); + + if (summary.getTestsFailedCount() > 0) { + summary.getFailures().forEach(failure -> + failure.getException().printStackTrace()); + System.exit(1); + } + } + + // --- Overlay documentation coverage tests --- + + @Test + void passesWhenAllOverlaysHaveDocs(@TempDir Path tempDir) throws IOException { + OverlayTestDirs dirs = createOverlayTestDirs(tempDir, "core"); + Files.writeString(dirs.docsDir.resolve("core.md"), CORE_FRONTMATTER); + + List errors = VerifyDocumentedResources.verifyAllOverlaysDocumented(dirs.overlaysDir, dirs.docsDir); + assertTrue(errors.isEmpty(), "Expected no errors but got: " + errors); + } + + @Test + void failsWhenOverlayHasNoDocPage(@TempDir Path tempDir) throws IOException { + OverlayTestDirs dirs = createOverlayTestDirs(tempDir, "core", "tracing"); + Files.writeString(dirs.docsDir.resolve("core.md"), CORE_FRONTMATTER); + + List errors = VerifyDocumentedResources.verifyAllOverlaysDocumented(dirs.overlaysDir, dirs.docsDir); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("tracing")); + assertTrue(errors.get(0).contains("no documentation page")); + } + + @Test + void failsWhenDocMissingFrontmatter(@TempDir Path tempDir) throws IOException { + OverlayTestDirs dirs = createOverlayTestDirs(tempDir, "core"); + Files.writeString(dirs.docsDir.resolve("core.md"), "# Core\nNo frontmatter here.\n"); + + List errors = VerifyDocumentedResources.verifyAllOverlaysDocumented(dirs.overlaysDir, dirs.docsDir); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing TOML frontmatter")); + } + + @Test + void failsWhenDocMissingCpuTotal(@TempDir Path tempDir) throws IOException { + 
OverlayTestDirs dirs = createOverlayTestDirs(tempDir, "core"); + Files.writeString(dirs.docsDir.resolve("core.md"), + "+++\ntitle = 'Core'\nmemory_total = '4 GiB'\n+++\n"); + + List errors = VerifyDocumentedResources.verifyAllOverlaysDocumented(dirs.overlaysDir, dirs.docsDir); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing 'cpu_total'")); + } + + @Test + void failsWhenDocMissingMemoryTotal(@TempDir Path tempDir) throws IOException { + OverlayTestDirs dirs = createOverlayTestDirs(tempDir, "core"); + Files.writeString(dirs.docsDir.resolve("core.md"), + "+++\ntitle = 'Core'\ncpu_total = '4 CPU cores'\n+++\n"); + + List errors = VerifyDocumentedResources.verifyAllOverlaysDocumented(dirs.overlaysDir, dirs.docsDir); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing 'memory_total'")); + } + + @Test + void skipsHiddenDirectories(@TempDir Path tempDir) throws IOException { + OverlayTestDirs dirs = createOverlayTestDirs(tempDir, "core"); + Files.createDirectories(dirs.overlaysDir.resolve(".hidden")); + Files.writeString(dirs.docsDir.resolve("core.md"), CORE_FRONTMATTER); + + List errors = VerifyDocumentedResources.verifyAllOverlaysDocumented(dirs.overlaysDir, dirs.docsDir); + assertTrue(errors.isEmpty(), "Hidden dirs should be skipped but got: " + errors); + } + + // --- TOML frontmatter extraction tests --- + + @Test + void extractsTomlFrontmatter() { + String content = "+++\ntitle = 'Core'\ncpu_total = '4 CPU cores'\nmemory_total = '4.5 GiB'\n+++\n# Heading\n"; + TomlParseResult toml = VerifyDocumentedResources.extractTomlFrontmatter(content); + + assertNotNull(toml); + assertEquals("Core", toml.getString("title")); + assertEquals("4 CPU cores", toml.getString("cpu_total")); + assertEquals("4.5 GiB", toml.getString("memory_total")); + } + + @Test + void returnsNullForMissingFrontmatter() { + String content = "# Just a heading\nSome text\n"; + assertNull(VerifyDocumentedResources.extractTomlFrontmatter(content)); 
+ } + + @Test + void returnsNullForUnclosedFrontmatter() { + String content = "+++\ntitle = 'Core'\n# No closing delimiter\n"; + assertNull(VerifyDocumentedResources.extractTomlFrontmatter(content)); + } + + @Test + void handlesExtraFieldsInFrontmatter() { + String content = "+++\ntitle = 'Metrics'\nweight = 1\ncpu_total = '4 CPU cores'\nmemory_total = '5 GiB'\n+++\n"; + TomlParseResult toml = VerifyDocumentedResources.extractTomlFrontmatter(content); + + assertNotNull(toml); + assertEquals("4 CPU cores", toml.getString("cpu_total")); + assertEquals("5 GiB", toml.getString("memory_total")); + assertEquals(1L, toml.getLong("weight")); + } + + // --- Documented CPU parsing tests --- + + @Test + void parsesWholeCpuCores() { + assertEquals(4000, VerifyDocumentedResources.parseDocumentedCpu("4 CPU cores")); + } + + @Test + void parsesFractionalCpuCores() { + assertEquals(3500, VerifyDocumentedResources.parseDocumentedCpu("3.5 CPU cores")); + } + + @Test + void parsesSingularCpuCore() { + assertEquals(1000, VerifyDocumentedResources.parseDocumentedCpu("1 CPU core")); + } + + @Test + void throwsOnUnknownCpuFormat() { + assertThrows(IllegalArgumentException.class, + () -> VerifyDocumentedResources.parseDocumentedCpu("4 threads")); + } + + @Test + void throwsOnCpuWithoutNumber() { + assertThrows(IllegalArgumentException.class, + () -> VerifyDocumentedResources.parseDocumentedCpu("some CPU cores")); + } + + // --- Documented memory parsing tests --- + + @Test + void parsesWholeGiB() { + assertEquals(5120, VerifyDocumentedResources.parseDocumentedMemory("5 GiB")); + } + + @Test + void parsesFractionalGiB() { + assertEquals(4608, VerifyDocumentedResources.parseDocumentedMemory("4.5 GiB")); + } + + @Test + void throwsOnUnknownMemoryFormat() { + assertThrows(IllegalArgumentException.class, + () -> VerifyDocumentedResources.parseDocumentedMemory("4.5 GB")); + } + + @Test + void throwsOnMalformedMemoryDecimal() { + assertThrows(IllegalArgumentException.class, + () -> 
VerifyDocumentedResources.parseDocumentedMemory("...GiB")); + } + + // --- Kubernetes CPU quantity parsing tests (via CrdSchemaUtils) --- + + @Test + void parsesMillicoreCpu() { + assertEquals(200, CrdSchemaUtils.parseCpuMillis("200m")); + } + + @Test + void parsesWholeCpuString() { + assertEquals(1000, CrdSchemaUtils.parseCpuMillis("1")); + } + + @Test + void parsesFractionalCpuString() { + assertEquals(500, CrdSchemaUtils.parseCpuMillis("0.5")); + } + + @Test + void parsesIntegerCpuFromYaml() { + assertEquals(1000, CrdSchemaUtils.parseCpuMillis(Integer.valueOf(1))); + } + + @Test + void parsesDoubleCpuFromYaml() { + assertEquals(500, CrdSchemaUtils.parseCpuMillis(Double.valueOf(0.5))); + } + + @Test + void parsesEquivalentCpuFormats() { + // "1" and "1000m" should both parse to 1000 millicores + assertEquals(CrdSchemaUtils.parseCpuMillis("1"), CrdSchemaUtils.parseCpuMillis("1000m")); + } + + // --- Kubernetes memory quantity parsing tests (via CrdSchemaUtils) --- + + @Test + void parsesMiMemory() { + assertEquals(256, CrdSchemaUtils.parseMemoryMiB("256Mi")); + } + + @Test + void parsesGiMemory() { + assertEquals(1024, CrdSchemaUtils.parseMemoryMiB("1Gi")); + } + + @Test + void parsesMMemory() { + // 400M (decimal megabytes) -> 400 * 1000000 / 1048576 ≈ 381 MiB + assertEquals(381, CrdSchemaUtils.parseMemoryMiB("400M")); + } + + @Test + void parsesKiMemory() { + // 1024 Ki = 1 MiB + assertEquals(1, CrdSchemaUtils.parseMemoryMiB("1024Ki")); + } + + @Test + void parsesTiMemory() { + // 1 Ti = 1048576 MiB + assertEquals(1_048_576, CrdSchemaUtils.parseMemoryMiB("1Ti")); + } + + @Test + void parsesGMemory() { + // 1G (decimal gigabyte) = 1000000000 bytes ≈ 954 MiB + assertEquals(954, CrdSchemaUtils.parseMemoryMiB("1G")); + } + + @Test + void parsesLowercaseKMemory() { + // 1048576k = 1048576 * 1000 bytes = 1048576000 bytes ≈ 1000 MiB + assertEquals(1000, CrdSchemaUtils.parseMemoryMiB("1048576k")); + } + + @Test + void parsesIntegerMemoryFromYaml() { + // 268435456 bytes 
= 256 MiB + assertEquals(256, CrdSchemaUtils.parseMemoryMiB(Integer.valueOf(268435456))); + } + + // --- Resource extraction and invariant tests --- + + @Test + void extractsResourceValues() { + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues( + guaranteedResources("500m", "512Mi"), "test"); + + assertEquals(500, totals.cpuMillis); + assertEquals(512, totals.memoryMiB); + assertTrue(totals.invariantErrors.isEmpty()); + } + + @Test + void returnsZeroForMissingResources() { + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues(null, "test"); + + assertEquals(0, totals.cpuMillis); + assertEquals(0, totals.memoryMiB); + } + + @Test + void detectsRequestsNotEqualLimitsCpu() { + Map resources = Map.of( + "requests", Map.of("cpu", "200m", "memory", "256Mi"), + "limits", Map.of("cpu", "500m", "memory", "256Mi")); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues(resources, "test"); + + assertEquals(200, totals.cpuMillis); + assertEquals(1, totals.invariantErrors.size()); + assertTrue(totals.invariantErrors.get(0).contains("requests.cpu")); + } + + @Test + void detectsRequestsNotEqualLimitsMemory() { + Map resources = Map.of( + "requests", Map.of("cpu", "200m", "memory", "256Mi"), + "limits", Map.of("cpu", "200m", "memory", "512Mi")); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues(resources, "test"); + + assertEquals(256, totals.memoryMiB); + assertEquals(1, totals.invariantErrors.size()); + assertTrue(totals.invariantErrors.get(0).contains("requests.memory")); + } + + @Test + void passesWhenRequestsEqualLimits() { + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues( + guaranteedResources("200m", "256Mi"), "test"); + + assertTrue(totals.invariantErrors.isEmpty()); + } + + // --- Deployment summing tests --- + + @Test 
+ void sumsDeploymentContainerResources() { + Map deployment = testDeployment("test-op", + Map.of("name", "main", "resources", guaranteedResources("200m", "256Mi")), + Map.of("name", "sidecar", "resources", guaranteedResources("100m", "128Mi"))); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.sumDeploymentResources(deployment); + + assertEquals(300, totals.cpuMillis); + assertEquals(384, totals.memoryMiB); + assertTrue(totals.invariantErrors.isEmpty()); + } + + // --- Comparison logic tests --- + + @Test + void passesWhenDocumentedExceedsActual() { + // doc=4000m, actual=3250m -> should pass (no error) + long documented = 4000; + long actual = 3250; + assertTrue(documented >= actual); + } + + @Test + void passesWhenDocumentedEqualsActual() { + long documented = 3250; + long actual = 3250; + assertTrue(documented >= actual); + } + + @Test + void failsWhenDocumentedBelowActual() { + long documented = 3000; + long actual = 3250; + assertFalse(documented >= actual); + } + + // --- CRD schema walking tests (shared via CrdSchemaUtils) --- + + @Test + void findsResourcesPathInSchema() { + Map schema = Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema(), + "replicas", Map.of("type", "integer"))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(List.of(".spec.resources"), paths); + } + + @Test + void findsNestedResourcesPath() { + Map schema = Map.of( + "properties", Map.of( + "app", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema())))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(List.of(".spec.app.resources"), paths); + } + + @Test + void includesResourcesSiblingToContainers() { + // walkSchema finds ALL ResourceRequirements, including resources + // that are siblings of containers. 
CRDs like Prometheus have + // spec.resources (main container) alongside spec.containers (sidecars). + Map schema = Map.of( + "properties", Map.of( + "containers", Map.of( + "type", "array", + "items", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema()))), + "resources", resourceRequirementsSchema())); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(2, paths.size()); + assertTrue(paths.contains(".spec.containers[].resources")); + assertTrue(paths.contains(".spec.resources")); + } + + // --- CR resource path summing tests --- + + @Test + void sumsCrResourcePath() { + Map cr = testCr(Map.of( + "resources", guaranteedResources("500m", "1Gi"))); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.sumCrResourcePath(cr, ".spec.resources"); + + assertEquals(500, totals.cpuMillis); + assertEquals(1024, totals.memoryMiB); + assertTrue(totals.invariantErrors.isEmpty()); + } + + @Test + void returnsZeroForMissingCrPath() { + Map cr = testCr(Map.of("name", "test")); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.sumCrResourcePath(cr, ".spec.app.resources"); + + assertEquals(0, totals.cpuMillis); + assertEquals(0, totals.memoryMiB); + } + + @Test + void sumsResourcesAcrossArrayElements() { + Map cr = testCr(Map.of( + "containers", List.of( + Map.of("name", "app", "resources", guaranteedResources("500m", "512Mi")), + Map.of("name", "ui", "resources", guaranteedResources("100m", "256Mi"))))); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.sumCrResourcePath(cr, ".spec.containers[].resources"); + + assertEquals(600, totals.cpuMillis); + assertEquals(768, totals.memoryMiB); + } + + // --- Pod-spec overhead filtering test --- + + @Test + void sumAllResourcesFiltersPodSpecOverheadPaths() { + // CRD with both component-level and pod-spec overhead ResourceRequirements + Map crd = testCrd("TestCR", Map.of( + 
// Component-level resources (should be counted) + "resources", resourceRequirementsSchema(), + // Pod-spec overhead (should be filtered) + "template", Map.of( + "properties", Map.of( + "spec", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema())))))); + + // CR instance with resources at both paths + Map cr = testCr(Map.of( + "resources", guaranteedResources("500m", "512Mi"), + "template", Map.of( + "spec", Map.of( + "resources", guaranteedResources("100m", "128Mi"))))); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.sumAllResources(List.of(crd), List.of(cr)); + + // Only component-level resources should be counted, not the pod-spec overhead + assertEquals(500, totals.cpuMillis); + assertEquals(512, totals.memoryMiB); + assertTrue(totals.invariantErrors.isEmpty()); + } + + // --- Helpers --- + + /** Build a Guaranteed QoS resources map (requests == limits). */ + private static Map guaranteedResources(String cpu, String memory) { + return Map.of( + "requests", Map.of("cpu", cpu, "memory", memory), + "limits", Map.of("cpu", cpu, "memory", memory)); + } + + /** Build a test CR with standard metadata and the given spec. */ + private static Map testCr(Map spec) { + Map cr = new LinkedHashMap<>(); + cr.put("apiVersion", "test/v1"); + cr.put("kind", "TestCR"); + cr.put("metadata", Map.of("name", "my-cr", "namespace", "default")); + cr.put("spec", spec); + return cr; + } + + /** Build a test Deployment with the given containers. */ + @SafeVarargs + private static Map testDeployment(String name, Map... 
containers) { + Map deployment = new LinkedHashMap<>(); + deployment.put("apiVersion", "apps/v1"); + deployment.put("kind", "Deployment"); + deployment.put("metadata", Map.of("name", name, "namespace", "default")); + deployment.put("spec", Map.of( + "template", Map.of( + "spec", Map.of( + "containers", List.of(containers))))); + return deployment; + } + + /** Build a test CRD with the given kind and spec-level schema properties. */ + private static Map testCrd(String kind, Map specProperties) { + Map crd = new LinkedHashMap<>(); + crd.put("apiVersion", "apiextensions.k8s.io/v1"); + crd.put("kind", "CustomResourceDefinition"); + crd.put("metadata", Map.of("name", kind.toLowerCase() + "s.test.io")); + crd.put("spec", Map.of( + "names", Map.of("kind", kind), + "versions", List.of(Map.of( + "name", "v1", + "schema", Map.of( + "openAPIV3Schema", Map.of( + "properties", Map.of( + "spec", Map.of( + "properties", specProperties)))))))); + return crd; + } + + /** Build a minimal ResourceRequirements-shaped schema node. */ + private static Map resourceRequirementsSchema() { + return Map.of( + "type", "object", + "properties", Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of( + "x-kubernetes-int-or-string", true)), + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of( + "x-kubernetes-int-or-string", true)))); + } + + /** Create overlay and docs directories with the given overlay names. */ + private static OverlayTestDirs createOverlayTestDirs(Path tempDir, String... 
overlayNames) throws IOException { + Path overlaysDir = tempDir.resolve("overlays"); + Path docsDir = tempDir.resolve("docs"); + for (String name : overlayNames) { + Files.createDirectories(overlaysDir.resolve(name)); + } + Files.createDirectories(docsDir); + return new OverlayTestDirs(overlaysDir, docsDir); + } + + private record OverlayTestDirs(Path overlaysDir, Path docsDir) { } +} diff --git a/.github/scripts/tests/VerifyResourceLimitsTest.java b/.github/scripts/tests/VerifyResourceLimitsTest.java new file mode 100644 index 0000000..ea7edcd --- /dev/null +++ b/.github/scripts/tests/VerifyResourceLimitsTest.java @@ -0,0 +1,612 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS org.junit.jupiter:junit-jupiter:6.0.3 +//DEPS org.junit.platform:junit-platform-launcher:6.0.3 +//DEPS io.fabric8:kubernetes-model-core:7.6.1 +//SOURCES ../VerifyResourceLimits.java + +import org.junit.jupiter.api.Test; +import org.junit.platform.launcher.Launcher; +import org.junit.platform.launcher.LauncherDiscoveryRequest; +import org.junit.platform.launcher.core.LauncherFactory; +import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder; +import org.junit.platform.launcher.listeners.SummaryGeneratingListener; +import org.junit.platform.launcher.listeners.TestExecutionSummary; + +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.platform.engine.discovery.DiscoverySelectors.selectClass; + +public class VerifyResourceLimitsTest { + + public static void main(String[] args) { + LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request() + .selectors(selectClass(VerifyResourceLimitsTest.class)) + .build(); + + SummaryGeneratingListener listener = new SummaryGeneratingListener(); + Launcher launcher = LauncherFactory.create(); + launcher.execute(request, listener); + + TestExecutionSummary summary 
= listener.getSummary(); + summary.printTo(new PrintWriter(System.out)); + + if (summary.getTestsFailedCount() > 0) { + summary.getFailures().forEach(failure -> + failure.getException().printStackTrace()); + System.exit(1); + } + } + + // --- CrdSchemaUtils.isResourceRequirements tests --- + + @Test + void detectsResourceRequirementsSignature() { + Map properties = Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of("x-kubernetes-int-or-string", true)), + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of("x-kubernetes-int-or-string", true))); + + assertTrue(CrdSchemaUtils.isResourceRequirements(properties)); + } + + @Test + void rejectsPropertiesWithoutLimits() { + Map properties = Map.of( + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of("x-kubernetes-int-or-string", true))); + + assertFalse(CrdSchemaUtils.isResourceRequirements(properties)); + } + + @Test + void rejectsPropertiesWithoutRequests() { + Map properties = Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of("x-kubernetes-int-or-string", true))); + + assertFalse(CrdSchemaUtils.isResourceRequirements(properties)); + } + + @Test + void rejectsLimitsWithoutIntOrStringMarker() { + Map properties = Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of("type", "string")), + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of("x-kubernetes-int-or-string", true))); + + assertFalse(CrdSchemaUtils.isResourceRequirements(properties)); + } + + @Test + void rejectsRequestsWithoutIntOrStringMarker() { + Map properties = Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of("x-kubernetes-int-or-string", true)), + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of("type", "string"))); + + assertFalse(CrdSchemaUtils.isResourceRequirements(properties)); + } + + @Test + void rejectsUnrelatedProperties() { + Map properties 
= Map.of( + "name", Map.of("type", "string"), + "replicas", Map.of("type", "integer")); + + assertFalse(CrdSchemaUtils.isResourceRequirements(properties)); + } + + // --- CrdSchemaUtils.walkSchema tests --- + + @Test + void findsDirectResourcesField() { + // Schema like KafkaNodePool: spec.resources is a ResourceRequirements + Map schema = Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema(), + "replicas", Map.of("type", "integer"))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(List.of(".spec.resources"), paths); + } + + @Test + void findsNestedResourcesField() { + // Schema like Kafka: spec.entityOperator.topicOperator.resources + Map schema = Map.of( + "properties", Map.of( + "entityOperator", Map.of( + "properties", Map.of( + "topicOperator", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema())))))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(List.of(".spec.entityOperator.topicOperator.resources"), paths); + } + + @Test + void findsResourcesInsideArrayItems() { + // Schema like PodTemplateSpec: spec.containers[].resources + Map schema = Map.of( + "properties", Map.of( + "containers", Map.of( + "type", "array", + "items", Map.of( + "properties", Map.of( + "name", Map.of("type", "string"), + "resources", resourceRequirementsSchema()))))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(List.of(".spec.containers[].resources"), paths); + } + + @Test + void findsAllResourcesIncludingContainerSiblings() { + // walkSchema finds ALL ResourceRequirements paths, including resources + // that are siblings of containers (e.g., Prometheus spec.resources). + // Filtering is done by callers via isPodSpecOverheadPath. 
+ Map schema = Map.of( + "properties", Map.of( + "containers", Map.of( + "type", "array", + "items", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema()))), + "resources", resourceRequirementsSchema())); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(2, paths.size()); + assertTrue(paths.contains(".spec.containers[].resources")); + assertTrue(paths.contains(".spec.resources")); + } + + @Test + void findsMultiplePaths() { + // Schema with two ResourceRequirements fields at different levels + Map schema = Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema(), + "build", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema())))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertEquals(2, paths.size()); + assertTrue(paths.contains(".spec.resources")); + assertTrue(paths.contains(".spec.build.resources")); + } + + @Test + void returnsEmptyForSchemaWithoutResources() { + Map schema = Map.of( + "properties", Map.of( + "name", Map.of("type", "string"), + "config", Map.of( + "properties", Map.of( + "logLevel", Map.of("type", "string"))))); + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(schema, ".spec", paths); + + assertTrue(paths.isEmpty()); + } + + // --- CrdSchemaUtils.isPodSpecOverheadPath tests --- + + @Test + void identifiesPodTemplateSpecOverheadPath() { + assertTrue(CrdSchemaUtils.isPodSpecOverheadPath(".spec.app.podTemplateSpec.spec.resources")); + assertTrue(CrdSchemaUtils.isPodSpecOverheadPath(".spec.ui.podTemplateSpec.spec.resources")); + } + + @Test + void identifiesTemplateSpecOverheadPath() { + assertTrue(CrdSchemaUtils.isPodSpecOverheadPath(".spec.template.spec.resources")); + } + + @Test + void doesNotFilterCrdLevelResources() { + // CRD-level resources (e.g., Prometheus spec.resources) are NOT pod overhead + 
assertFalse(CrdSchemaUtils.isPodSpecOverheadPath(".spec.resources")); + assertFalse(CrdSchemaUtils.isPodSpecOverheadPath(".spec.entityOperator.topicOperator.resources")); + assertFalse(CrdSchemaUtils.isPodSpecOverheadPath(".spec.containers[].resources")); + } + + // --- checkResourcesObject tests --- + + @Test + void acceptsCompleteResourcesObject() { + List errors = VerifyResourceLimits.checkResourcesObject( + guaranteedResources("500m", "512Mi"), "test"); + assertTrue(errors.isEmpty()); + } + + @Test + void rejectsMissingResources() { + List errors = VerifyResourceLimits.checkResourcesObject(null, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing resources")); + } + + @Test + void rejectsMissingRequests() { + Map resources = Map.of( + "limits", Map.of("cpu", "500m", "memory", "512Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing resources.requests")); + } + + @Test + void rejectsMissingLimits() { + Map resources = Map.of( + "requests", Map.of("cpu", "500m", "memory", "512Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing resources.limits")); + } + + @Test + void rejectsMissingCpuInRequests() { + Map resources = Map.of( + "requests", Map.of("memory", "512Mi"), + "limits", Map.of("cpu", "500m", "memory", "512Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing resources.requests.cpu")); + } + + @Test + void rejectsMissingMemoryInLimits() { + Map resources = Map.of( + "requests", Map.of("cpu", "500m", "memory", "512Mi"), + "limits", Map.of("cpu", "500m")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + 
assertTrue(errors.get(0).contains("missing resources.limits.memory")); + } + + @Test + void reportsMultipleMissingFields() { + // Empty requests and limits maps + Map resources = Map.of( + "requests", Map.of(), + "limits", Map.of()); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(4, errors.size()); + } + + // --- requests == limits invariant tests --- + + @Test + void rejectsRequestsNotEqualLimitsCpu() { + Map resources = Map.of( + "requests", Map.of("cpu", "200m", "memory", "256Mi"), + "limits", Map.of("cpu", "500m", "memory", "256Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("requests.cpu")); + assertTrue(errors.get(0).contains("!=")); + } + + @Test + void rejectsRequestsNotEqualLimitsMemory() { + Map resources = Map.of( + "requests", Map.of("cpu", "200m", "memory", "256Mi"), + "limits", Map.of("cpu", "200m", "memory", "512Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("requests.memory")); + assertTrue(errors.get(0).contains("!=")); + } + + @Test + void passesWhenRequestsEqualLimits() { + List errors = VerifyResourceLimits.checkResourcesObject( + guaranteedResources("200m", "256Mi"), "test"); + assertTrue(errors.isEmpty()); + } + + @Test + void passesWhenRequestsEqualLimitsInDifferentFormats() { + // "1" (1 core) and "1000m" (1000 millicores) are semantically equal + Map resources = Map.of( + "requests", Map.of("cpu", "1", "memory", "1Gi"), + "limits", Map.of("cpu", "1000m", "memory", "1024Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertTrue(errors.isEmpty(), "Expected no errors but got: " + errors); + } + + // --- CrdSchemaUtils.resolvePath tests --- + + @Test + void resolvesSimpleMapPath() { + Map doc = Map.of( + "spec", Map.of( + "resources", 
Map.of("cpu", "500m"))); + + List results = + CrdSchemaUtils.resolvePath(doc, new String[]{"spec", "resources"}, 0, ""); + + assertEquals(1, results.size()); + assertEquals(".spec.resources", results.get(0).path); + } + + @Test + void resolvesNestedMapPath() { + Map doc = Map.of( + "spec", Map.of( + "entityOperator", Map.of( + "topicOperator", Map.of( + "resources", "found")))); + + List results = CrdSchemaUtils.resolvePath( + doc, new String[]{"spec", "entityOperator", "topicOperator", "resources"}, 0, ""); + + assertEquals(1, results.size()); + assertEquals("found", results.get(0).value); + } + + @Test + void returnsEmptyForMissingIntermediateKey() { + Map doc = Map.of("spec", Map.of("name", "test")); + + List results = CrdSchemaUtils.resolvePath( + doc, new String[]{"spec", "entityOperator", "resources"}, 0, ""); + + assertTrue(results.isEmpty()); + } + + @Test + void resolvesArrayPath() { + Map doc = Map.of( + "spec", Map.of( + "containers", List.of( + Map.of("name", "app", "resources", "r1"), + Map.of("name", "ui", "resources", "r2")))); + + List results = CrdSchemaUtils.resolvePath( + doc, new String[]{"spec", "containers[]"}, 0, ""); + + assertEquals(2, results.size()); + assertEquals(".spec.containers[0]", results.get(0).path); + assertEquals(".spec.containers[1]", results.get(1).path); + } + + @Test + void resolvesFieldInsideArrayElements() { + Map doc = Map.of( + "containers", List.of( + Map.of("name", "app", "resources", "r1"), + Map.of("name", "sidecar", "resources", "r2"))); + + List results = CrdSchemaUtils.resolvePath( + doc, new String[]{"containers[]", "resources"}, 0, ""); + + assertEquals(2, results.size()); + assertEquals("r1", results.get(0).value); + assertEquals("r2", results.get(1).value); + } + + @Test + void returnsEmptyForMissingArrayKey() { + Map doc = Map.of("spec", Map.of("name", "test")); + + List results = CrdSchemaUtils.resolvePath( + doc, new String[]{"spec", "containers[]", "resources"}, 0, ""); + + assertTrue(results.isEmpty()); 
+ } + + @Test + void returnsEmptyForEmptyArray() { + Map doc = Map.of("containers", List.of()); + + List results = CrdSchemaUtils.resolvePath( + doc, new String[]{"containers[]", "resources"}, 0, ""); + + assertTrue(results.isEmpty()); + } + + // --- validateCrResourcePath tests --- + + @Test + void validatesPopulatedResourcePath() { + Map cr = testCr(Map.of( + "resources", guaranteedResources("500m", "512Mi"))); + + List errors = validatePath(cr, ".spec.resources"); + + assertTrue(errors.isEmpty()); + } + + @Test + void skipsUnconfiguredOptionalPath() { + Map cr = testCr(Map.of("name", "test")); + + List errors = validatePath(cr, ".spec.cruiseControl.resources"); + + // Parent "cruiseControl" doesn't exist — should be skipped, not an error + assertTrue(errors.isEmpty()); + } + + @Test + void reportsErrorWhenParentExistsButResourcesMissing() { + Map cr = testCr(Map.of( + "entityOperator", Map.of( + "topicOperator", Map.of( + "watchedNamespace", "my-ns")))); + + List errors = validatePath(cr, ".spec.entityOperator.topicOperator.resources"); + + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("missing resources")); + } + + @Test + void validatesResourcesInsideArrayElements() { + Map cr = testCr(Map.of( + "podTemplateSpec", Map.of( + "spec", Map.of( + "containers", List.of( + Map.of("name", "app", + "resources", guaranteedResources("500m", "512Mi"))))))); + + List errors = validatePath(cr, ".spec.podTemplateSpec.spec.containers[].resources"); + + assertTrue(errors.isEmpty()); + } + + @Test + void reportsErrorForIncompleteResourcesInArray() { + Map cr = testCr(Map.of( + "containers", List.of( + Map.of("name", "app", + "resources", Map.of( + "requests", Map.of("cpu", "500m")))))); + + List errors = validatePath(cr, ".spec.containers[].resources"); + + assertFalse(errors.isEmpty()); + } + + // --- CrdSchemaUtils.extractCrdKind tests --- + + @Test + void extractsKindFromCrd() { + Map crd = Map.of( + "spec", Map.of( + "names", Map.of("kind", "Kafka", 
"plural", "kafkas"))); + + assertEquals("Kafka", CrdSchemaUtils.extractCrdKind(crd)); + } + + @Test + void returnsNullForCrdWithoutNames() { + Map crd = Map.of("spec", Map.of()); + assertNull(CrdSchemaUtils.extractCrdKind(crd)); + } + + // --- Deployment checking tests --- + + @Test + void acceptsDeploymentWithCompleteResources() { + Map deployment = testDeployment("my-operator", + Map.of("name", "operator", "resources", guaranteedResources("200m", "256Mi"))); + + List errors = VerifyResourceLimits.checkDeploymentResources(deployment); + assertTrue(errors.isEmpty()); + } + + @Test + void reportsDeploymentContainerWithoutResources() { + Map deployment = testDeployment("my-operator", + Map.of("name", "operator")); + + List errors = VerifyResourceLimits.checkDeploymentResources(deployment); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("my-operator")); + assertTrue(errors.get(0).contains("operator")); + assertTrue(errors.get(0).contains("missing resources")); + } + + @Test + void checksAllContainersInDeployment() { + Map deployment = testDeployment("my-app", + Map.of("name", "main", "resources", guaranteedResources("200m", "256Mi")), + Map.of("name", "sidecar")); + + List errors = VerifyResourceLimits.checkDeploymentResources(deployment); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("sidecar")); + } + + // --- Helpers --- + + /** Build a Guaranteed QoS resources map (requests == limits). */ + private static Map guaranteedResources(String cpu, String memory) { + return Map.of( + "requests", Map.of("cpu", cpu, "memory", memory), + "limits", Map.of("cpu", cpu, "memory", memory)); + } + + /** Build a test CR with standard metadata and the given spec. 
*/ + private static Map testCr(Map spec) { + Map cr = new LinkedHashMap<>(); + cr.put("apiVersion", "test/v1"); + cr.put("kind", "TestCR"); + cr.put("metadata", Map.of("name", "my-cr", "namespace", "default")); + cr.put("spec", spec); + return cr; + } + + /** Build a test Deployment with the given containers. */ + @SafeVarargs + private static Map testDeployment(String name, Map... containers) { + Map deployment = new LinkedHashMap<>(); + deployment.put("apiVersion", "apps/v1"); + deployment.put("kind", "Deployment"); + deployment.put("metadata", Map.of("name", name, "namespace", "default")); + deployment.put("spec", Map.of( + "template", Map.of( + "spec", Map.of( + "containers", List.of(containers))))); + return deployment; + } + + /** Validate a CR resource path using the standard test CR ref. */ + private static List validatePath(Map cr, String path) { + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(cr); + return VerifyResourceLimits.validateCrResourcePath(cr, ref, path); + } + + /** Build a minimal ResourceRequirements-shaped schema node. 
*/ + private static Map resourceRequirementsSchema() { + return Map.of( + "type", "object", + "properties", Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of( + "x-kubernetes-int-or-string", true)), + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of( + "x-kubernetes-int-or-string", true)))); + } +} diff --git a/.github/workflows/script-tests.yaml b/.github/workflows/script-tests.yaml new file mode 100644 index 0000000..a2d8076 --- /dev/null +++ b/.github/workflows/script-tests.yaml @@ -0,0 +1,28 @@ +name: Script Tests + +on: + pull_request: + paths: + - '.github/scripts/**' + push: + branches: + - main + paths: + - '.github/scripts/**' + +jobs: + unit-tests: + name: Script unit tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up JBang + uses: jbangdev/setup-jbang@2b1b465a7b75f4222b81426f23a01e013aa7b95c # v0.1.1 + + - name: Run all tests + run: | + for test in .github/scripts/tests/*.java; do + echo "--- Running $(basename "$test") ---" + jbang "$test" + done diff --git a/.github/workflows/validate.yaml b/.github/workflows/validate.yaml index 9f9b843..4821bc3 100644 --- a/.github/workflows/validate.yaml +++ b/.github/workflows/validate.yaml @@ -15,12 +15,51 @@ on: - '.docs-preview/**' jobs: + shellcheck: + name: Lint shell scripts + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 + with: + version: v0.11.0 + scandir: "." + + yamllint: + name: Lint YAML files + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Install yamllint + run: | + sudo apt-get update + sudo apt-get install -y yamllint + + - name: Run yamllint + run: yamllint -d relaxed . 
+ + discover-overlays: + name: Discover overlays + runs-on: ubuntu-latest + outputs: + overlays: ${{ steps.find.outputs.overlays }} + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: List overlay directories + id: find + run: echo "overlays=$(ls -d overlays/*/ | xargs -n1 basename | jq -R -s -c 'split("\n") | map(select(. != ""))')" >> "$GITHUB_OUTPUT" + kustomize-build: name: Validate Kustomize (${{ matrix.overlay }}/${{ matrix.layer }}) + needs: [discover-overlays] runs-on: ubuntu-latest strategy: matrix: - overlay: [core, metrics] + overlay: ${{ fromJSON(needs.discover-overlays.outputs.overlays) }} layer: [base, stack] steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 @@ -36,29 +75,38 @@ jobs: kubectl kustomize overlays/${{ matrix.overlay }}/${{ matrix.layer }}/ \ | grep -q 'app.kubernetes.io/part-of: streamshub-developer-quickstart' - shellcheck: - name: Lint shell scripts + verify-resource-limits: + name: Verify resource limits (${{ matrix.overlay }}) + needs: [discover-overlays] runs-on: ubuntu-latest + strategy: + matrix: + overlay: ${{ fromJSON(needs.discover-overlays.outputs.overlays) }} steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - name: Run ShellCheck - uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 - with: - version: v0.11.0 - scandir: "." 
- additional_files: "install.sh uninstall.sh update-version.sh" + - name: Set up kubectl + uses: azure/setup-kubectl@776406bce94f63e41d621b960d78ee25c8b76ede # v4.0.1 - yamllint: - name: Lint YAML files + - name: Set up JBang + uses: jbangdev/setup-jbang@2b1b465a7b75f4222b81426f23a01e013aa7b95c # v0.1.1 + + - name: Verify resource limits + env: + OVERLAY: ${{ matrix.overlay }} + run: jbang .github/scripts/VerifyResourceLimits.java + + verify-documented-resources: + name: Verify documented resources runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - name: Install yamllint - run: | - sudo apt-get update - sudo apt-get install -y yamllint + - name: Set up kubectl + uses: azure/setup-kubectl@776406bce94f63e41d621b960d78ee25c8b76ede # v4.0.1 - - name: Run yamllint - run: yamllint -d relaxed . + - name: Set up JBang + uses: jbangdev/setup-jbang@2b1b465a7b75f4222b81426f23a01e013aa7b95c # v0.1.1 + + - name: Verify documented resources match kustomize output + run: jbang .github/scripts/VerifyDocumentedResources.java diff --git a/README.md b/README.md index 4907aee..99fe1e4 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,7 @@ Full documentation is available in the [`docs/`](docs/) directory: For development workflows including updating component versions and testing scripts locally, see: - [Updating Component Versions](docs/architecture.md#updating-component-versions) - [Install from a Local Checkout](docs/installation.md#install-from-a-local-checkout) +- [Developing Overlays](docs/overlays/developing.md) — resource limits and documentation requirements for overlays ### Previewing Documentation Locally @@ -134,13 +135,18 @@ The test scripts are [JBang](https://www.jbang.dev/) scripts located in `.github | `ComputeTestMatrix.java` | Computes the CI test matrix from overlay dependencies | | `VerifyInstall.java` | Verifies deployments and custom resources are ready | | `VerifyUninstall.java` | Verifies all 
quickstart resources are removed | +| `VerifyResourceLimits.java` | Verifies all containers and CRs have resource limits | +| `VerifyDocumentedResources.java` | Verifies documented resource totals match kustomize output | | `Debug.java` | Dumps diagnostic info (CR status, events, pod logs) | -| `ComputeTestMatrixTest.java` | Unit tests for the matrix computation logic | +| `CrdSchemaUtils.java` | Shared CRD schema introspection utilities | +| `tests/ComputeTestMatrixTest.java` | Unit tests for the matrix computation logic | +| `tests/VerifyResourceLimitsTest.java` | Unit tests for resource limit verification | +| `tests/VerifyDocumentedResourcesTest.java` | Unit tests for documented resource verification | To run the unit tests: ```shell -jbang .github/scripts/ComputeTestMatrixTest.java +for test in .github/scripts/tests/*.java; do jbang "$test"; done ``` To run the verification scripts against a live cluster: diff --git a/docs/overlays/_index.md b/docs/overlays/_index.md index b130733..aca8c7c 100644 --- a/docs/overlays/_index.md +++ b/docs/overlays/_index.md @@ -24,4 +24,5 @@ curl -sL https://raw.githubusercontent.com/streamshub/developer-quickstart/main/ | Overlay | Description | |-----------------------|--------------------------------------------------------------------------------------------------------------------------------------| +| [core](core.md) | The default stack: Strimzi, Kafka, Apicurio Registry, and StreamsHub Console. No `OVERLAY` variable needed. | | [metrics](metrics.md) | Adds Prometheus Operator, a Prometheus instance, and Kafka metrics collection via PodMonitors. Wires the Console to display metrics. | diff --git a/docs/overlays/core.md b/docs/overlays/core.md new file mode 100644 index 0000000..38e5d63 --- /dev/null +++ b/docs/overlays/core.md @@ -0,0 +1,55 @@ ++++ +title = 'Core' +weight = 0 +cpu_total = '4 CPU cores' +memory_total = '4.5 GiB' ++++ + +# Core Overlay + +The core overlay is the default deployment. 
+It installs the base event-streaming stack without any optional extensions. + +## Quick-Start Install + +```shell +curl -sL https://raw.githubusercontent.com/streamshub/developer-quickstart/main/install.sh | bash +``` + +No `OVERLAY` variable is needed — the core overlay is used by default. + +## Manual Install + +```shell +# Phase 1 — Operators and CRDs +kubectl apply -k 'https://github.com/streamshub/developer-quickstart//overlays/core/base?ref=main' + +# Optionally, wait for the operators to be ready +kubectl wait --for=condition=Available deployment/strimzi-cluster-operator -n strimzi --timeout=120s +kubectl wait --for=condition=Available deployment/apicurio-registry-operator -n apicurio-registry --timeout=120s +kubectl wait --for=condition=Available deployment/streamshub-console-operator -n streamshub-console --timeout=120s + +# Phase 2 — Operands +kubectl apply -k 'https://github.com/streamshub/developer-quickstart//overlays/core/stack?ref=main' +``` + +## Uninstall + +```shell +curl -sL https://raw.githubusercontent.com/streamshub/developer-quickstart/main/uninstall.sh | bash +``` + +## Components + +| Component | Namespace | Description | +|---------------------------|----------------------|---------------------------------------------------| +| Strimzi Kafka Operator | `strimzi` | Manages Kafka clusters via CRDs | +| Apicurio Registry Operator| `apicurio-registry` | Manages schema registry instances | +| StreamsHub Console Operator| `streamshub-console`| Manages the Console web UI | +| Kafka cluster | `kafka` | Single-node Kafka cluster (`dev-cluster`) | +| Apicurio Registry | `apicurio-registry` | Schema registry instance with app and UI | +| StreamsHub Console | `streamshub-console` | Web UI for managing Kafka clusters | + +## Resource Requirements + +The core overlay requires at least {{< param cpu_total >}} and {{< param memory_total >}} of allocatable cluster resources. 
diff --git a/docs/overlays/developing.md b/docs/overlays/developing.md new file mode 100644 index 0000000..a855156 --- /dev/null +++ b/docs/overlays/developing.md @@ -0,0 +1,91 @@ ++++ +title = 'Developing Overlays' +weight = 100 ++++ + +# Developing Overlays + +This page covers the requirements for adding or modifying overlays. +CI enforces various checks on the overlays automatically on every pull request. + +## Directory Structure + +Every overlay must have both a directory under `overlays/` and a matching documentation page under `docs/overlays/`. +The directory name and the doc filename (without `.md`) must match exactly. + +``` +overlays// +├── base/ # Phase 1: operators and CRDs +│ └── kustomization.yaml +└── stack/ # Phase 2: operands + └── kustomization.yaml + +docs/overlays/.md # Documentation page with resource totals +``` + +CI automatically discovers overlays from the `overlays/` directory — there is no manual CI matrix to update. +The `VerifyDocumentedResources` script checks that every overlay directory has a matching doc page. + +### Adding a New Overlay — Checklist + +1. Create `overlays//base/kustomization.yaml` and `overlays//stack/kustomization.yaml` +2. Create `docs/overlays/.md` with TOML frontmatter containing `cpu_total` and `memory_total` +3. Set resource limits on all Deployment containers and custom resource fields +4. Verify locally with `OVERLAY= jbang .github/scripts/VerifyResourceLimits.java` +5. Verify documentation totals with `jbang .github/scripts/VerifyDocumentedResources.java` + +## Resource Limits + +Every container in the overlay must have `resources.requests` and `resources.limits` with both `cpu` and `memory` specified. +Requests must equal limits (Guaranteed QoS). 
+ +This applies to: + +- Deployment containers — patched via the component's `kustomization.yaml` +- Custom resource fields — set in the CR manifest (e.g., `spec.resources`, `spec.app.resources`) + +### Optional Features + +The CI script discovers resource fields by walking CRD OpenAPI schemas. +If a CRD declares a `ResourceRequirements` field for an optional feature (e.g., `spec.cruiseControl.resources`), but the CR instance does not configure that feature at all, the resource check is skipped — the CI output will show `SKIPPED (not configured)` for these paths. + +If the feature **is** configured (i.e., the parent path exists in the CR), resource limits **must** be set on it. +In short: if you use it, you must set limits on it. + +To verify locally: + +```shell +OVERLAY=core jbang .github/scripts/VerifyResourceLimits.java +``` + +## Documented Resource Totals + +Each overlay must have a documentation page under `docs/overlays/` with the total resource requirements in its TOML frontmatter: + +```toml ++++ +title = 'My Overlay' +cpu_total = '4 CPU cores' +memory_total = '5 GiB' ++++ +``` + +The `cpu_total` and `memory_total` values must be greater than or equal to the sum of all resource requests in the overlay's `kustomize build` output. +Use human-friendly round numbers, the CI enforces `>=`, not exact equality. + +Render the values in the page body using Hugo's built-in `param` shortcode: + +```markdown +## Resource Requirements + +This overlay requires at least {{}} and +{{}} of allocatable cluster resources. +``` + +To verify locally: + +```shell +jbang .github/scripts/VerifyDocumentedResources.java +``` + +This script auto-discovers all overlay docs with `cpu_total` and `memory_total` frontmatter, sums the actual resources from `kustomize build`, and checks that the documented values are sufficient. 
diff --git a/docs/overlays/metrics.md b/docs/overlays/metrics.md index fa7c460..130fcc9 100644 --- a/docs/overlays/metrics.md +++ b/docs/overlays/metrics.md @@ -1,6 +1,8 @@ +++ title = 'Metrics' weight = 1 +cpu_total = '4 CPU cores' +memory_total = '5 GiB' +++ # Metrics Overlay @@ -53,6 +55,11 @@ The overlay also patches existing resources: - Kafka — enables the [Strimzi Metrics Reporter](https://strimzi.io/docs/operators/latest/deploying#proc-metrics-kafka-str) on the `dev-cluster`, exposing JMX metrics at `/metrics` - Console — adds Prometheus as a metrics data source so the Console UI displays Kafka metrics +## Resource Requirements + +The metrics overlay requires at least {{< param cpu_total >}} and {{< param memory_total >}} of allocatable cluster resources. +This includes the resources for the [core](core.md) stack plus the additional monitoring components listed above. + ## How Metrics Flow ``` From 10a1f53d7af31adc139b984b83d0d6a7d293b0f7 Mon Sep 17 00:00:00 2001 From: Thomas Cooper Date: Fri, 10 Apr 2026 15:34:00 +0100 Subject: [PATCH 3/5] Improve overlay dev documentation and helper scripts * Update overlay developer docs with more details * Add helper script to show the resource limits set in a given overlay * Set the docs preview script to use the same hugo-book version as the StreamsHub site Signed-off-by: Thomas Cooper --- .github/scripts/ShowOverlayResources.java | 350 ++++++++++++++++++ .../tests/ShowOverlayResourcesTest.java | 291 +++++++++++++++ README.md | 4 +- docs/overlays/developing.md | 22 +- docs/preview.sh | 25 +- 5 files changed, 682 insertions(+), 10 deletions(-) create mode 100644 .github/scripts/ShowOverlayResources.java create mode 100644 .github/scripts/tests/ShowOverlayResourcesTest.java diff --git a/.github/scripts/ShowOverlayResources.java b/.github/scripts/ShowOverlayResources.java new file mode 100644 index 0000000..2398cdc --- /dev/null +++ b/.github/scripts/ShowOverlayResources.java @@ -0,0 +1,350 @@ +///usr/bin/env jbang "$0" 
"$@" ; exit $? +//DEPS org.yaml:snakeyaml:2.6 +//DEPS io.fabric8:kubernetes-model-core:7.6.1 +//SOURCES ScriptUtils.java +//SOURCES CrdSchemaUtils.java + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Show a per-component resource breakdown for an overlay and suggest + * frontmatter values for the overlay's documentation page. + * + *

This is a developer helper tool — it always exits successfully + * (unless {@code kustomize} itself fails). It does not enforce any + * rules; use {@code VerifyResourceLimits} and + * {@code VerifyDocumentedResources} for CI verification. + * + *

Environment variables: + *

    + *
  • {@code OVERLAY} — overlay name (default: "core")
  • + *
+ */ +public class ShowOverlayResources { + + private static final String DEFAULT_OVERLAY = "core"; + + public static void main(String[] args) { + String overlay = System.getenv().getOrDefault("OVERLAY", DEFAULT_OVERLAY); + run(overlay); + } + + /** + * Main logic, separated from {@code main()} for testability. + * + * @param overlay the overlay name + * @return the collected resource rows + */ + static List run(String overlay) { + Path repoRoot = ScriptUtils.findRepoRoot(); + + System.out.println("=== Resource breakdown (overlay: " + overlay + ") ==="); + System.out.println(); + + List> baseDocs = + ScriptUtils.runKustomize(repoRoot, "overlays/" + overlay + "/base", true); + List> stackDocs = + ScriptUtils.runKustomize(repoRoot, "overlays/" + overlay + "/stack", true); + + List rows = collectAllRows(baseDocs, stackDocs); + + printTable(rows); + printSummary(rows); + + return rows; + } + + // --- Row collection --- + + /** + * Collect resource rows from all Deployments and custom resources. + */ + static List collectAllRows(List> baseDocs, + List> stackDocs) { + List rows = new ArrayList<>(); + List> allDocs = new ArrayList<>(baseDocs); + allDocs.addAll(stackDocs); + + rows.addAll(collectDeploymentRows(allDocs)); + + Map> crdResourcePaths = buildCrdResourcePaths(baseDocs); + rows.addAll(collectCrRows(stackDocs, crdResourcePaths)); + + return rows; + } + + /** + * Collect one row per container in each Deployment. 
+ */ + @SuppressWarnings("unchecked") + static List collectDeploymentRows(List> docs) { + List rows = new ArrayList<>(); + + List> deployments = docs.stream() + .filter(doc -> "Deployment".equals(doc.get("kind"))) + .collect(Collectors.toList()); + + for (Map deployment : deployments) { + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(deployment); + Map templateSpec = + CrdSchemaUtils.getNestedMap(deployment, "spec", "template", "spec"); + if (templateSpec == null) continue; + + Object containersObj = templateSpec.get("containers"); + if (!(containersObj instanceof List)) continue; + + for (Object item : (List) containersObj) { + if (!(item instanceof Map)) continue; + Map container = (Map) item; + String containerName = (String) container.getOrDefault("name", ""); + + long[] values = extractRequestValues(container.get("resources")); + rows.add(new ResourceRow("Deployment", ref.namespace, ref.name, + containerName, values[0], values[1])); + } + } + return rows; + } + + /** + * Walk CRD schemas to discover ResourceRequirements paths. 
+ */ + static Map> buildCrdResourcePaths(List> baseDocs) { + Map> crdResourcePaths = new LinkedHashMap<>(); + + List> crds = baseDocs.stream() + .filter(doc -> "CustomResourceDefinition".equals(doc.get("kind"))) + .collect(Collectors.toList()); + + for (Map crd : crds) { + String kind = CrdSchemaUtils.extractCrdKind(crd); + if (kind == null) continue; + + Map schema = CrdSchemaUtils.extractCrdSchema(crd); + if (schema == null) continue; + + Map specSchema = + CrdSchemaUtils.getNestedMap(schema, "properties", "spec"); + if (specSchema == null) continue; + + List paths = new ArrayList<>(); + CrdSchemaUtils.walkSchema(specSchema, ".spec", paths); + + paths = paths.stream() + .filter(p -> !CrdSchemaUtils.isPodSpecOverheadPath(p)) + .collect(Collectors.toList()); + + if (!paths.isEmpty()) { + crdResourcePaths.put(kind, paths); + } + } + return crdResourcePaths; + } + + /** + * Collect one row per resolved ResourceRequirements path in each CR. + * Skips unconfigured optional features. + */ + @SuppressWarnings("unchecked") + static List collectCrRows(List> stackDocs, + Map> crdResourcePaths) { + List rows = new ArrayList<>(); + + for (Map doc : stackDocs) { + String kind = (String) doc.get("kind"); + if (kind == null || !crdResourcePaths.containsKey(kind)) continue; + + ScriptUtils.ResourceRef ref = ScriptUtils.ResourceRef.fromManifest(doc); + List paths = crdResourcePaths.get(kind); + + for (String path : paths) { + String[] segments = path.substring(1).split("\\."); + if (segments.length == 0) continue; + + String resourcesKey = segments[segments.length - 1]; + String[] parentSegments = new String[segments.length - 1]; + System.arraycopy(segments, 0, parentSegments, 0, segments.length - 1); + + List parents = + CrdSchemaUtils.resolvePath(doc, parentSegments, 0, ""); + + if (parents.isEmpty()) continue; + + for (CrdSchemaUtils.ResolvedNode parent : parents) { + if (!(parent.value instanceof Map)) continue; + Map parentMap = (Map) parent.value; + Object resourcesObj = 
parentMap.get(resourcesKey); + + long[] values = extractRequestValues(resourcesObj); + String resolvedPath = parent.path + "." + resourcesKey; + rows.add(new ResourceRow(kind, ref.namespace, ref.name, + resolvedPath, values[0], values[1])); + } + } + } + return rows; + } + + // --- Value extraction --- + + /** + * Extract CPU (millicores) and memory (MiB) request values from a + * resources object. + * + * @return {@code long[]{cpuMillis, memoryMiB}} + */ + @SuppressWarnings("unchecked") + private static long[] extractRequestValues(Object resourcesObj) { + long cpu = 0, memory = 0; + if (resourcesObj instanceof Map) { + Map resources = (Map) resourcesObj; + Object requestsObj = resources.get("requests"); + if (requestsObj instanceof Map) { + Map requests = (Map) requestsObj; + if (requests.containsKey("cpu")) + cpu = CrdSchemaUtils.parseCpuMillis(requests.get("cpu")); + if (requests.containsKey("memory")) + memory = CrdSchemaUtils.parseMemoryMiB(requests.get("memory")); + } + } + return new long[]{cpu, memory}; + } + + // --- Output --- + + /** + * Print a column-aligned resource table. 
+ */ + static void printTable(List rows) { + List deployments = rows.stream() + .filter(r -> "Deployment".equals(r.type)) + .collect(Collectors.toList()); + + List crs = rows.stream() + .filter(r -> !"Deployment".equals(r.type)) + .collect(Collectors.toList()); + + if (!deployments.isEmpty()) { + System.out.println("--- Deployments ---"); + int nsWidth = columnWidth(deployments, r -> r.namespace, "NAMESPACE"); + int nameWidth = columnWidth(deployments, r -> r.name, "NAME"); + int detailWidth = columnWidth(deployments, r -> r.detail, "CONTAINER"); + + String fmt = " %-" + nsWidth + "s %-" + nameWidth + "s %-" + detailWidth + "s %6s %10s%n"; + System.out.printf(fmt, "NAMESPACE", "NAME", "CONTAINER", "CPU(m)", "MEMORY(Mi)"); + + for (ResourceRow row : deployments) { + System.out.printf(fmt, row.namespace, row.name, row.detail, + row.cpuMillis, row.memoryMiB); + } + System.out.println(); + } + + if (!crs.isEmpty()) { + System.out.println("--- Custom Resources ---"); + int kindWidth = columnWidth(crs, r -> r.type, "KIND"); + int nameWidth = columnWidth(crs, r -> r.name, "NAME"); + int detailWidth = columnWidth(crs, r -> r.detail, "PATH"); + + String fmt = " %-" + kindWidth + "s %-" + nameWidth + "s %-" + detailWidth + "s %6s %10s%n"; + System.out.printf(fmt, "KIND", "NAME", "PATH", "CPU(m)", "MEMORY(Mi)"); + + for (ResourceRow row : crs) { + System.out.printf(fmt, row.type, row.name, row.detail, + row.cpuMillis, row.memoryMiB); + } + System.out.println(); + } + + if (rows.isEmpty()) { + System.out.println(" (no resources found)"); + System.out.println(); + } + } + + /** + * Print summary totals and suggested frontmatter values. 
+ */ + static void printSummary(List rows) { + long totalCpu = rows.stream().mapToLong(r -> r.cpuMillis).sum(); + long totalMemory = rows.stream().mapToLong(r -> r.memoryMiB).sum(); + + double cpuCores = totalCpu / 1000.0; + double memoryGiB = totalMemory / 1024.0; + + System.out.println("--- Summary ---"); + System.out.printf(" Total: %dm CPU (%.3g cores), %d MiB memory (%.4g GiB)%n", + totalCpu, cpuCores, totalMemory, memoryGiB); + System.out.println(); + + System.out.println("--- Suggested frontmatter ---"); + System.out.println(" cpu_total = '" + suggestCpuFrontmatter(totalCpu) + "'"); + System.out.println(" memory_total = '" + suggestMemoryFrontmatter(totalMemory) + "'"); + } + + // --- Frontmatter suggestion --- + + /** + * Suggest a human-friendly {@code cpu_total} value by rounding up + * to the next whole number of cores. + */ + static String suggestCpuFrontmatter(long totalMillis) { + long cores = (long) Math.ceil(totalMillis / 1000.0); + if (cores == 1) { + return "1 CPU core"; + } + return cores + " CPU cores"; + } + + /** + * Suggest a human-friendly {@code memory_total} value by rounding + * up to the next 0.5 GiB increment. + */ + static String suggestMemoryFrontmatter(long totalMiB) { + double gib = totalMiB / 1024.0; + double rounded = Math.ceil(gib * 2.0) / 2.0; + if (rounded == Math.floor(rounded)) { + return (long) rounded + " GiB"; + } + return String.format("%.1f GiB", rounded); + } + + // --- Utilities --- + + private static int columnWidth(List rows, + java.util.function.Function extractor, + String header) { + int max = header.length(); + for (ResourceRow row : rows) { + max = Math.max(max, extractor.apply(row).length()); + } + return max; + } + + // --- Data classes --- + + /** A single row in the resource breakdown table. 
*/ + static class ResourceRow { + final String type; + final String namespace; + final String name; + final String detail; + final long cpuMillis; + final long memoryMiB; + + ResourceRow(String type, String namespace, String name, + String detail, long cpuMillis, long memoryMiB) { + this.type = type; + this.namespace = namespace; + this.name = name; + this.detail = detail; + this.cpuMillis = cpuMillis; + this.memoryMiB = memoryMiB; + } + } +} diff --git a/.github/scripts/tests/ShowOverlayResourcesTest.java b/.github/scripts/tests/ShowOverlayResourcesTest.java new file mode 100644 index 0000000..12d1fc6 --- /dev/null +++ b/.github/scripts/tests/ShowOverlayResourcesTest.java @@ -0,0 +1,291 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +//DEPS org.junit.jupiter:junit-jupiter:6.0.3 +//DEPS org.junit.platform:junit-platform-launcher:6.0.3 +//DEPS io.fabric8:kubernetes-model-core:7.6.1 +//SOURCES ../ShowOverlayResources.java + +import org.junit.jupiter.api.Test; +import org.junit.platform.launcher.Launcher; +import org.junit.platform.launcher.LauncherDiscoveryRequest; +import org.junit.platform.launcher.core.LauncherFactory; +import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder; +import org.junit.platform.launcher.listeners.SummaryGeneratingListener; +import org.junit.platform.launcher.listeners.TestExecutionSummary; + +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.*; +import static org.junit.platform.engine.discovery.DiscoverySelectors.selectClass; + +public class ShowOverlayResourcesTest { + + public static void main(String[] args) { + LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request() + .selectors(selectClass(ShowOverlayResourcesTest.class)) + .build(); + + SummaryGeneratingListener listener = new SummaryGeneratingListener(); + Launcher launcher = LauncherFactory.create(); + 
launcher.execute(request, listener); + + TestExecutionSummary summary = listener.getSummary(); + summary.printTo(new PrintWriter(System.out)); + + if (summary.getTestsFailedCount() > 0) { + summary.getFailures().forEach(failure -> + failure.getException().printStackTrace()); + System.exit(1); + } + } + + // --- CPU frontmatter suggestion tests --- + + @Test + void suggestsCpuRoundUpToNextCore() { + assertEquals("4 CPU cores", ShowOverlayResources.suggestCpuFrontmatter(3100)); + } + + @Test + void suggestsCpuExactCore() { + assertEquals("3 CPU cores", ShowOverlayResources.suggestCpuFrontmatter(3000)); + } + + @Test + void suggestsCpuSingleCore() { + assertEquals("1 CPU core", ShowOverlayResources.suggestCpuFrontmatter(800)); + } + + @Test + void suggestsCpuZero() { + assertEquals("0 CPU cores", ShowOverlayResources.suggestCpuFrontmatter(0)); + } + + // --- Memory frontmatter suggestion tests --- + + @Test + void suggestsMemoryRoundUpToHalfGiB() { + // 3456 MiB = 3.375 GiB -> rounds up to 3.5 + assertEquals("3.5 GiB", ShowOverlayResources.suggestMemoryFrontmatter(3456)); + } + + @Test + void suggestsMemoryExactGiB() { + // 4096 MiB = 4.0 GiB -> exactly 4 + assertEquals("4 GiB", ShowOverlayResources.suggestMemoryFrontmatter(4096)); + } + + @Test + void suggestsMemoryHalfGiBBoundary() { + // 2560 MiB = 2.5 GiB -> exactly 2.5 + assertEquals("2.5 GiB", ShowOverlayResources.suggestMemoryFrontmatter(2560)); + } + + @Test + void suggestsMemoryRoundsUpFromJustOverHalf() { + // 2561 MiB = 2.501 GiB -> rounds up to 3.0 + assertEquals("3 GiB", ShowOverlayResources.suggestMemoryFrontmatter(2561)); + } + + @Test + void suggestsMemoryZero() { + assertEquals("0 GiB", ShowOverlayResources.suggestMemoryFrontmatter(0)); + } + + // --- Deployment row collection tests --- + + @Test + void collectsDeploymentRows() { + Map deployment = testDeployment("test-op", "test-ns", + Map.of("name", "main", "resources", guaranteedResources("200m", "256Mi")), + Map.of("name", "sidecar", "resources", 
guaranteedResources("100m", "128Mi"))); + + List rows = + ShowOverlayResources.collectDeploymentRows(List.of(deployment)); + + assertEquals(2, rows.size()); + + ShowOverlayResources.ResourceRow main = rows.get(0); + assertEquals("Deployment", main.type); + assertEquals("test-ns", main.namespace); + assertEquals("test-op", main.name); + assertEquals("main", main.detail); + assertEquals(200, main.cpuMillis); + assertEquals(256, main.memoryMiB); + + ShowOverlayResources.ResourceRow sidecar = rows.get(1); + assertEquals("sidecar", sidecar.detail); + assertEquals(100, sidecar.cpuMillis); + assertEquals(128, sidecar.memoryMiB); + } + + @Test + void returnsEmptyRowsForDeploymentWithoutTemplateSpec() { + Map deployment = new LinkedHashMap<>(); + deployment.put("apiVersion", "apps/v1"); + deployment.put("kind", "Deployment"); + deployment.put("metadata", Map.of("name", "broken", "namespace", "default")); + deployment.put("spec", Map.of()); + + List rows = + ShowOverlayResources.collectDeploymentRows(List.of(deployment)); + + assertTrue(rows.isEmpty()); + } + + // --- CR row collection tests --- + + @Test + void collectsCrRows() { + Map cr = testCr("TestCR", Map.of( + "resources", guaranteedResources("500m", "1Gi"))); + + Map> paths = Map.of("TestCR", List.of(".spec.resources")); + + List rows = + ShowOverlayResources.collectCrRows(List.of(cr), paths); + + assertEquals(1, rows.size()); + assertEquals("TestCR", rows.get(0).type); + assertEquals("my-cr", rows.get(0).name); + assertEquals(".spec.resources", rows.get(0).detail); + assertEquals(500, rows.get(0).cpuMillis); + assertEquals(1024, rows.get(0).memoryMiB); + } + + @Test + void collectsCrRowsFromArray() { + Map cr = testCr("TestCR", Map.of( + "containers", List.of( + Map.of("name", "app", "resources", guaranteedResources("500m", "512Mi")), + Map.of("name", "ui", "resources", guaranteedResources("100m", "256Mi"))))); + + Map> paths = Map.of("TestCR", List.of(".spec.containers[].resources")); + + List rows = + 
ShowOverlayResources.collectCrRows(List.of(cr), paths); + + assertEquals(2, rows.size()); + assertEquals(500, rows.get(0).cpuMillis); + assertEquals(512, rows.get(0).memoryMiB); + assertEquals(100, rows.get(1).cpuMillis); + assertEquals(256, rows.get(1).memoryMiB); + } + + @Test + void skipsUnconfiguredCrPath() { + Map cr = testCr("TestCR", Map.of("name", "test")); + + Map> paths = Map.of("TestCR", List.of(".spec.app.resources")); + + List rows = + ShowOverlayResources.collectCrRows(List.of(cr), paths); + + assertTrue(rows.isEmpty()); + } + + // --- Full pipeline test --- + + @Test + void collectsAllRowsFromDeploymentsAndCrs() { + Map crd = testCrd("TestCR", Map.of( + "resources", resourceRequirementsSchema())); + + Map deployment = testDeployment("op", "ns", + Map.of("name", "main", "resources", guaranteedResources("200m", "256Mi"))); + + Map cr = testCr("TestCR", Map.of( + "resources", guaranteedResources("500m", "512Mi"))); + + List rows = + ShowOverlayResources.collectAllRows(List.of(crd, deployment), List.of(cr)); + + assertEquals(2, rows.size()); + + long totalCpu = rows.stream().mapToLong(r -> r.cpuMillis).sum(); + long totalMemory = rows.stream().mapToLong(r -> r.memoryMiB).sum(); + assertEquals(700, totalCpu); + assertEquals(768, totalMemory); + } + + @Test + void buildCrdResourcePathsFiltersPodSpecOverhead() { + Map crd = testCrd("TestCR", Map.of( + "resources", resourceRequirementsSchema(), + "template", Map.of( + "properties", Map.of( + "spec", Map.of( + "properties", Map.of( + "resources", resourceRequirementsSchema())))))); + + Map> paths = ShowOverlayResources.buildCrdResourcePaths(List.of(crd)); + + assertEquals(1, paths.get("TestCR").size()); + assertEquals(".spec.resources", paths.get("TestCR").get(0)); + } + + // --- Helpers --- + + private static Map guaranteedResources(String cpu, String memory) { + return Map.of( + "requests", Map.of("cpu", cpu, "memory", memory), + "limits", Map.of("cpu", cpu, "memory", memory)); + } + + private static Map 
testCr(String kind, Map spec) { + Map cr = new LinkedHashMap<>(); + cr.put("apiVersion", "test/v1"); + cr.put("kind", kind); + cr.put("metadata", Map.of("name", "my-cr", "namespace", "default")); + cr.put("spec", spec); + return cr; + } + + @SafeVarargs + private static Map testDeployment(String name, String namespace, + Map... containers) { + Map deployment = new LinkedHashMap<>(); + deployment.put("apiVersion", "apps/v1"); + deployment.put("kind", "Deployment"); + deployment.put("metadata", Map.of("name", name, "namespace", namespace)); + deployment.put("spec", Map.of( + "template", Map.of( + "spec", Map.of( + "containers", List.of(containers))))); + return deployment; + } + + private static Map testCrd(String kind, Map specProperties) { + Map crd = new LinkedHashMap<>(); + crd.put("apiVersion", "apiextensions.k8s.io/v1"); + crd.put("kind", "CustomResourceDefinition"); + crd.put("metadata", Map.of("name", kind.toLowerCase() + "s.test.io")); + crd.put("spec", Map.of( + "names", Map.of("kind", kind), + "versions", List.of(Map.of( + "name", "v1", + "schema", Map.of( + "openAPIV3Schema", Map.of( + "properties", Map.of( + "spec", Map.of( + "properties", specProperties)))))))); + return crd; + } + + private static Map resourceRequirementsSchema() { + return Map.of( + "type", "object", + "properties", Map.of( + "limits", Map.of( + "type", "object", + "additionalProperties", Map.of( + "x-kubernetes-int-or-string", true)), + "requests", Map.of( + "type", "object", + "additionalProperties", Map.of( + "x-kubernetes-int-or-string", true)))); + } +} diff --git a/README.md b/README.md index 99fe1e4..bee770a 100644 --- a/README.md +++ b/README.md @@ -137,11 +137,13 @@ The test scripts are [JBang](https://www.jbang.dev/) scripts located in `.github | `VerifyUninstall.java` | Verifies all quickstart resources are removed | | `VerifyResourceLimits.java` | Verifies all containers and CRs have resource limits | | `VerifyDocumentedResources.java` | Verifies documented resource totals 
match kustomize output | +| `ShowOverlayResources.java` | Shows per-component resource breakdown and suggests frontmatter values | | `Debug.java` | Dumps diagnostic info (CR status, events, pod logs) | | `CrdSchemaUtils.java` | Shared CRD schema introspection utilities | | `tests/ComputeTestMatrixTest.java` | Unit tests for the matrix computation logic | | `tests/VerifyResourceLimitsTest.java` | Unit tests for resource limit verification | | `tests/VerifyDocumentedResourcesTest.java` | Unit tests for documented resource verification | +| `tests/ShowOverlayResourcesTest.java` | Unit tests for overlay resource breakdown | To run the unit tests: @@ -166,7 +168,7 @@ The scripts accept configuration via environment variables: | Variable | Used by | Default | Description | |----------|---------|---------|-------------| -| `OVERLAY` | VerifyInstall, Debug | `core` | Overlay name to verify | +| `OVERLAY` | VerifyInstall, ShowOverlayResources, Debug | `core` | Overlay name to verify | | `TIMEOUT` | VerifyInstall | `600s` | Wait timeout (supports `s`, `m`, `h` suffixes) | | `CONDITION_OVERRIDES` | VerifyInstall | *(empty)* | Space-separated `apiGroup=Condition` pairs | | `PLATFORMS` | ComputeTestMatrix | `minikube kind` | Space-separated list of target platforms | diff --git a/docs/overlays/developing.md b/docs/overlays/developing.md index a855156..89294dd 100644 --- a/docs/overlays/developing.md +++ b/docs/overlays/developing.md @@ -29,15 +29,16 @@ The `VerifyDocumentedResources` script checks that every overlay directory has a ### Adding a New Overlay — Checklist 1. Create `overlays//base/kustomization.yaml` and `overlays//stack/kustomization.yaml` -2. Create `docs/overlays/.md` with TOML frontmatter containing `cpu_total` and `memory_total` -3. Set resource limits on all Deployment containers and custom resource fields -4. Verify locally with `OVERLAY= jbang .github/scripts/VerifyResourceLimits.java` -5. 
Verify documentation totals with `jbang .github/scripts/VerifyDocumentedResources.java` +2. Set resource limits on all Deployment containers and relevant custom resource fields +3. Run `OVERLAY= jbang .github/scripts/ShowOverlayResources.java` to see the per-component breakdown and suggested frontmatter values +4. Create `docs/overlays/.md` with TOML frontmatter containing `cpu_total` and `memory_total` (use the suggested values from step 3) +5. Verify locally with `OVERLAY= jbang .github/scripts/VerifyResourceLimits.java` +6. Verify documentation totals with `jbang .github/scripts/VerifyDocumentedResources.java` ## Resource Limits Every container in the overlay must have `resources.requests` and `resources.limits` with both `cpu` and `memory` specified. -Requests must equal limits (Guaranteed QoS). +Requests must equal limits. This applies to: @@ -89,3 +90,14 @@ jbang .github/scripts/VerifyDocumentedResources.java ``` This script auto-discovers all overlay docs with `cpu_total` and `memory_total` frontmatter, sums the actual resources from `kustomize build`, and checks that the documented values are sufficient. + +### Calculating Resource Totals + +To see the per-component resource breakdown and the suggested `cpu_total` / `memory_total` values for your overlay: + +```shell +OVERLAY=core jbang .github/scripts/ShowOverlayResources.java +``` + +The script shows every Deployment container and custom resource field with its CPU and memory requests, then prints the totals and suggests round-up values suitable for the frontmatter. +This is a helper tool — it does not enforce anything and always exits successfully. diff --git a/docs/preview.sh b/docs/preview.sh index 946c951..09946dc 100755 --- a/docs/preview.sh +++ b/docs/preview.sh @@ -17,6 +17,11 @@ DOCS_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_DIR="$(cd "${DOCS_DIR}/.." 
&& pwd)" SITE_DIR="${REPO_DIR}/.docs-preview" THEME_REPO="https://github.com/alex-shpak/hugo-book.git" +# To update the theme, change THEME_REF to the desired commit SHA from +# https://github.com/alex-shpak/hugo-book/commits/main +# The script detects mismatches and re-fetches automatically. +# This commit matches the version used by the streamshub-site. +THEME_REF="9d6ad30e9e44077846ece81cdd9e59122fccf4af" THEME_DIR="${SITE_DIR}/themes/hugo-book" ## Check prerequisites ## @@ -40,11 +45,23 @@ fi ## Fetch theme (cached across runs) ## -if [ ! -d "${THEME_DIR}" ]; then - echo "Fetching hugo-book theme..." - git clone --depth 1 "${THEME_REPO}" "${THEME_DIR}" +fetch_theme() { + echo "Fetching hugo-book theme at ${THEME_REF}..." + rm -rf "${THEME_DIR}" + mkdir -p "${THEME_DIR}" + git -C "${THEME_DIR}" init -q + git -C "${THEME_DIR}" remote add origin "${THEME_REPO}" + git -C "${THEME_DIR}" fetch --depth 1 origin "${THEME_REF}" + git -C "${THEME_DIR}" checkout -q FETCH_HEAD +} + +if [ ! -d "${THEME_DIR}/.git" ]; then + fetch_theme +elif ! git -C "${THEME_DIR}" cat-file -e "${THEME_REF}^{commit}" 2>/dev/null; then + echo "Cached theme is at a different version; updating..." + fetch_theme else - echo "Using cached hugo-book theme." + echo "Using cached hugo-book theme (${THEME_REF:0:12})." 
fi ## Generate hugo.toml ## From d3b553e1f7d1abb662baf51e868c5db88f4c9908 Mon Sep 17 00:00:00 2001 From: Thomas Cooper Date: Fri, 10 Apr 2026 15:37:37 +0100 Subject: [PATCH 4/5] Increase smoke test timeouts and minikube resources Signed-off-by: Thomas Cooper --- .github/workflows/integration.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index a885fb1..94eb274 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -54,6 +54,8 @@ jobs: if: matrix.platform == 'minikube' uses: medyagh/setup-minikube@e9e035a86bbc3caea26a450bd4dbf9d0c453682e # v0.0.21 with: + cpus: 'max' + memory: 10g minikube-version: 'latest' addons: registry,ingress,ingress-dns insecure-registry: 'localhost:5000,10.0.0.0/24' @@ -79,3 +81,4 @@ jobs: with: overlay: ${{ matrix.overlay }} condition-overrides: ${{ matrix.condition-overrides }} + timeout: '600s' From 863d04e3800244ee1358f80842bd4f489e80a1a0 Mon Sep 17 00:00:00 2001 From: Thomas Cooper Date: Fri, 10 Apr 2026 16:42:21 +0100 Subject: [PATCH 5/5] Switch to Burstable QoS to fix CI CPU exhaustion on 4-CPU runners MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The requirement for requests == limits caused total CPU requests (3,650m for metrics overlay) to exceed what a 4-CPU minikube node can allocate after Kubernetes system overhead (~900m), leaving the console deployment stuck Pending with "Insufficient cpu". 
Lower CPU requests while keeping limits unchanged so pods reserve less for scheduling but can still burst under load: * Console operator: 500m → 100m request, 500m limit * Kafka dual-role: 500m → 250m request, 500m limit * Apicurio registry app: 500m → 250m request, 500m limit * Console API: 500m → 250m request, 500m limit Relax CI invariant from requests == limits to requests <= limits: * Rename checkRequestsEqualsLimits → checkRequestsNotExceedLimits * Update VerifyResourceLimits and VerifyDocumentedResources call sites * Update unit tests to accept requests < limits, reject requests > limits * Update documented cpu_total from 4 to 3 CPU cores for both overlays --- .github/scripts/CrdSchemaUtils.java | 16 ++++----- .../scripts/VerifyDocumentedResources.java | 14 ++++---- .github/scripts/VerifyResourceLimits.java | 6 ++-- .../tests/VerifyDocumentedResourcesTest.java | 34 ++++++++++++++++--- .../tests/VerifyResourceLimitsTest.java | 32 +++++++++++++---- .../kustomization.yaml | 2 +- .../stack/apicurio-registry/registry.yaml | 2 +- .../core/stack/kafka/kustomization.yaml | 2 +- .../stack/streamshub-console/console.yaml | 2 +- docs/overlays/core.md | 2 +- docs/overlays/developing.md | 1 - docs/overlays/metrics.md | 2 +- 12 files changed, 79 insertions(+), 36 deletions(-) diff --git a/.github/scripts/CrdSchemaUtils.java b/.github/scripts/CrdSchemaUtils.java index 01814fd..1a92152 100644 --- a/.github/scripts/CrdSchemaUtils.java +++ b/.github/scripts/CrdSchemaUtils.java @@ -202,8 +202,8 @@ static long parseMemoryMiB(Object value) { } /** - * Check that {@code resources.requests} equals {@code resources.limits} - * for both CPU and memory (Guaranteed QoS invariant). + * Check that {@code resources.requests} does not exceed {@code resources.limits} + * for both CPU and memory. * *

Uses numeric comparison via Fabric8 {@link Quantity} so semantically * equal values in different formats (e.g., {@code "1"} vs {@code "1000m"}) @@ -211,10 +211,10 @@ static long parseMemoryMiB(Object value) { * * @param resources the resources map (with "requests" and "limits" sub-maps) * @param prefix a human-readable prefix for error messages - * @return list of invariant violation messages (empty if requests == limits) + * @return list of invariant violation messages (empty if requests <= limits) */ @SuppressWarnings("unchecked") - static List checkRequestsEqualsLimits(Map resources, String prefix) { + static List checkRequestsNotExceedLimits(Map resources, String prefix) { List errors = new ArrayList<>(); if (resources == null) return errors; @@ -228,17 +228,17 @@ static List checkRequestsEqualsLimits(Map resources, Str if (requests.containsKey("cpu") && limits.containsKey("cpu")) { long reqCpu = parseCpuMillis(requests.get("cpu")); long limCpu = parseCpuMillis(limits.get("cpu")); - if (reqCpu != limCpu) { + if (reqCpu > limCpu) { errors.add(prefix + " requests.cpu (" + reqCpu - + "m) != limits.cpu (" + limCpu + "m)"); + + "m) > limits.cpu (" + limCpu + "m)"); } } if (requests.containsKey("memory") && limits.containsKey("memory")) { long reqMem = parseMemoryMiB(requests.get("memory")); long limMem = parseMemoryMiB(limits.get("memory")); - if (reqMem != limMem) { + if (reqMem > limMem) { errors.add(prefix + " requests.memory (" + reqMem - + "Mi) != limits.memory (" + limMem + "Mi)"); + + "Mi) > limits.memory (" + limMem + "Mi)"); } } diff --git a/.github/scripts/VerifyDocumentedResources.java b/.github/scripts/VerifyDocumentedResources.java index e3a8805..df97690 100644 --- a/.github/scripts/VerifyDocumentedResources.java +++ b/.github/scripts/VerifyDocumentedResources.java @@ -30,8 +30,8 @@ * the kustomize output and asserts that documented values are greater * than or equal to the actual values. * - *

Also verifies the invariant that resource requests equal limits - * for every container. + *

Also verifies the invariant that resource requests do not exceed + * limits for every container. */ public class VerifyDocumentedResources { @@ -133,12 +133,12 @@ static int run() { System.out.println(" memory: " + docMemoryMiB + "Mi >= " + totals.memoryMiB + "Mi - OK"); } - // Check requests == limits invariant + // Check requests <= limits invariant if (totals.invariantErrors.isEmpty()) { - System.out.println(" requests == limits invariant: OK"); + System.out.println(" requests <= limits invariant: OK"); } else { allErrors.addAll(totals.invariantErrors); - System.out.println(" requests == limits invariant: FAILED (" + System.out.println(" requests <= limits invariant: FAILED (" + totals.invariantErrors.size() + " violation(s))"); } @@ -427,7 +427,7 @@ static ResourceTotals sumCrResourcePath(Map cr, String path) { /** * Extract CPU and memory request values from a resources object, - * and verify the requests == limits invariant. + * and verify the requests <= limits invariant. */ @SuppressWarnings("unchecked") static ResourceTotals extractResourceValues(Object resourcesObj, String prefix) { @@ -449,7 +449,7 @@ static ResourceTotals extractResourceValues(Object resourcesObj, String prefix) } } - List invariantErrors = CrdSchemaUtils.checkRequestsEqualsLimits(resources, prefix); + List invariantErrors = CrdSchemaUtils.checkRequestsNotExceedLimits(resources, prefix); return new ResourceTotals(cpu, memory, invariantErrors); } diff --git a/.github/scripts/VerifyResourceLimits.java b/.github/scripts/VerifyResourceLimits.java index 85d86f0..84b37b3 100644 --- a/.github/scripts/VerifyResourceLimits.java +++ b/.github/scripts/VerifyResourceLimits.java @@ -25,7 +25,7 @@ * Optional features (where the parent path doesn't exist in the CR) * are skipped. * - *

Also verifies that resource requests equal limits (Guaranteed QoS). + *

Also verifies that resource requests do not exceed limits. * *

Environment variables: *

    @@ -239,8 +239,8 @@ static List checkResourcesObject(Object resourcesObj, String prefix) { if (!limMap.containsKey("memory")) errors.add(prefix + " missing resources.limits.memory"); } - // Verify requests == limits invariant (Guaranteed QoS) - errors.addAll(CrdSchemaUtils.checkRequestsEqualsLimits(resources, prefix)); + // Verify requests <= limits invariant + errors.addAll(CrdSchemaUtils.checkRequestsNotExceedLimits(resources, prefix)); return errors; } diff --git a/.github/scripts/tests/VerifyDocumentedResourcesTest.java b/.github/scripts/tests/VerifyDocumentedResourcesTest.java index 1c7b3be..902a2d0 100644 --- a/.github/scripts/tests/VerifyDocumentedResourcesTest.java +++ b/.github/scripts/tests/VerifyDocumentedResourcesTest.java @@ -308,7 +308,7 @@ void returnsZeroForMissingResources() { } @Test - void detectsRequestsNotEqualLimitsCpu() { + void passesWhenRequestsBelowLimitsCpu() { Map resources = Map.of( "requests", Map.of("cpu", "200m", "memory", "256Mi"), "limits", Map.of("cpu", "500m", "memory", "256Mi")); @@ -317,12 +317,11 @@ void detectsRequestsNotEqualLimitsCpu() { VerifyDocumentedResources.extractResourceValues(resources, "test"); assertEquals(200, totals.cpuMillis); - assertEquals(1, totals.invariantErrors.size()); - assertTrue(totals.invariantErrors.get(0).contains("requests.cpu")); + assertTrue(totals.invariantErrors.isEmpty()); } @Test - void detectsRequestsNotEqualLimitsMemory() { + void passesWhenRequestsBelowLimitsMemory() { Map resources = Map.of( "requests", Map.of("cpu", "200m", "memory", "256Mi"), "limits", Map.of("cpu", "200m", "memory", "512Mi")); @@ -331,6 +330,33 @@ void detectsRequestsNotEqualLimitsMemory() { VerifyDocumentedResources.extractResourceValues(resources, "test"); assertEquals(256, totals.memoryMiB); + assertTrue(totals.invariantErrors.isEmpty()); + } + + @Test + void detectsRequestsExceedingLimitsCpu() { + Map resources = Map.of( + "requests", Map.of("cpu", "500m", "memory", "256Mi"), + "limits", Map.of("cpu", "200m", 
"memory", "256Mi")); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues(resources, "test"); + + assertEquals(500, totals.cpuMillis); + assertEquals(1, totals.invariantErrors.size()); + assertTrue(totals.invariantErrors.get(0).contains("requests.cpu")); + } + + @Test + void detectsRequestsExceedingLimitsMemory() { + Map resources = Map.of( + "requests", Map.of("cpu", "200m", "memory", "512Mi"), + "limits", Map.of("cpu", "200m", "memory", "256Mi")); + + VerifyDocumentedResources.ResourceTotals totals = + VerifyDocumentedResources.extractResourceValues(resources, "test"); + + assertEquals(512, totals.memoryMiB); assertEquals(1, totals.invariantErrors.size()); assertTrue(totals.invariantErrors.get(0).contains("requests.memory")); } diff --git a/.github/scripts/tests/VerifyResourceLimitsTest.java b/.github/scripts/tests/VerifyResourceLimitsTest.java index ea7edcd..ad0c572 100644 --- a/.github/scripts/tests/VerifyResourceLimitsTest.java +++ b/.github/scripts/tests/VerifyResourceLimitsTest.java @@ -308,30 +308,48 @@ void reportsMultipleMissingFields() { assertEquals(4, errors.size()); } - // --- requests == limits invariant tests --- + // --- requests <= limits invariant tests --- @Test - void rejectsRequestsNotEqualLimitsCpu() { + void passesWhenRequestsBelowLimitsCpu() { Map resources = Map.of( "requests", Map.of("cpu", "200m", "memory", "256Mi"), "limits", Map.of("cpu", "500m", "memory", "256Mi")); List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); - assertEquals(1, errors.size()); - assertTrue(errors.get(0).contains("requests.cpu")); - assertTrue(errors.get(0).contains("!=")); + assertTrue(errors.isEmpty(), "Expected no errors but got: " + errors); } @Test - void rejectsRequestsNotEqualLimitsMemory() { + void passesWhenRequestsBelowLimitsMemory() { Map resources = Map.of( "requests", Map.of("cpu", "200m", "memory", "256Mi"), "limits", Map.of("cpu", "200m", "memory", "512Mi")); + List errors 
= VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertTrue(errors.isEmpty(), "Expected no errors but got: " + errors); + } + + @Test + void rejectsRequestsExceedingLimitsCpu() { + Map resources = Map.of( + "requests", Map.of("cpu", "500m", "memory", "256Mi"), + "limits", Map.of("cpu", "200m", "memory", "256Mi")); + + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); + assertEquals(1, errors.size()); + assertTrue(errors.get(0).contains("requests.cpu")); + } + + @Test + void rejectsRequestsExceedingLimitsMemory() { + Map resources = Map.of( + "requests", Map.of("cpu", "200m", "memory", "512Mi"), + "limits", Map.of("cpu", "200m", "memory", "256Mi")); + List errors = VerifyResourceLimits.checkResourcesObject(resources, "test"); assertEquals(1, errors.size()); assertTrue(errors.get(0).contains("requests.memory")); - assertTrue(errors.get(0).contains("!=")); } @Test diff --git a/components/core/base/streamshub-console-operator/kustomization.yaml b/components/core/base/streamshub-console-operator/kustomization.yaml index 2e44f7f..71ae7f7 100644 --- a/components/core/base/streamshub-console-operator/kustomization.yaml +++ b/components/core/base/streamshub-console-operator/kustomization.yaml @@ -40,7 +40,7 @@ patches: - name: streamshub-console-operator resources: requests: - cpu: 500m + cpu: 100m memory: 256Mi limits: cpu: 500m diff --git a/components/core/stack/apicurio-registry/registry.yaml b/components/core/stack/apicurio-registry/registry.yaml index 3672535..ae87bc5 100644 --- a/components/core/stack/apicurio-registry/registry.yaml +++ b/components/core/stack/apicurio-registry/registry.yaml @@ -10,7 +10,7 @@ spec: - name: apicurio-registry-app resources: requests: - cpu: 500m + cpu: 250m memory: 512Mi limits: cpu: 500m diff --git a/components/core/stack/kafka/kustomization.yaml b/components/core/stack/kafka/kustomization.yaml index 9cf519b..a220352 100644 --- a/components/core/stack/kafka/kustomization.yaml +++ 
b/components/core/stack/kafka/kustomization.yaml @@ -35,7 +35,7 @@ patches: spec: resources: requests: - cpu: 500m + cpu: 250m memory: 1Gi limits: cpu: 500m diff --git a/components/core/stack/streamshub-console/console.yaml b/components/core/stack/streamshub-console/console.yaml index c02250a..1a86426 100644 --- a/components/core/stack/streamshub-console/console.yaml +++ b/components/core/stack/streamshub-console/console.yaml @@ -9,7 +9,7 @@ spec: spec: resources: requests: - cpu: 500m + cpu: 250m memory: 512Mi limits: cpu: 500m diff --git a/docs/overlays/core.md b/docs/overlays/core.md index 38e5d63..7fd3feb 100644 --- a/docs/overlays/core.md +++ b/docs/overlays/core.md @@ -1,7 +1,7 @@ +++ title = 'Core' weight = 0 -cpu_total = '4 CPU cores' +cpu_total = '3 CPU cores' memory_total = '4.5 GiB' +++ diff --git a/docs/overlays/developing.md b/docs/overlays/developing.md index 89294dd..5efebea 100644 --- a/docs/overlays/developing.md +++ b/docs/overlays/developing.md @@ -38,7 +38,6 @@ The `VerifyDocumentedResources` script checks that every overlay directory has a ## Resource Limits Every container in the overlay must have `resources.requests` and `resources.limits` with both `cpu` and `memory` specified. -Requests must equal limits. This applies to: diff --git a/docs/overlays/metrics.md b/docs/overlays/metrics.md index 130fcc9..45da3db 100644 --- a/docs/overlays/metrics.md +++ b/docs/overlays/metrics.md @@ -1,7 +1,7 @@ +++ title = 'Metrics' weight = 1 -cpu_total = '4 CPU cores' +cpu_total = '3 CPU cores' memory_total = '5 GiB' +++