diff --git a/charts/clickhouse/templates/chi.yaml b/charts/clickhouse/templates/chi.yaml
index 790a7428..6d4184f1 100644
--- a/charts/clickhouse/templates/chi.yaml
+++ b/charts/clickhouse/templates/chi.yaml
@@ -1,4 +1,10 @@
{{- $service_name := tpl (include "clickhouse.serviceTemplateName" . ) . -}}
+{{- $extraPortNames := list }}
+{{- range .Values.clickhouse.extraPorts }}
+{{- $extraPortNames = append $extraPortNames .name }}
+{{- end }}
+{{- $serviceHttpsPort := (((.Values.clickhouse).settings).https_port) | default "" -}}
+{{- $serviceSecureTcpPort := (((.Values.clickhouse).settings).tcp_port_secure) | default "" -}}
---
apiVersion: "clickhouse.altinity.com/v1"
kind: ClickHouseInstallation
@@ -62,6 +68,16 @@ spec:
targetPort: {{ .containerPort }}
{{- end }}
{{- end }}
+ {{- if and $serviceHttpsPort (not (has "https" $extraPortNames)) }}
+ - name: https
+ port: {{ $serviceHttpsPort }}
+ targetPort: {{ $serviceHttpsPort }}
+ {{- end }}
+ {{- if and $serviceSecureTcpPort (not (has "tcp-secure" $extraPortNames)) }}
+ - name: tcp-secure
+ port: {{ $serviceSecureTcpPort }}
+ targetPort: {{ $serviceSecureTcpPort }}
+ {{- end }}
selector:
{{- include "clickhouse.selectorLabels" . | nindent 12 }}
{{- if .Values.clickhouse.lbService.enabled }}
@@ -96,6 +112,16 @@ spec:
targetPort: {{ .containerPort }}
{{- end }}
{{- end }}
+ {{- if and $serviceHttpsPort (not (has "https" $extraPortNames)) }}
+ - name: https
+ port: {{ $serviceHttpsPort }}
+ targetPort: {{ $serviceHttpsPort }}
+ {{- end }}
+ {{- if and $serviceSecureTcpPort (not (has "tcp-secure" $extraPortNames)) }}
+ - name: tcp-secure
+ port: {{ $serviceSecureTcpPort }}
+ targetPort: {{ $serviceSecureTcpPort }}
+ {{- end }}
selector:
{{- include "clickhouse.selectorLabels" . | nindent 12 }}
{{- end }}
@@ -190,7 +216,8 @@ spec:
{{- if .Values.clickhouse.settings }}
settings:
{{- range $key, $value := .Values.clickhouse.settings }}
- {{ $key }}: "{{ $value }}"
+ {{- $valueIsNumeric := regexMatch "^[0-9]+$" ( $value | toString ) }}
+ {{ $key }}: {{ ternary $value ( $value | quote ) $valueIsNumeric }}
{{- end }}
{{- end }}
clusters:
diff --git a/tests/README.md b/tests/README.md
index a2117d84..8a57f9cb 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -173,6 +173,7 @@ The test suite provides comprehensive coverage across multiple dimensions:
#### **2. ClickHouse Functionality**
- ✅ Version verification
- ✅ Connection testing
+- ✅ Server-side TLS/SSL and HTTPS
- ✅ Query execution
- ✅ Cluster topology (system.clusters)
- ✅ Replication health (system.replicas)
@@ -246,7 +247,6 @@ Areas that **need additional testing or are not fully covered**:
- ❌ **Backup and restore** - No automated backup/restore testing
- ❌ **Disaster recovery** - No full cluster failure scenarios
- ❌ **Network policies** - Limited testing of K8s network restrictions
-- ❌ **TLS/SSL** - No certificate or encryption testing
- ❌ **Monitoring integration** - Prometheus scraping tested only via annotations
- ❌ **Logging integration** - No FluentD/ElasticSearch integration tests
- ❌ **Multi-cluster** - No federation or distributed query tests
@@ -273,11 +273,12 @@ Areas that **need additional testing or are not fully covered**:
## 🌍 Supported Environment
- **Operating System**: [Ubuntu](https://ubuntu.com/) 22.04 / 24.04
-- **Python**: >= 3.10.12
+- **Python**: >= 3.10.12, <= 3.12 (3.13+ ships an `lzma` module that is incompatible with the test framework)
- **Kubernetes**: >= 1.24
- **Helm**: >= 3.8.0
- **Minikube**: >= 1.28.0 (for local testing)
- **Docker**: Required as Minikube driver
+ - (alternatively) **OrbStack**: >= 2.0
- **kubectl**: Latest stable version
---
@@ -286,7 +287,9 @@ Areas that **need additional testing or are not fully covered**:
### Kubernetes Cluster
-You need access to a Kubernetes cluster. For **local testing**, use Minikube:
+You need access to a Kubernetes cluster. For **local testing**, two providers are supported:
+
+#### Option 1: Minikube (default)
```bash
# Install Minikube
@@ -297,6 +300,27 @@ sudo install minikube-linux-amd64 /usr/local/bin/minikube
minikube version
```
+#### Option 2: OrbStack (account required, may need license)
+
+1. Install OrbStack by following the [_Quick start_ guide \(docs.orbstack.dev\)](
+ https://docs.orbstack.dev/quick-start#installation).
+2. Enable Kubernetes in OrbStack:
+ 1. Open the OrbStack app
+ 2. Go to Settings... (`Cmd ⌘ + ,`) → Kubernetes
+ 3. Toggle the `Enable Kubernetes cluster` option
+ 4. Click the `Apply and Restart` button
+3. Verify that OrbStack is running.
+ ```sh
+ $ orb status
+ # Running
+ ```
+4. Verify the Kubernetes context.
+ ```sh
+ $ kubectl config get-contexts orbstack
+ # CURRENT NAME CLUSTER AUTHINFO NAMESPACE
+ # * orbstack orbstack orbstack
+ ```
+
### Helm
Install Helm 3:
@@ -323,6 +347,7 @@ pip3 install -r tests/requirements.txt
- `testflows.texts==2.0.211217.1011222` - Text utilities
- `PyYAML==6.0.1` - YAML parsing
- `requests==2.32.3` - HTTP requests
+- `cryptography==46.0.5` - TLS validation
---
@@ -334,14 +359,18 @@ To run the complete test suite (all active fixtures + upgrades):
```bash
# From the repository root
+# With Minikube (default)
python3 ./tests/run/smoke.py
+
+# With OrbStack
+LOCAL_K8S_PROVIDER=orbstack python3 ./tests/run/smoke.py
```
This will:
-1. Start/restart Minikube with 4 CPUs and 6GB memory
+1. \[Minikube only\] Start/restart Minikube with 4 CPUs and 6GB memory
2. Run all enabled fixture deployments
3. Run upgrade scenarios
-4. Clean up and delete Minikube
+4. \[Minikube only\] Clean up and delete Minikube
**Expected Duration**: 10 minutes
diff --git a/tests/fixtures/10-tls.yaml b/tests/fixtures/10-tls.yaml
new file mode 100644
index 00000000..a8cd7ecf
--- /dev/null
+++ b/tests/fixtures/10-tls.yaml
@@ -0,0 +1,98 @@
+---
+# Single-node deployment with TLS and load-balancer service
+# Tests: Basic deployment with TLS using inline cert + secret refs, no keeper, minimal config
+# Expected pods: 1 ClickHouse
+clickhouse:
+ replicasCount: 1
+ shardsCount: 1
+
+ defaultUser:
+ password: "TestTLSPassword123"
+ allowExternalAccess: true
+
+ persistence:
+ enabled: true
+ size: 2Gi
+ accessMode: ReadWriteOnce
+
+ service:
+ type: ClusterIP
+
+ lbService:
+ enabled: true
+
+ settings:
+ https_port: 8444
+
+ configurationFiles:
+ # To regenerate the public certificate:
+ # cd tests/fixtures/tls/ # this directory
+ # openssl req -x509 -key test-server.key -days 10950 \
+ # -out server.crt -config public-cert.cnf -extensions v3_req
+ #
+ # Update inlineFileContent with the new certificate content for
+ # the public certificate to be used in the TLS test fixture.
+ #
+ # To verify the public certificate:
+ # openssl x509 -in server.crt -text -noout
+ #
+ # To verify that the private key & public certificate moduli match:
+ # openssl x509 -noout -modulus -in server.crt | openssl md5
+ # openssl rsa -noout -modulus -in test-server.key | openssl md5
+ config.d/foo.crt: |
+ -----BEGIN CERTIFICATE-----
+ MIIDQjCCAiqgAwIBAgIUXWU1ixzpK9OTqFfA+ZhK/EOtLIYwDQYJKoZIhvcNAQEL
+ BQAwHjEcMBoGA1UEAwwTKi5zdmMuY2x1c3Rlci5sb2NhbDAgFw0yNjAyMTQwMTIz
+ NTNaGA8yMDU2MDIwNzAxMjM1M1owHjEcMBoGA1UEAwwTKi5zdmMuY2x1c3Rlci5s
+ b2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIvGupZ9FR3DLC1R
+ /+7hbUySKYSIYOnfZQBnfkMVyLBDdp1WX9aspJLEaazO0rU4l0YjqnLsnckuBxmr
+ OOzzeNA+8ExBkPEANR/mROMIcwXhrdFO3sWH2amVncHFUxspwgDhbZJ0zfVNtQo0
+ Q/JthTWGqYW+4HbDnzOWWkUo23oZcClyELTbhQitxgrsOUyDIcR2ZNae3yueVAoK
+ F12fH4Sms75FLvwwlUuWU3F1lJKr/U7nPxBdl6CY/sPXITov2LcmwlQLebCchjVB
+ 3kEvPKJRBPmW0Dyrr9IRyyExU3qfYdzsJdZOHY/qOB0Dw42qXolkf2L5m6GtW3EF
+ hnqM5SsCAwEAAaN2MHQwCwYDVR0PBAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMB
+ MDEGA1UdEQQqMCiCFSouKi5zdmMuY2x1c3Rlci5sb2NhbIIJbG9jYWxob3N0hwR/
+ AAABMB0GA1UdDgQWBBQzPiDb07CH5dKWb5xYa1PoGtz45jANBgkqhkiG9w0BAQsF
+ AAOCAQEActxS3ySYtlVmykOlaqTaTj8wyZjQ+gFBQ9APEuKwT4+F2HcuIRWWxJIT
+ Pt+CigSMG0XGFQi/+ZRfksXSmfcmErMgDUEr7jsxwvqn6esbAvHX02Vk82oAGkKu
+ t3JVxRAz9Dsl1Hm0W+IAGO336QFwd7iTo367TKSz+jcyHRfnftEZnxtbIvqAkR7V
+ 8z1NRYhRR5ZeLpwjiqwdAU4AgXKQifQWeWjPeJJr+4pEvy2ivpzhhalX7/tB1TuP
+ 4hpQdRi/c+0h5pDtrBpW61gUb0xVWySIkMifLLXfLNjynLKBJmWG91M34fTK1O8+
+ LYd+XhJEhXtFt8+3kktghDcehcuK7g==
+ -----END CERTIFICATE-----
+
+ bar.key:
+ valueFrom:
+ secretKeyRef:
+ name: clickhouse-certs
+ key: server.key
+
+ dhparam.pem:
+ valueFrom:
+ secretKeyRef:
+ name: clickhouse-certs
+ key: dhparam.pem
+
+  config.d/openssl.xml: |
+    <clickhouse>
+      <openSSL>
+        <server>
+          <certificateFile>/etc/clickhouse-server/config.d/foo.crt</certificateFile>
+          <privateKeyFile>/etc/clickhouse-server/secrets.d/bar.key/clickhouse-certs/server.key</privateKeyFile>
+          <dhParamsFile>/etc/clickhouse-server/secrets.d/dhparam.pem/clickhouse-certs/dhparam.pem</dhParamsFile>
+          <verificationMode>relaxed</verificationMode>
+          <loadDefaultCAFile>true</loadDefaultCAFile>
+          <cacheSessions>true</cacheSessions>
+          <disableProtocols>sslv2,sslv3</disableProtocols>
+          <preferServerCiphers>true</preferServerCiphers>
+        </server>
+      </openSSL>
+    </clickhouse>
+
+keeper:
+ enabled: false
+
+operator:
+ enabled: true
+
+
diff --git a/tests/fixtures/tls/dhparam.pem b/tests/fixtures/tls/dhparam.pem
new file mode 100644
index 00000000..55ab3873
--- /dev/null
+++ b/tests/fixtures/tls/dhparam.pem
@@ -0,0 +1,13 @@
+-----BEGIN DH PARAMETERS-----
+MIICDAKCAgEA7QR588LscfM+0JHrq22Xc13COYt5l2p+SqWKAijcdN6n7TWpaSVQ
+3N1Lj+EFt2sXZ3NpNtfv8YAMZRdbAd1oe+kquGuugtxmUUUGUXiXqeIS6Gp7bGAj
+5WWobY7WtkD/HsGrvI6kxhTk3nXIEYolUHMJGb0yVLcvYR73j5F6K3ONfd107T49
+/8PoVOr3ZcoBymfQK/a/mNVADKPPQ/ALAHjpIZEkQlCEj9Jw4Osaro0BEySoKJhK
+5lIybQ5TJO023r9rpbKNxILaRy5esq4Vir3tlPb9eKumte6X4HFvTU36aTp5ZX/m
+Ef25jhGRxnkH/N/WDEHXZPOToqyNJzdlmhvZjLj+Ru2SknS+pAZ9ZbbovzG1qPhW
+BxFwotZLmTaD1+Xhm374HEY8PGeMytnrRq5W5oMpzY9PbDL+MhwxwChvWMpfPmbE
+YN+InQjWNrw+C/VGLwyiOsQRhKnsCJSNckDv4cDOKuaIajhInnjGrQn7c51X2qT/
+8ScJ18FLrokEw/n+61xo4TFq7L9RSddiWbaTLvXrX6ZJvE/G0APA7eDeSN/p83TV
+/pYgtiHOsgaSQ8qMFAIa03hdzfw/XqA8DTu5gf6JbV9BcPJ/381Kv3oC53I7XjQP
+ZfpINvFBEs1Ss4fULqnQ3V65DktpS2HhC2gDVlw+084dSy6cnXX7pv8CAQICAgFF
+-----END DH PARAMETERS-----
diff --git a/tests/fixtures/tls/public-cert.cnf b/tests/fixtures/tls/public-cert.cnf
new file mode 100644
index 00000000..0c11169a
--- /dev/null
+++ b/tests/fixtures/tls/public-cert.cnf
@@ -0,0 +1,24 @@
+[req]
+distinguished_name = req_distinguished_name
+req_extensions = v3_req
+prompt = no
+
+[req_distinguished_name]
+CN = *.svc.cluster.local
+
+[v3_req]
+keyUsage = digitalSignature, keyEncipherment, dataEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = @alt_names
+
+[alt_names]
+# Wildcard pattern covers all Kubernetes service DNS names:
+# - t10-tls-service.t10-tls.svc.cluster.local
+# - t01-tls-clickhouse.t01-tls.svc.cluster.local
+# - chi-<installation>-<cluster>-<shard>-<replica>.<namespace>.svc.cluster.local
+# This makes the certificate resilient to test fixture renames.
+DNS.1 = *.*.svc.cluster.local
+
+# Localhost for local testing
+DNS.2 = localhost
+IP.1 = 127.0.0.1
diff --git a/tests/fixtures/tls/server.crt b/tests/fixtures/tls/server.crt
new file mode 100644
index 00000000..aa2a9679
--- /dev/null
+++ b/tests/fixtures/tls/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQjCCAiqgAwIBAgIUXWU1ixzpK9OTqFfA+ZhK/EOtLIYwDQYJKoZIhvcNAQEL
+BQAwHjEcMBoGA1UEAwwTKi5zdmMuY2x1c3Rlci5sb2NhbDAgFw0yNjAyMTQwMTIz
+NTNaGA8yMDU2MDIwNzAxMjM1M1owHjEcMBoGA1UEAwwTKi5zdmMuY2x1c3Rlci5s
+b2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIvGupZ9FR3DLC1R
+/+7hbUySKYSIYOnfZQBnfkMVyLBDdp1WX9aspJLEaazO0rU4l0YjqnLsnckuBxmr
+OOzzeNA+8ExBkPEANR/mROMIcwXhrdFO3sWH2amVncHFUxspwgDhbZJ0zfVNtQo0
+Q/JthTWGqYW+4HbDnzOWWkUo23oZcClyELTbhQitxgrsOUyDIcR2ZNae3yueVAoK
+F12fH4Sms75FLvwwlUuWU3F1lJKr/U7nPxBdl6CY/sPXITov2LcmwlQLebCchjVB
+3kEvPKJRBPmW0Dyrr9IRyyExU3qfYdzsJdZOHY/qOB0Dw42qXolkf2L5m6GtW3EF
+hnqM5SsCAwEAAaN2MHQwCwYDVR0PBAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMB
+MDEGA1UdEQQqMCiCFSouKi5zdmMuY2x1c3Rlci5sb2NhbIIJbG9jYWxob3N0hwR/
+AAABMB0GA1UdDgQWBBQzPiDb07CH5dKWb5xYa1PoGtz45jANBgkqhkiG9w0BAQsF
+AAOCAQEActxS3ySYtlVmykOlaqTaTj8wyZjQ+gFBQ9APEuKwT4+F2HcuIRWWxJIT
+Pt+CigSMG0XGFQi/+ZRfksXSmfcmErMgDUEr7jsxwvqn6esbAvHX02Vk82oAGkKu
+t3JVxRAz9Dsl1Hm0W+IAGO336QFwd7iTo367TKSz+jcyHRfnftEZnxtbIvqAkR7V
+8z1NRYhRR5ZeLpwjiqwdAU4AgXKQifQWeWjPeJJr+4pEvy2ivpzhhalX7/tB1TuP
+4hpQdRi/c+0h5pDtrBpW61gUb0xVWySIkMifLLXfLNjynLKBJmWG91M34fTK1O8+
+LYd+XhJEhXtFt8+3kktghDcehcuK7g==
+-----END CERTIFICATE-----
diff --git a/tests/fixtures/tls/test-server.key b/tests/fixtures/tls/test-server.key
new file mode 100644
index 00000000..2681b86d
--- /dev/null
+++ b/tests/fixtures/tls/test-server.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCLxrqWfRUdwywt
+Uf/u4W1MkimEiGDp32UAZ35DFciwQ3adVl/WrKSSxGmsztK1OJdGI6py7J3JLgcZ
+qzjs83jQPvBMQZDxADUf5kTjCHMF4a3RTt7Fh9mplZ3BxVMbKcIA4W2SdM31TbUK
+NEPybYU1hqmFvuB2w58zllpFKNt6GXApchC024UIrcYK7DlMgyHEdmTWnt8rnlQK
+Chddnx+EprO+RS78MJVLllNxdZSSq/1O5z8QXZegmP7D1yE6L9i3JsJUC3mwnIY1
+Qd5BLzyiUQT5ltA8q6/SEcshMVN6n2Hc7CXWTh2P6jgdA8ONql6JZH9i+ZuhrVtx
+BYZ6jOUrAgMBAAECggEAA/WLassnjPdD9L4CMixVIeUbTfNlo1o+NyZh+i56HRwG
+wRV66M7CaUcs+HBx9st1Os8J0JregUikz4JSaMvLW+1cdccpqYSS/KX+GjGEEzeS
+6n8sajXwjBApLsg9vrg5FDijwjvoFY8E62wS50wDiOzujP5HcgsVIwCa6qM/TD8K
+KVFBwLe9mL9yrGlSPCoj0vvt1R5VbEJh7PeYGfjUl30r0lWmP8ZlN6u2Q+aafEdy
+p3gIHPDRRv6vPSiR24tVQgv0FaTnXRAjYtwhNqlXPMhtHnBEtNA1kuGE9IZimPw/
+6P3L3SkbGKL5TRZL6wRfrYada6TeFX9ueE45ES2UuQKBgQDBK/2nCvQr6wlF45kb
+cQ7iunAcu5/DAlqQEXsOoef+WNNzIPxHCTH7YW4GhAUUSYyorLWi20tknrKJrITN
+IotSVgItzpfia7HB3EIuSBysMOS81W+R3HYRoigQigT8YwxiRSACf6x1O5J0J/jC
+tq2Galhwz8ILWaJ5SdACv+sBnQKBgQC5POFBz/GaDFooMFXU4kBw5e1YjkNkEQUh
+bFQL9wZBiP2Xv1QiRV+kbrHwlW1p9gUjOogWz05L2z7qPbtdaIV6MPwEgr2xBCt5
+mkuSWhUDtzQ2pVFsSY75SOg8CE0lHnHSyUEaPhac06qn3KShUcakpm5A684aUjjy
+1IH0huuLZwKBgAdBvc+uq6mStNB5UmEjiCmgU2Hg8omC5yAOaA8OqgZ2E8t5a8DH
+aadF67o273HpqW0Uv+YUUuq+w3pEjuCd8ZnwPTi3UCFjZlQgECRo9RrK42zsn7pd
+C9pxuwuUA8fveKGgcylk3new+zl93uyBrFcmW5gxVdrTTTU9PqE70HpJAoGAAaDH
+Wgy50uDI6hGCr5xNdLCQpXaaoQaFRQXutyw0od7SW8MSujph3NAcQEEP9R50bRrW
+l1y7E2+Z3fUs8GU6xxgnHuMHR8cBmtAAWgjwple13cUWMh1zZD1/zQdFpk3eMjwS
+lmh1SmuR1GfcCo7tcAUGcwufhBu05G15tux4pYECgYEAkzun1vyNNoC2CrGR3wIO
+SMwDy5JxZLIDAeAkidHsDrGzYzOekHLrHR2r47F4mJPUyiGVNCIz69fiVkcngshr
+sEeYFP5cN6Ip7chEp683xVBbKM+jsQtMmhadaL5tsulqgScPquCap+dPvaq/ogJ1
+2jbKCT447cR8wtviH/OnOgY=
+-----END PRIVATE KEY-----
diff --git a/tests/requirements.txt b/tests/requirements.txt
index fdd308a8..5c620785 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,4 +1,5 @@
requests==2.32.3
testflows==2.4.13
testflows.texts==2.0.211217.1011222
-PyYAML==6.0.1
\ No newline at end of file
+PyYAML==6.0.1
+cryptography==46.0.5
diff --git a/tests/scenarios/smoke.py b/tests/scenarios/smoke.py
index e6b328f5..d97d7169 100644
--- a/tests/scenarios/smoke.py
+++ b/tests/scenarios/smoke.py
@@ -2,9 +2,10 @@
import os
import tests.steps.kubernetes as kubernetes
-import tests.steps.minikube as minikube
+import tests.steps.local_cluster as local_cluster
import tests.steps.helm as helm
import tests.steps.clickhouse as clickhouse
+import tests.steps.tls as tls
from tests.steps.deployment import HelmState
@@ -13,6 +14,7 @@
"fixtures/02-replicated-with-users.yaml",
"fixtures/08-extracontainer-data-mount.yaml",
"fixtures/09-usersprofiles-settings.yaml",
+ "fixtures/10-tls.yaml",
# "fixtures/03-sharded-advanced.yaml",
# "fixtures/04-external-keeper.yaml",
# "fixtures/05-persistence-disabled.yaml",
@@ -50,8 +52,14 @@ def check_deployment(self, fixture_file, skip_external_keeper=True):
skip("Skipping external keeper test (requires pre-existing keeper)")
return
+ # Create TLS secrets if this is a TLS fixture
+ if "tls" in fixture_name:
+ with And("create TLS secrets"):
+ kubernetes.use_context(context_name=local_cluster.get_context_name())
+ tls.create_tls_secret(namespace=namespace)
+
with When("install ClickHouse with fixture configuration"):
- kubernetes.use_context(context_name="minikube")
+ kubernetes.use_context(context_name=local_cluster.get_context_name())
helm.install(
namespace=namespace, release_name=release_name, values_file=fixture_file
)
@@ -69,6 +77,37 @@ def check_deployment(self, fixture_file, skip_external_keeper=True):
namespace=namespace, admin_password=admin_password
)
+ # Add TLS configuration verification for TLS fixtures
+ if "tls" in fixture_name:
+ https_port = 8444
+ with And("verify TLS configuration in CHI"):
+ tls.verify_tls_files_in_chi(
+ namespace=namespace,
+ )
+
+ tls.verify_tls_secret_references_in_chi(
+ namespace=namespace,
+ )
+
+ tls.verify_openssl_config_on_pod(
+ namespace=namespace,
+ )
+
+ tls.verify_tls_files_on_pod(
+ namespace=namespace,
+ )
+
+ tls.verify_settings_ports_in_chi(
+ namespace=namespace,
+ expected_https_port=https_port,
+ )
+
+ with And("verify HTTPS endpoint certificate"):
+ tls.verify_https_certificate(
+ namespace=namespace,
+ https_port=https_port,
+ )
+
# Verify metrics endpoint is accessible
with And("verify metrics endpoint"):
clickhouse.verify_metrics_endpoint(namespace=namespace)
@@ -76,6 +115,7 @@ def check_deployment(self, fixture_file, skip_external_keeper=True):
with Finally("cleanup deployment"):
helm.uninstall(namespace=namespace, release_name=release_name)
kubernetes.delete_namespace(namespace=namespace)
+ kubernetes.remove_chi_finalizers(namespace=namespace)
@TestScenario
@@ -101,7 +141,7 @@ def check_upgrade(self, initial_fixture, upgrade_fixture):
note(f"Upgraded pods: {upgrade_state.get_expected_pod_count()}")
with When("install ClickHouse with initial configuration"):
- kubernetes.use_context(context_name="minikube")
+ kubernetes.use_context(context_name=local_cluster.get_context_name())
helm.install(
namespace=namespace, release_name=release_name, values_file=initial_fixture
)
@@ -158,6 +198,7 @@ def check_upgrade(self, initial_fixture, upgrade_fixture):
with Finally("cleanup deployment"):
helm.uninstall(namespace=namespace, release_name=release_name)
kubernetes.delete_namespace(namespace=namespace)
+ kubernetes.remove_chi_finalizers(namespace=namespace)
@TestFeature
@@ -188,9 +229,9 @@ def check_all_upgrades(self):
def feature(self):
"""Run all comprehensive smoke tests."""
- with Given("minikube environment"):
- minikube.setup_minikube_environment()
- kubernetes.use_context(context_name="minikube")
+ with Given("local Kubernetes environment"):
+ local_cluster.setup_local_cluster()
+ kubernetes.use_context(context_name=local_cluster.get_context_name())
Feature(run=check_all_fixtures)
diff --git a/tests/steps/clickhouse.py b/tests/steps/clickhouse.py
index ea565d05..433fcb36 100644
--- a/tests/steps/clickhouse.py
+++ b/tests/steps/clickhouse.py
@@ -478,8 +478,8 @@ def verify_profiles_and_user_settings(
actual_value = settings_cfg.get(key)
expected_value = str(value)
assert (
- actual_value == expected_value
- ), f"Expected setting {key}={expected_value}, got {actual_value}"
+ str(actual_value) == expected_value
+ ), f"Expected setting {key}={expected_value!r}, got {actual_value!r}"
note("Users, profiles, and settings configuration verified")
diff --git a/tests/steps/kubernetes.py b/tests/steps/kubernetes.py
index 35634062..7e54a7f4 100644
--- a/tests/steps/kubernetes.py
+++ b/tests/steps/kubernetes.py
@@ -7,7 +7,7 @@
def get_pods(self, namespace):
"""Get the list of pods in the specified namespace and return in a list."""
- pods = run(cmd=f"minikube kubectl -- get pods -n {namespace} -o json")
+ pods = run(cmd=f"kubectl get pods -n {namespace} -o json")
pods = json.loads(pods.stdout)["items"]
return [p["metadata"]["name"] for p in pods]
@@ -472,6 +472,29 @@ def get_secrets(self, namespace):
return [item["metadata"]["name"] for item in secrets_data.get("items", [])]
+@TestStep(Finally)
+def remove_chi_finalizers(self, namespace):
+ """Remove finalizers from CHI resources to unblock namespace deletion.
+
+ Unless the operator is externally deployed, after a helm uninstall, the
+ operator disappears but CHI resources will still have finalizers that block
+ namespace deletion, causing the namespace to persist with 'Terminating' status.
+
+ Args:
+        namespace: Kubernetes namespace containing the CHI resources to patch
+ """
+ result = run(
+ cmd=f"kubectl get chi -n {namespace} -o name",
+ )
+ chi_resource_names = result.stdout.strip().split()
+ for resource_name in chi_resource_names:
+ run(
+ cmd=f"kubectl patch {resource_name} -n {namespace} "
+ f"--type json -p '[{{\"op\": \"remove\", \"path\": \"/metadata/finalizers\"}}]'",
+ )
+        note(f"✓ Removed finalizers from {namespace}/{resource_name}")
+
+
@TestStep(Finally)
def delete_namespace(self, namespace):
"""Delete a Kubernetes namespace.
@@ -501,3 +524,16 @@ def delete_pod(self, namespace, pod_name):
"""
run(cmd=f"kubectl delete pod {pod_name} -n {namespace}", check=True)
note(f"✓ Pod {pod_name} deleted from namespace {namespace}")
+
+
+@TestStep(When)
+def get_file_contents_from_pod(self, namespace, pod_name, file_path):
+ """Read the contents of a file from a pod.
+
+ Args:
+ namespace: Kubernetes namespace
+ pod_name: Name of the pod
+ file_path: Absolute path to the file inside the pod
+ """
+ result = run(cmd=f"kubectl exec -n {namespace} {pod_name} -- cat {file_path}")
+ return result.stdout
diff --git a/tests/steps/local_cluster.py b/tests/steps/local_cluster.py
new file mode 100644
index 00000000..4e813d3d
--- /dev/null
+++ b/tests/steps/local_cluster.py
@@ -0,0 +1,32 @@
+from tests.steps.system import *
+import tests.steps.orbstack as orbstack
+import tests.steps.minikube as minikube
+import os
+
+
+def resolve_provider():
+ LOCAL_K8S_PROVIDER = os.environ.get("LOCAL_K8S_PROVIDER", "minikube").lower()
+ if LOCAL_K8S_PROVIDER not in (orbstack.CONTEXT_NAME, minikube.CONTEXT_NAME):
+ raise ValueError(f"Unknown LOCAL_K8S_PROVIDER: {LOCAL_K8S_PROVIDER}. "
+ "Supported values: "
+ f"'{minikube.CONTEXT_NAME}', "
+ f"'{orbstack.CONTEXT_NAME}'")
+
+ return LOCAL_K8S_PROVIDER
+
+
+@TestStep(Given)
+def setup_local_cluster(self):
+ """Set up a local Kubernetes cluster."""
+ provider = resolve_provider()
+ note(f"Using local Kubernetes provider: {provider}")
+
+ if provider == "minikube":
+ minikube.setup_minikube_environment()
+ elif provider == "orbstack":
+ orbstack.setup_orbstack_environment()
+
+
+def get_context_name():
+ # This is okay since the provider is tightly-coupled to the context name
+ return resolve_provider()
diff --git a/tests/steps/minikube.py b/tests/steps/minikube.py
index 82084c9f..bf51e5e7 100644
--- a/tests/steps/minikube.py
+++ b/tests/steps/minikube.py
@@ -2,6 +2,9 @@
from tests.steps.kubernetes import use_context
+CONTEXT_NAME = "minikube"
+
+
@TestStep(Given)
def minikube_start(self, cpus, memory):
"""Start minikube."""
diff --git a/tests/steps/orbstack.py b/tests/steps/orbstack.py
new file mode 100644
index 00000000..4800641f
--- /dev/null
+++ b/tests/steps/orbstack.py
@@ -0,0 +1,47 @@
+from tests.steps.system import *
+from tests.steps.kubernetes import use_context
+
+
+CONTEXT_NAME = "orbstack"
+
+
+@TestStep(Given)
+def orbstack_start(self):
+ """Start OrbStack."""
+
+ if orbstack_status():
+ return
+
+ run(cmd="orbctl start")
+
+
+@TestStep(When)
+def orbstack_status(self):
+ """Check if OrbStack is running."""
+
+ try:
+ result = run(cmd="orbctl status", check=False)
+ return result.returncode == 0 and "Running" in result.stdout
+ except:
+ return False
+
+
+@TestStep(Given)
+def setup_orbstack_environment(self, clean_up=True):
+ """Set up OrbStack environment with context."""
+
+ orbstack_start()
+
+ use_context(context_name=CONTEXT_NAME)
+
+ yield
+
+ if clean_up:
+ cleanup_orbstack_environment()
+
+
+@TestStep(Finally)
+def cleanup_orbstack_environment(self):
+ """Clean up OrbStack environment."""
+
+ note("OrbStack environment lifecycle is managed outside of this framework.")
diff --git a/tests/steps/tls.py b/tests/steps/tls.py
new file mode 100644
index 00000000..2c36f977
--- /dev/null
+++ b/tests/steps/tls.py
@@ -0,0 +1,210 @@
+import os
+import xml.etree.ElementTree as ET
+from datetime import datetime, timezone
+
+from cryptography.hazmat.primitives.serialization import load_pem_parameters, load_pem_private_key
+from cryptography.x509 import load_pem_x509_certificate
+from cryptography.x509.oid import ExtensionOID, NameOID
+
+from tests.steps.system import *
+from tests.steps.kubernetes import *
+import tests.steps.clickhouse as clickhouse
+
+
+@TestStep(Then)
+def verify_tls_files_in_chi(self, namespace):
+ """Verify TLS files are present in CHI spec."""
+ chi_data = clickhouse.get_chi_info(namespace=namespace)
+
+ files = chi_data.get("spec", {}).get("configuration", {}).get("files", {})
+
+ for expected_file in ["config.d/foo.crt", "bar.key", "dhparam.pem", "config.d/openssl.xml"]:
+ assert expected_file in files, f"Expected TLS file '{expected_file}' not found in CHI"
+ note(f"✓ TLS file present: {expected_file}")
+
+
+@TestStep(Then)
+def verify_tls_secret_references_in_chi(self, namespace):
+ """Verify secret references are correct in CHI spec."""
+ chi_data = clickhouse.get_chi_info(namespace=namespace)
+
+ files = chi_data.get("spec", {}).get("configuration", {}).get("files", {})
+
+ expected_secrets = {
+ "bar.key": "clickhouse-certs",
+ "dhparam.pem": "clickhouse-certs",
+ }
+
+ for file_key, expected_secret_name in expected_secrets.items():
+ assert file_key in files, f"File '{file_key}' not found in CHI"
+ file_config = files[file_key]
+
+ assert isinstance(file_config, dict), f"Expected dict for secret ref in '{file_key}'"
+ assert "valueFrom" in file_config, f"No valueFrom in '{file_key}'"
+
+ secret_ref = file_config["valueFrom"]["secretKeyRef"]
+ actual_secret_name = secret_ref["name"]
+
+ assert actual_secret_name == expected_secret_name, \
+ f"Expected secret '{expected_secret_name}' for '{file_key}', got '{actual_secret_name}'"
+
+ note(f"✓ Secret reference correct: {file_key} → {actual_secret_name}")
+
+
+@TestStep(Then)
+def verify_settings_ports_in_chi(self, namespace, expected_https_port):
+ """Verify settings block has correct port configuration in CHI spec."""
+ chi_data = clickhouse.get_chi_info(namespace=namespace)
+
+ settings = chi_data.get("spec", {}).get("configuration", {}).get("settings", {})
+ assert settings.get("https_port") == expected_https_port, \
+ f"Expected https_port: {expected_https_port}, got: {settings.get('https_port')!r}"
+ assert "tcp_port_secure" not in settings, \
+ f"Did not expect 'tcp_port_secure' in settings, but found: {settings.get('tcp_port_secure')!r}"
+ note(f"✓ Settings block only has https_port as explicitly set: {expected_https_port}")
+
+
+@TestStep(Then)
+def verify_openssl_config_on_pod(self, namespace):
+ """Verify openssl.xml format on the ClickHouse pod."""
+ pod_name = clickhouse.get_ready_clickhouse_pod(namespace=namespace)
+
+ content = get_file_contents_from_pod(
+ namespace=namespace,
+ pod_name=pod_name,
+ file_path="/etc/clickhouse-server/config.d/openssl.xml",
+ )
+
+ try:
+ root = ET.fromstring(content)
+ except ET.ParseError as e:
+ raise AssertionError(f"openssl.xml is not valid XML: {e}")
+
+ server_node = root.find(".//openSSL/server")
+    assert server_node is not None, "openssl.xml missing <openSSL><server> node"
+
+ note(f"✓ openssl.xml present and valid on pod at /etc/clickhouse-server/config.d/openssl.xml")
+
+
+@TestStep(Then)
+def verify_tls_files_on_pod(self, namespace):
+ """Verify TLS file contents on the ClickHouse pod."""
+
+ pod_name = clickhouse.get_ready_clickhouse_pod(namespace=namespace)
+
+ cert_pem = get_file_contents_from_pod(
+ namespace=namespace,
+ pod_name=pod_name,
+ file_path="/etc/clickhouse-server/config.d/foo.crt",
+ )
+
+ key_pem = get_file_contents_from_pod(
+ namespace=namespace,
+ pod_name=pod_name,
+ file_path="/etc/clickhouse-server/secrets.d/bar.key/clickhouse-certs/server.key",
+ )
+
+ cert = load_pem_x509_certificate(cert_pem.encode())
+ key = load_pem_private_key(key_pem.encode(), password=None)
+
+ cert_modulus = cert.public_key().public_numbers().n
+ key_modulus = key.public_key().public_numbers().n
+
+ assert cert_modulus == key_modulus, "Certificate and private key moduli do not match"
+ note("✓ Certificate and private key moduli match")
+
+ dh_pem = get_file_contents_from_pod(
+ namespace=namespace,
+ pod_name=pod_name,
+ file_path="/etc/clickhouse-server/secrets.d/dhparam.pem/clickhouse-certs/dhparam.pem",
+ )
+
+ dh_params = load_pem_parameters(dh_pem.encode())
+ assert dh_params.parameter_numbers().g == 2, \
+ f"Expected DH params generator g=2, got g={dh_params.parameter_numbers().g}"
+ note("✓ DH params valid (g=2)")
+
+
+@TestStep(Then)
+def verify_https_certificate(self, namespace, https_port):
+ """Verify the HTTPS endpoint serves TLS with the correct certificate.
+
+ Performs a TLS handshake against the ClickHouse HTTPS port from within
+ the pod, then validates the served certificate against the configured one.
+ """
+ pod_name = clickhouse.get_ready_clickhouse_pod(namespace=namespace)
+
+ result = run(
+ cmd=f"kubectl exec -n {namespace} {pod_name} -- "
+ f"sh -c 'openssl s_client -connect localhost:{https_port} "
+            f"</dev/null 2>&1'",
+ check=False,
+ )
+
+ served_cert = load_pem_x509_certificate(result.stdout.encode())
+ note(f"✓ TLS handshake successful on port {https_port}")
+
+ now = datetime.now(timezone.utc)
+ assert served_cert.not_valid_before_utc <= now, \
+ f"Certificate not yet valid (notBefore: {served_cert.not_valid_before_utc})"
+ assert served_cert.not_valid_after_utc > now, \
+ f"Certificate has expired (notAfter: {served_cert.not_valid_after_utc})"
+ note(
+ f"✓ Certificate valid: "
+ f"{served_cert.not_valid_before_utc.date()} to "
+ f"{served_cert.not_valid_after_utc.date()}"
+ )
+
+ cn_attrs = served_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
+ assert cn_attrs, "Certificate has no CN"
+ cn = cn_attrs[0].value
+ note(f"✓ Certificate CN: {cn}")
+
+ # The cryptography library has no get-by-name accessor for X.509
+ # extensions, so we iterate all extensions and match by OID to find
+ # the Subject Alternative Name extension.
+ san_names = []
+ for ext in served_cert.extensions:
+ if ext.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME:
+ san_names = [str(name.value) for name in ext.value]
+ break
+
+ assert san_names, "Certificate has no SANs"
+ assert any(cn in san for san in san_names), \
+ f"CN '{cn}' not found as substring of any SAN: {san_names}"
+ note(f"✓ CN is substring of SAN (SANs: {san_names})")
+
+ configured_cert_pem = get_file_contents_from_pod(
+ namespace=namespace,
+ pod_name=pod_name,
+ file_path="/etc/clickhouse-server/config.d/foo.crt",
+ )
+ configured_cert = load_pem_x509_certificate(configured_cert_pem.encode())
+
+ assert served_cert == configured_cert, \
+ f"Served cert (serial {served_cert.serial_number:#x}) " \
+ f"!= configured cert (serial {configured_cert.serial_number:#x})"
+ note(f"✓ Served certificate matches configured (serial {served_cert.serial_number:#x})")
+
+
+@TestStep(When)
+def create_tls_secret(self, namespace):
+ """Create a Kubernetes secret with TLS files from ../fixtures/tls/."""
+
+ tests_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ certs_dir = os.path.join(tests_dir, "fixtures", "tls")
+
+ cert_file = os.path.join(certs_dir, "server.crt")
+ key_file = os.path.join(certs_dir, "test-server.key")
+ dhparam_file = os.path.join(certs_dir, "dhparam.pem")
+
+ # At time of secret creation, the namespace might not exist
+ run(cmd=f"kubectl create namespace {namespace}", check=False)
+ # Optimistically delete secret in case it already exists for idempotency
+ run(cmd=f"kubectl delete secret clickhouse-certs -n {namespace}", check=False)
+ run(cmd=f"kubectl create secret generic clickhouse-certs -n {namespace} "
+ f"--from-file=server.crt={cert_file} "
+ f"--from-file=server.key={key_file} "
+ f"--from-file=dhparam.pem={dhparam_file}")
+
+ note(f"✓ Created TLS secret: clickhouse-certs")