diff --git a/Taskfile.yml b/Taskfile.yml new file mode 100644 index 00000000..9a8dde15 --- /dev/null +++ b/Taskfile.yml @@ -0,0 +1,674 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +version: '3' + +vars: + G: "" + N: "" + T: "" + P: "" + BASETAG: 0.1.0-incubating + KUBE: + sh: ./detect.sh + NS: "nuvolaris" + PREFIX: "" + TAG: + +dotenv: + - .env + +includes: + d: + taskfile: TaskfileDev.yml + t: + taskfile: TaskfileTest.yml + b: + taskfile: TaskfileBuild.yml + o: + taskfile: TaskfileOlaris.yml + kind: + taskfile: clusters/kind.yml + dir: clusters + eks: + taskfile: clusters/eks.yml + dir: clusters + aks: + taskfile: clusters/aks.yml + dir: clusters + lks: + taskfile: clusters/lks.yml + dir: clusters + gke: + taskfile: clusters/gke.yml + dir: clusters + mik: + taskfile: clusters/microk8s.yml + dir: clusters + k3s: + taskfile: clusters/k3s.yml + dir: clusters + osh: + taskfile: clusters/openshift.yml + dir: clusters + sys: + taskfile: actions/system.yml + dir: actions + talos: + taskfile: clusters/talos-gcp.yml + dir: clusters + +tasks: + + default: + - task: use + + setup: + deps: + - update-files-from-openwhisk + - ssh-key + - configure-env + cmds: + - mkdir -p ~/.kube ; touch ~/.kube/config + - poetry install + status: + - 
test -e $GOBIN/kopf + + env: env + + watch: watch kubectl -n {{.NS}} get nodes,pod,svc,pvc,ingress + watch-osh: watch kubectl -n {{.NS}} get nodes,pod,svc,pvc,route + watch-cert: watch kubectl -n {{.NS}} get ingress,ClusterIssuers,Certificates,CertificateRequests,Orders,Challenges + watch-pod: watch kubectl -n {{.NS}} get po,job --no-headers + + logs: > + rm -f nuvolaris-operator.log ; + kubectl -n nuvolaris logs pod/nuvolaris-operator -f | tee nuvolaris-operator.log + + cli: + - task: d:cli + + run: + - task: sys:prepare + - task: t:permission + - task: d:run + + irun: + - task: instance + - task: d:run + + permission: + - task: t:permission + + operator: + - task: t:operator + + instance: + - task: t:instance + + instance-wfx: + - task: d:instance-wfx + + instance-and-log: + - task: d:instance + - kubectl -n nuvolaris logs pod/nuvolaris-operator -f + + mongo: + - task: t:mongo + + minio: + - task: t:minio + + postgres: + - task: t:postgres + + minimal: + #- task: permission + - task: operator + - task instance WHISK=minimal + - task: config + - task: hello + + all: + - task: permission + - task: operator + - task: instance + - task: config + - task: hello + - task: redis + - task: mongo + - task: minio + - task: postgres + + config: + - task: t:config + + hello: + - task: t:hello + + redis: + - task: t:redis + + workflow: + - kubectl -n nuvolaris apply -f tests/workflow-test.yaml + + defin: + - task: d:defin + + # old., to be removed + image-tag: + - git tag -d $(git tag) + - git tag -f {{.P}}{{.BASETAG}}.$(date +%y%m%d%H%M) + - env PAGER= git tag + + # configure env + configure-env: + cmds: + - | + if ! 
test -e .env + then echo "please copy .env.dist in .env and add the keys" + fi + + # update configuration files from openwhisk source + update-files-from-openwhisk: + ignore_error: true + cmds: + - | + cp -v ../nuvolaris/nuvolaris-controller/openwhisk/ansible/files/*.json nuvolaris/files + cp -v ../nuvolaris/nuvolaris-controller/openwhisk/bin/wskadmin tools/cli/wsk/wskadmin.py + cp -v ../nuvolaris/nuvolaris-controller/openwhisk/tools/admin/*.py tools/cli/wsk + sources: + - ../nuvolaris/nuvolaris-controller/openwhisk/ansible/files/*.json + - ../nuvolaris/nuvolaris-controller/openwhisk/bin/wskadmin + - ../nuvolaris/nuvolaris-controller/openwhisk/tools/admin/*.py + generates: + - nuvolaris/files/*.json + - nuvolaris/tools/cli/wsk/*.py + + # generate ssh keys + ssh-key: + cmds: + - test -f clusters/id_rsa || ssh-keygen -b 2048 -t rsa -f clusters/id_rsa -q -N "" + - ssh-keygen -y -f clusters/id_rsa >clusters/id_rsa.pub + + clean: + cmds: + - cmd: kubectl -n nuvolaris delete wsku --all + ignore_error: true + - cmd: kubectl -n nuvolaris delete kubegres --timeout=60s + ignore_error: true + - cmd: kubectl -n nuvolaris delete milvus --timeout=60s + ignore_error: true + - cmd: kubectl -n nuvolaris delete wsk/controller --grace-period=0 --timeout=5s + ignore_error: true + - cmd: task defin + ignore_error: true + - cmd: kubectl -n nuvolaris delete all --all --grace-period=0 + ignore_error: true + - cmd: kubectl -n nuvolaris delete pvc --all --grace-period=0 + ignore_error: true + - cmd: kubectl -n nuvolaris delete ing --all --grace-period=0 + ignore_error: true + - cmd: kubectl -n nuvolaris delete cm/config --grace-period=0 + ignore_error: true + - cmd: kubectl delete clusterissuers/letsencrypt-issuer + ignore_error: true + + + utest: + cmds: + - | + for test in nuvolaris/{{.T}}*.py + do echo "*** [{{.KUBE}}] $test" + poetry run python3 -m doctest $test {{.CLI_ARGS}} + done + silent: true + + iclean: rm -f deploy/*/kustomization.yaml deploy/*/__* deploy/*/*_generated.yaml + + 
itest: + cmds: + - task: iclean + - | + kubectl apply -f deploy/nuvolaris-permissions + rm -f _failed.txt + for test in tests/{{.T}}*.ipy tests/{{.KUBE}}/{{.T}}*.ipy + do + if test -e "$test" + then echo "*** [{{.KUBE}}] $test" + rm -f deploy/*/kustomization.yaml deploy/*/__* + if poetry run ipython $test {{.CLI_ARGS}} + then echo "OK: $test" + else echo "FAIL: $test" + echo $test >>_failed.txt + fi + fi + done + if test -e _failed.txt + then echo "*** FAILED TESTS:" + cat _failed.txt + fi + silent: true + + dtest: + cmds: + - task: permission + - task: operator + - task: instance + - task: actions + + actions: + - task: t:config + - task: t:hello + - task: t:redis + - task: t:echo + + test: + - task: clean + - task: utest + - task: itest + - task: dtest + + debug: + - poetry run ipython profile create + - cp test_profile.ipy ~/.ipython/profile_default/startup/ + - task: uitest + + all-kubes: + cmds: + - |- + if test -z "{{.CLI_ARGS}}" + then echo 'use "task all-kubes -- runs the target against all the available kubes' + else for cfg in clusters/*.kubeconfig + do /usr/games/cowsay -f duck $(basename $cfg .kubeconfig) 2>/dev/null + cp -v $cfg ~/.kube/config >/dev/null + task {{.CLI_ARGS}} + done + fi + silent: true + + kube-test: + cmds: + - task dtest 2>/dev/null >/dev/null + + use: + cmds: + - |- + if test -z "{{.N}}" + then echo "*** current: {{.KUBE}}" + ls clusters/*.kubeconfig | sort | awk '{ print NR, $0 }' + echo "*** select with 'task #'" + else CFG="$(ls -1 clusters/*.kubeconfig | tail +{{.N}} | head -1)" + cp $CFG ~/.kube/config + echo "cluster: $(./detect.sh)" + kubectl get nodes + fi + silent: true + + build-and-load: + - task: b:build-and-load + + build-and-push: + - task: b:build-and-push + + buildx-and-push: + - task: b:buildx-and-push + + docker-login: + - task: b:docker-login + + # openserverless-operator section + tag: + silent: true + desc: generate a new tag based on the current time + cmds: + - git tag -d $(git tag) || true + - git tag -f 
{{.BASETAG}}.$(date +%y%m%d%H%M) + - env PAGER= git tag + + install-registry: + desc: install a local registry + cmds: + - | + if [ -d /etc/rancher/k3s ]; then + echo "k3s detected: updating registries.yaml"; + sudo cp registries.yaml /etc/rancher/k3s/registries.yaml; + sudo systemctl restart k3s; + else + echo "k3s not detected: skipping k3s registries config"; + fi + - | + set -e + if helm repo add twuni https://helm.twun.io && \ + helm install docker-registry twuni/docker-registry \ + --namespace kube-system \ + --set image.tag=2.8.3 \ + --set service.type=ClusterIP \ + --set service.port=5000; then + echo "Helm registry installed" + else + echo "Helm registry failed, applying fallback manifests..." + kubectl -n kube-system apply -f deploy/kube-system-registry/registry-deploy.yaml + kubectl -n kube-system apply -f deploy/kube-system-registry/registry-svc.yaml + kubectl -n kube-system rollout status deployment/docker-registry --timeout=180s || true + fi + # Espone anche NodePort per pull da runtime host + kubectl -n kube-system apply -f deploy/kube-system-registry/registry-nodeport.yaml || true + status: + - kubectl -n kube-system get po -l app=docker-registry | grep docker-registry + + tag-commit-push: + desc: tag, commit and push to your default upstream repo (see README.md) + cmds: + - task: tag + - git commit -m "{{.TAG}}" -a || true + - git push || true + + kaniko-build: + desc: build in current kubernetes with Kaniko + env: + TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || echo latest + cmds: + - test -n "$GITHUB_USER" || true "did you configure your .env?" 
+ - envsubst _kaniko-build.yaml + - kubectl -n default delete job/kaniko-build || true + - kubectl -n default apply -f _kaniko-build.yaml + - kubectl -n default wait --for=condition=complete job/kaniko-build --timeout=600s + + kaniko-build-patched: + desc: build (patched Dockerfile) con initContainer per whisk-system fix + env: + TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || echo latest + GITHUB_USER: + sh: if [ -n "$GITHUB_USER" ]; then echo "$GITHUB_USER"; else whoami; fi + cmds: + - envsubst < kaniko-build-patched.yaml > _kaniko-build-patched.yaml + - kubectl -n default delete job/kaniko-build-patched || true + - kubectl -n default apply -f _kaniko-build-patched.yaml + - kubectl -n default logs -f $(kubectl -n default get pods -l app=kaniko-build-patched -o jsonpath='{.items[0].metadata.name}') --container kaniko || true + - kubectl -n default wait --for=condition=complete job/kaniko-build-patched --timeout=900s || (echo "Kaniko patched build failed" && exit 1) + + build-logs: + desc: show logs of the latest build + cmds: + - kubectl -n default logs -l app=kaniko-build -f + + build: + desc: build the operator image + cmds: + - task: install-registry + - task: tag-commit-push + - task: kaniko-build + - task: build-logs + + deploy: + desc: deploy the current operator image + env: + TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || echo latest + cmds: + - kubectl -n nuvolaris apply -f deploy/nuvolaris-permissions || true + - envsubst _operator-deploy.yaml + - kubectl -n nuvolaris apply -f _operator-deploy.yaml + - kubectl -n nuvolaris wait --for=condition=Ready pod/nuvolaris-operator --timeout=180s || true + - kubectl -n nuvolaris get pod nuvolaris-operator -o wide || true + + # --- Spark standard flow (come gli altri operatori) --- + spark-standard: + desc: build (kaniko) e deploy operatore con Spark + istanza whisk-spark + env: + GITHUB_USER: + sh: if [ -n "$GITHUB_USER" ]; then echo "$GITHUB_USER"; else whoami; fi + cmds: + - | + if [ -n 
"$GHCR_USER" ] && [ -n "$GHCR_TOKEN" ]; then + echo "Rilevate credenziali GHCR: uso pipeline ghcr (build+push)"; + export GHCR_USER GHCR_TOKEN + task spark-all-ghcr || { echo "Pipeline GHCR fallita"; exit 1; } + echo "Deployment GHCR completato, procedo con istanza Spark"; + WHISK=whisk-spark task t:instance; + exit 0; # Evita esecuzione ramo interno registry + else + echo "Credenziali GHCR non presenti: uso percorso registry interno/kind"; + fi + - | + if [ "$(./detect.sh)" = "kind" ]; then + echo "Cluster kind rilevato"; + if command -v kind >/dev/null 2>&1; then + echo "kind CLI presente: uso build-and-load"; + if ! task build-and-load MY_OPERATOR_IMAGE=docker-registry.kube-system.svc.cluster.local:5000/$GITHUB_USER/openserverless-operator; then + echo "kind load fallita: fallback a Kaniko"; + task install-registry || exit 1; + task tag-commit-push || true; + task kaniko-build-patched || exit 1; + fi + else + echo "kind CLI assente: provo installazione veloce"; + curl -fsSL -o kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-amd64 && chmod +x kind && sudo mv kind /usr/local/bin/kind 2>/dev/null || mv kind "$HOME/.local/bin/kind" 2>/dev/null || true; + if command -v kind >/dev/null 2>&1; then + echo "Installazione kind riuscita: eseguo load"; + if ! 
task build-and-load MY_OPERATOR_IMAGE=docker-registry.kube-system.svc.cluster.local:5000/$GITHUB_USER/openserverless-operator; then + echo "kind load fallita dopo install: fallback a Kaniko"; + task install-registry || exit 1; + task tag-commit-push || true; + task kaniko-build-patched || exit 1; + fi + else + echo "Installazione kind fallita: fallback a Kaniko"; + task install-registry || exit 1; + task tag-commit-push || true; + task kaniko-build-patched || exit 1; + fi + fi + else + echo "Cluster $(./detect.sh): uso Kaniko"; + task install-registry || exit 1; + task tag-commit-push || true; + task kaniko-build-patched || exit 1; + fi + - kubectl -n nuvolaris apply -f deploy/nuvolaris-permissions || true + - | + TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo latest) + REGISTRY_HOST="docker-registry.kube-system.svc.cluster.local:5000" + if [ "$(./detect.sh)" != "kind" ]; then + if kubectl -n kube-system get svc docker-registry-nodeport >/dev/null 2>&1; then + NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}') + if [ -n "$NODE_IP" ]; then + REGISTRY_HOST="${NODE_IP}:30500" + echo "Uso NodePort registry $REGISTRY_HOST per image pull" + fi + fi + fi + export TAG + export MY_OPERATOR_IMAGE=${REGISTRY_HOST}/$GITHUB_USER/openserverless-operator + # Imposta immagine Spark: immagine interna se no GHCR + if [ -n "$GHCR_USER" ] && [ -n "$GHCR_TOKEN" ]; then + export MY_OPERATOR_IMAGE_SPARK="ghcr.io/$GHCR_USER/openserverless-operator:spark-dev" + else + export MY_OPERATOR_IMAGE_SPARK="${REGISTRY_HOST}/$GITHUB_USER/openserverless-operator:$TAG" + fi + envsubst < deploy/nuvolaris-operator/operator-pod-spark.yaml > _operator-pod-spark.yaml + - kubectl -n nuvolaris delete pod nuvolaris-operator-spark --ignore-not-found + - kubectl -n nuvolaris apply -f _operator-pod-spark.yaml + - kubectl -n nuvolaris wait --for=condition=Ready pod/nuvolaris-operator-spark --timeout=180s || true + - kubectl -n nuvolaris get pod 
nuvolaris-operator-spark -o wide || true + - | + WHISK=whisk-spark task t:instance + # --- Spark Operator (GHCR) --- + spark-login-ghcr: + desc: login a GHCR (richiede GHCR_USER e GHCR_TOKEN in .env) + cmds: + - test -n "$GHCR_USER" || (echo "GHCR_USER mancante" && exit 1) + - test -n "$GHCR_TOKEN" || (echo "GHCR_TOKEN mancante" && exit 1) + - echo "$GHCR_TOKEN" | podman login ghcr.io -u "$GHCR_USER" --password-stdin + spark-build-ghcr: + desc: build immagine operatore con Spark (tag ghcr.io/$GHCR_USER/openserverless-operator:spark-dev) + cmds: + - test -n "$GHCR_USER" || (echo "GHCR_USER mancante" && exit 1) + - podman build -t ghcr.io/$GHCR_USER/openserverless-operator:spark-dev -f Dockerfile . + spark-push-ghcr: + desc: push immagine operatore Spark su GHCR + deps: [spark-login-ghcr] + cmds: + - test -n "$GHCR_USER" || (echo "GHCR_USER mancante" && exit 1) + - podman push ghcr.io/$GHCR_USER/openserverless-operator:spark-dev + spark-deploy-ghcr: + desc: deploy pod operatore Spark usando manifest parametrizzato + env: + GHCR_USER: ${GHCR_USER} + cmds: + - test -n "$GHCR_USER" || (echo "GHCR_USER mancante" && exit 1) + - envsubst < deploy/nuvolaris-operator/operator-pod-spark.yaml > _operator-pod-spark.yaml + - kubectl -n nuvolaris apply -f deploy/nuvolaris-permissions || true + - kubectl -n nuvolaris apply -f _operator-pod-spark.yaml + - kubectl -n nuvolaris wait --for=condition=Ready pod/nuvolaris-operator-spark --timeout=180s || true + - kubectl -n nuvolaris get pod nuvolaris-operator-spark -o wide || true + spark-all-ghcr: + desc: build+push+deploy operatore Spark (GHCR) + cmds: + - task: spark-build-ghcr + - task: spark-push-ghcr + - task: spark-deploy-ghcr + + # --- SparkJob CRD Testing --- + sparkjob-deploy-crd: + desc: deploy SparkJob CRD + cmds: + - kubectl apply -f deploy/nuvolaris-permissions/sparkjob-crd.yaml + - kubectl get crd sparkjobs.nuvolaris.org || true + + sparkjob-test-examples: + desc: test SparkJob examples (PySpark pi calculation) + deps: 
[sparkjob-deploy-crd] + cmds: + - echo "=== Testing PySpark Example ===" + - kubectl apply -f tests/sparkjob-examples.yaml + - echo "Waiting for SparkJob to complete..." + - | + for i in {1..30}; do + STATUS=$(kubectl get sparkjob pyspark-example -o jsonpath='{.status.phase}' 2>/dev/null || echo "NotFound") + echo "Status: $STATUS (attempt $i/30)" + if [ "$STATUS" = "Succeeded" ] || [ "$STATUS" = "Failed" ]; then + break + fi + sleep 10 + done + - echo "=== SparkJob Status ===" + - kubectl get sparkjob pyspark-example -o yaml | grep -A 20 "status:" || true + - echo "=== Driver Job Status ===" + - kubectl get job pyspark-example-driver -o wide || true + - echo "=== Driver Pod Logs ===" + - kubectl logs job/pyspark-example-driver --tail=50 || true + + sparkjob-test-wordcount: + desc: test SparkJob WordCount example (inline PySpark code) + deps: [sparkjob-deploy-crd] + cmds: + - echo "=== Testing PySpark WordCount ===" + - | + cat </dev/null || echo "NotFound") + echo "Status: $STATUS (attempt $i/20)" + if [ "$STATUS" = "Succeeded" ] || [ "$STATUS" = "Failed" ]; then + break + fi + sleep 10 + done + - echo "=== WordCount Results ===" + - kubectl logs job/pyspark-wordcount-test-driver --tail=30 || true + + sparkjob-clean: + desc: cleanup SparkJob test resources + cmds: + - kubectl delete sparkjob pyspark-example --ignore-not-found + - kubectl delete sparkjob pyspark-wordcount --ignore-not-found + - kubectl delete sparkjob pyspark-wordcount-test --ignore-not-found + - kubectl delete sparkjob scala-sparkpi --ignore-not-found + - kubectl delete job pyspark-example-driver --ignore-not-found + - kubectl delete job pyspark-wordcount-driver --ignore-not-found + - kubectl delete job pyspark-wordcount-test-driver --ignore-not-found + - kubectl delete job scala-sparkpi-driver --ignore-not-found + - echo "SparkJob test resources cleaned up" + + sparkjob-status: + desc: show status of all SparkJobs and related resources + cmds: + - echo "=== SparkJobs ===" + - kubectl get 
sparkjobs -o wide || echo "No SparkJobs found" + - echo "=== Driver Jobs ===" + - kubectl get jobs -l app=spark,component=driver || echo "No driver jobs found" + - echo "=== Driver Pods ===" + - kubectl get pods -l app=spark,component=driver || echo "No driver pods found" + - echo "=== Spark Cluster Status ===" + - kubectl get pods -l app=spark | grep -E "(master|worker|history)" || echo "Spark cluster not running" + + + shell: + - kubectl -n nuvolaris exec --stdin --tty {{.POD}} -- /bin/bash + + 1: task use N=1 + 2: task use N=2 + 3: task use N=3 + 4: task use N=4 + 5: task use N=5 + 6: task use N=6 + 7: task use N=7 + 8: task use N=8 + 9: task use N=9 + diff --git a/TaskfileBuild.yml b/TaskfileBuild.yml new file mode 100644 index 00000000..8a9fe2b3 --- /dev/null +++ b/TaskfileBuild.yml @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +version: '3' + +vars: + # taken from the tag + OPERATOR_TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || git rev-parse --short HEAD + # taken from the Dockerfile - ovverdable with MY_OPERATOR_IMAGE + OPERATOR_IMAGE: + sh: awk -F= '/ARG OPERATOR_IMAGE_DEFAULT=/ { print $2 ; exit }' Dockerfile + +tasks: + image: + cmds: + - echo "Operator=${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}:{{.OPERATOR_TAG}}" + silent: true + + docker-login: > + echo $GITHUB_TOKEN | docker login ghcr.io -u $GITHUB_USER --password-stdin + + build: + - > + docker build . + -t ${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}:{{.OPERATOR_TAG}} + --build-arg OPERATOR_IMAGE_DEFAULT=${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}} + --build-arg OPERATOR_TAG_DEFAULT={{.OPERATOR_TAG}} --load + + build-and-push: + - task: docker-login + - task: build + - docker push ${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}:{{.OPERATOR_TAG}} + + build-and-load: + - task: build + - > + kind load docker-image + ${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}:{{.OPERATOR_TAG}} + --name=$(kind get clusters | head -1) + + buildx-and-push: + - > + docker buildx build + --platform linux/amd64,linux/arm64 + -t ${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}:{{.OPERATOR_TAG}} + --build-arg OPERATOR_IMAGE_DEFAULT=${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}} + --build-arg OPERATOR_TAG_DEFAULT={{.OPERATOR_TAG}} + . --push diff --git a/TaskfileDev.yml b/TaskfileDev.yml new file mode 100644 index 00000000..17f05c47 --- /dev/null +++ b/TaskfileDev.yml @@ -0,0 +1,104 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +version: '3' + +vars: + WHISK: '{{default "whisk" .WHISK}}' + CONFIG: "tests/{{.KUBE}}/{{.WHISK}}.yaml" + # taken from the tag + OPERATOR_TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || git rev-parse --short HEAD + # taken from the Dockerfile - ovveridable with MY_OPERATOR_IMAGE + OPERATOR_IMAGE: + sh: awk -F= '/ARG OPERATOR_IMAGE_DEFAULT=/ { print $2 ; exit }' Dockerfile + # taken from the Dockerfile + CONTROLLER_TAG: + sh: awk -F= '/ENV CONTROLLER_TAG=/ { print $2 ; exit }' Dockerfile + CONTROLLER_IMAGE: + sh: awk -F= '/ENV CONTROLLER_IMAGE=/ { print $2 ; exit }' Dockerfile + # taken from the Dockerfile + INVOKER_TAG: + sh: awk -F= '/ENV INVOKER_TAG=/ { print $2 ; exit }' Dockerfile + INVOKER_IMAGE: + sh: awk -F= '/ENV INVOKER_IMAGE=/ { print $2 ; exit }' Dockerfile + APIKUBE: + sh: kubectl config view -o json | jq -r '.clusters[0].cluster.server' + +env: + APIHOST: + sh: echo {{.APIKUBE}} | sed -e 's|^.*//\(.*\):.*$|\1|' + APIHOST_OSH: + sh: echo {{.APIKUBE}} | sed -e 's|^.*//\(.*\):.*$|\1|' | sed -e 's/^api./nuvolaris.apps./' + +tasks: + + env: env + + cli: + cmds: + - env OPERATOR_IMAGE="${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}" poetry run ipython -i profile.ipy + env: + CONTROLLER_IMAGE: "{{.CONTROLLER_IMAGE}}" + CONTROLLER_TAG: "{{.CONTROLLER_TAG}}" + OPERATOR_TAG: "{{.OPERATOR_TAG}}" + + run: + cmds: + # note that the .env is loaded after the env parameters + - env OPERATOR_IMAGE="${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}}" ./run.sh + env: + CONTROLLER_IMAGE: "{{.CONTROLLER_IMAGE}}" + CONTROLLER_TAG: "{{.CONTROLLER_TAG}}" + 
OPERATOR_TAG: "{{.OPERATOR_TAG}}" + COUCHDB_SERVICE_HOST: "localhost" + MINIO_API_HOST: "localhost" + OW_CONTROLLER_HOST : "localhost" + OW_CONTROLLER_PORT : 3233 + INVOKER_IMAGE: "{{.INVOKER_IMAGE}}" + INVOKER_TAG: "{{.INVOKER_TAG}}" + + instance: + - envsubst <{{.CONFIG}} | kubectl -n nuvolaris apply -f - + + show: + - echo "*** {{.CONFIG}}" + - envsubst <{{.CONFIG}} + + destroy: kubectl -n nuvolaris delete wsk/controller + + couchdb: + - rm -f deploy/*/kustomization.yaml deploy/*/__* + - task: permission + - kubectl apply -f deploy/couchdb + + couchdb-forward: + - kubectl -n nuvolaris port-forward svc/couchdb 5985:5984 + + mongodb: + - kubectl apply -f deploy/mongodb-operator + - kubectl apply -f deploy/mongodb + + defin: > + kubectl -n nuvolaris patch wsk/controller --type=merge --patch '{"metadata": {"finalizers":[] } }' + + defin2: kubectl -n nuvolaris get wsk/controller -o yaml | grep -v Final | kubectl apply -f - + + killpy: ps auwwx | grep python | awk '{print $2}' | xargs kill -9 + + enter: kubectl exec -ti nuvolaris-operator -- bash + diff --git a/TaskfileOlaris.yml b/TaskfileOlaris.yml new file mode 100644 index 00000000..6e80ad4e --- /dev/null +++ b/TaskfileOlaris.yml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +version: '3' + +env: + DP: deploy/nuvolaris-permissions + OK: olaris/kubernetes + +tasks: + copy-yaml: + - cp -v $DP/nuvolaris-common.yaml $DP/openwhisk-runtimes-cm.yaml $OK/common + - cp -v $DP/openwhisk-core-roles.yaml $DP/operator-clusterroles.yaml $DP/operator-roles.yaml $OK/roles + - cp -v $DP/whisk-crd.yaml $DP/whisk-user-crd.yaml $DP/workflows-crd.yaml $OK/crds + \ No newline at end of file diff --git a/TaskfileTest.yml b/TaskfileTest.yml new file mode 100644 index 00000000..0ee165b2 --- /dev/null +++ b/TaskfileTest.yml @@ -0,0 +1,202 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +version: '3' + +vars: + KUBE: + sh: ./detect.sh + # taken from the tag + OPERATOR_TAG: + sh: git describe --tags --abbrev=0 2>/dev/null || git rev-parse --short HEAD + # taken from the Dockerfile - ovverdable with MY_OPERATOR_IMAGE + OPERATOR_IMAGE: + sh: awk -F= '/ARG OPERATOR_IMAGE_DEFAULT=/ { print $2 ; exit }' Dockerfile + + WHISK: '{{default "whisk" .WHISK}}' + CONFIG: "tests/{{.KUBE}}/{{.WHISK}}.yaml" + REDIS_URI: "redis://s0meP%40ass4@redis:6379" + REDIS_PASSWORD: s0meP@ass4 + MINIO_HOST: "nuvolaris-minio" + MINIO_PORT: 9000 + MINIO_USER: "minioadmin" + MINIO_PWD : "minioadmin" + APIKUBE: + sh: kubectl config view -o json | jq -r '.clusters[0].cluster.server' + + T: "" + +env: + APIHOST: + sh: echo {{.APIKUBE}} | sed -e 's|^.*//\(.*\):.*$|\1|' + APIHOST_OSH: + sh: echo {{.APIKUBE}} | sed -e 's|^.*//\(.*\):.*$|\1|' | sed -e 's/^api./nuvolaris.apps./' + + +tasks: + + kustomization: + cmds: + - | + cat <<__EOF__ >deploy/nuvolaris-operator/kustomization.yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + images: + - name: ghcr.io/nuvolaris/nuvolaris-operator:latest + newName: ${MY_OPERATOR_IMAGE:-{{.OPERATOR_IMAGE}}} + newTag: {{.OPERATOR_TAG}} + resources: + - operator-pod.yaml + __EOF__ + + permission: + - kubectl apply -f deploy/nuvolaris-permissions + + operator: + - task: kustomization + - kubectl apply -k deploy/nuvolaris-operator + - | + while ! kubectl -n nuvolaris wait --for=condition=ready pod/nuvolaris-operator + do echo still waiting... + done + + destroy-operator: + - task: kustomization + - kubectl delete -k deploy/nuvolaris-operator + + instance: + silent: false + cmds: + - kubectl config set-context --current --namespace nuvolaris + - | + FILE="tests/{{.KUBE}}/{{.WHISK}}.yaml"; + if [ ! -f "$FILE" ]; then + echo "Manifest $FILE non trovato, fallback a tests/{{.KUBE}}/whisk.yaml"; + FILE="tests/{{.KUBE}}/whisk.yaml"; + fi + cat "$FILE" | envsubst | kubectl apply -f - + - | + while ! 
kubectl -n nuvolaris wait --for=condition=ready pod/couchdb-0 2>/dev/null + do sleep 5 ; echo $((N++)) waiting couchdb... + done + - | + while ! kubectl -n nuvolaris wait --for=condition=complete job/couchdb-init 2>/dev/null + do sleep 5 ; echo $((N++)) waiting couchdb init... + done + - | + N=1 + while ! kubectl -n nuvolaris wait --for=condition=ready pod/controller-0 2>/dev/null + do sleep 5 ; echo $((N++)) waiting controller... + done + + destroy-instance: + - (sleep 5 ; task defin) & + - kubectl -n nuvolaris delete wsk/controller + + destroy: + - task: destroy-instance + - task: destroy-operator + + cleanup: kubectl -n nuvolaris delete pvc --all + + config: |- + rm -f ~/.wskprops + while true + do APIHOST=$(kubectl -n nuvolaris get cm/config -o yaml | awk '/apihost:/ {print $2}') + if [[ `echo $APIHOST | grep pending` || -z "$APIHOST" ]]; + then sleep 5 ; echo "$((N++)) apihost still pending..." + else break + fi + done + echo "*** $APIHOST ***" + AUTH=$(kubectl -n nuvolaris get wsk/controller -o jsonpath='{.spec.openwhisk.namespaces.nuvolaris}') + echo $AUTH + echo ops -wsk property set --apihost $APIHOST --auth $AUTH + ops -wsk property set --apihost $APIHOST --auth $AUTH + while ! ops action list + do echo $(( N++)) "waiting for the load balancer to be ready..." 
; sleep 10 + done + + hello: + - ops -wsk -i action update hello tests/hello.js --web=true + - ops -wsk -i action invoke hello -r | grep "hello" + - | + URL=$(ops -wsk -i action get hello --url | tail +2) + curl -skL $URL | grep hello + + redis: + cmds: + - ops -wsk -i package update redis -p redis_url "{{.REDIS_NUV_URL}}" -p redis_prefix "{{.REDIS_NUV_PREFIX}}" -p password "{{.REDIS_PASSWORD}}" + - ops -wsk -i action update redis/ping tests/ping.js + - ops -wsk -i action invoke redis/ping -r | grep "PONG" + - ops -wsk -i action update redis/redis tests/redis.js + - ops -wsk -i action invoke redis/redis -r | grep "world" + vars: + REDIS_NUV_PREFIX: + sh: kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.redis_prefix}' + REDIS_NUV_URL: + sh: kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.redis_url}' + + echo: + - ops -wsk -i action update echo tests/echo.js -a provide-api-key true + - ops -wsk -i action invoke echo -r | grep "__OW_API_KEY" + - ops -wsk -i action invoke echo -r | grep "__OW_API_HOST" + + api: + - ops -wsk -i action update api tests/api.js -a provide-api-key true + - ops -wsk -i action invoke api -r | grep '"api"' + + mongo: + - | + MONGODB_URL=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.mongodb_url}') + wsk -i package update mongo -p dburi "$MONGODB_URL" + - ops -wsk -i action update mongo/mongo tests/mongo.js + - ops -wsk -i action invoke mongo/mongo -r | grep "hello" + + minio: + - ops -wsk -i package update minio -p minio_host {{.MINIO_HOST}} -p minio_port {{.MINIO_PORT}} -p minio_user {{.MINIO_USER}} -p minio_pwd {{.MINIO_PWD}} + - ops -wsk -i action update minio/minio tests/minio.js + - ops -wsk -i action invoke minio/minio -r + + mongo2: + - ops -wsk -i project deploy --manifest tests/mongo.yaml + + postgres: + - | + PG_URL=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.postgres_url}') + ops -wsk -i package update postgres -p dburi "$PG_URL" + - 
ops -wsk -i action update postgres/postgres tests/postgres.js + - ops -wsk -i action invoke postgres/postgres -r | grep "Nuvolaris Postgres is up and running!" + + minio2: + silent: true + desc: minio test + cmds: + - | + MINIO_ACCESS_KEY=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.s3_access_key}') + MINIO_SECRET_KEY=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.s3_secret_key}') + MINIO_HOST=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.s3_host}') + MINIO_PORT=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.s3_port}') + MINIO_BUCKET_DATA=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.s3_bucket_data}') + MINIO_BUCKET_WEB=$(kubectl -n nuvolaris get cm/config -o jsonpath='{.metadata.annotations.s3_bucket_static}') + ops -wsk -i action update minio/minio-nuv tests/minio-nuv.js \ + -p minio_access "$MINIO_ACCESS_KEY" \ + -p minio_secret "$MINIO_SECRET_KEY" \ + -p minio_host "$MINIO_HOST" \ + -p minio_port "$MINIO_PORT" \ + -p minio_data "$MINIO_BUCKET_DATA" + - ops -wsk -i action invoke minio/minio-nuv -r diff --git a/setup/kubernetes/whisk.yaml b/setup/kubernetes/whisk.yaml index 012dd26c..8d5e5065 100644 --- a/setup/kubernetes/whisk.yaml +++ b/setup/kubernetes/whisk.yaml @@ -28,21 +28,25 @@ spec: storageclass: ${OPERATOR_CONFIG_STORAGECLASS:-auto} provisioner: ${OPERATOR_CONFIG_STORAGEPROVISIONER:-auto} protocol: ${OPERATOR_CONFIG_HOSTPROTOCOL:-auto} + slim: ${OPERATOR_CONFIG_SLIM:-false} affinity: ${OPERATOR_CONFIG_AFFINITY:-false} tolerations: ${OPERATOR_CONFIG_TOLERATIONS:-false} + ingressclass: ${OPERATOR_CONFIG_INGRESSCLASS:-auto} + preload: + only-apache: ${OPERATOR_CONFIG_PRELOAD_ONLY_APACHE:-true} components: # start openwhisk controller openwhisk: true # start openwhisk invoker - invoker: true + invoker: ${OPERATOR_COMPONENT_INVOKER:-true} # start couchdb couchdb: true # start zookeeper - zookeeper: true + zookeeper: 
${OPERATOR_COMPONENT_ZOOKEEPER:-true} + # start kafka - kafka: true + kafka: ${OPERATOR_COMPONENT_KAFKA:-true} + # prometheus monitoring enabled or not - monitoring: ${OPERATOR_COMPONENT_PROMETHEUS:-false} + monitoring: ${OPERATOR_COMPONENT_PROMETHEUS:-false} + # start mongodb mongodb: ${OPERATOR_COMPONENT_MONGODB:-false} # start redis @@ -54,13 +58,23 @@ spec: # minio enabled or not minio: ${OPERATOR_COMPONENT_MINIO:-false} # minio static enabled or not - static: ${OPERATOR_COMPONENT_STATIC:-false} + static: ${OPERATOR_COMPONENT_STATIC:-false} # postgres enabled or not postgres: ${OPERATOR_COMPONENT_POSTGRES:-false} # quota enabled or not quota: ${OPERATOR_COMPONENT_QUOTA:-false} + # etcd enabled or not + etcd: ${OPERATOR_COMPONENT_ETCD:-false} + # MILVUS enabled or not + milvus: ${OPERATOR_COMPONENT_MILVUS:-false} + # REGISTRY enabled or not + registry: ${OPERATOR_COMPONENT_REGISTRY:-false} + # SEAWEEDFS enabled or not + seaweedfs: ${OPERATOR_COMPONENT_SEAWEEDFS:-false} + # SPARK enabled or not + spark: ${OPERATOR_COMPONENT_SPARK:-false} tls: - acme-registered-email: ${OPERATOR_CONFIG_TLSEMAIL} + acme-registered-email: ${OPERATOR_CONFIG_TLSEMAIL:-no-reply@email.com} acme-server-url: https://acme-v02.api.letsencrypt.org/directory openwhisk: namespaces: @@ -98,16 +112,16 @@ spec: scheduler: schedule: "* * * * *" quota: - schedule: "*/10 * * * *" + schedule: "*/10 * * * *" configs: limits: activations: - max_allowed_payload: ${OPENWHISK_ACTIVATION_MAX_ALLOWED_PAYLOAD:-1048576} + max_allowed_payload: ${OPENWHISK_ACTIVATION_MAX_ALLOWED_PAYLOAD:-1048576} actions: sequence-maxLength: ${OPENWHISK_ACTION_SEQUENCE_MAX_LENGTH:-50} invokes-perMinute: ${OPENWHISK_ACTION_INVOKE_PER_MINUTE:-999} invokes-concurrent: ${OPENWHISK_ACTION_INVOKE_CONCURRENT:-250} - triggers: + triggers: fires-perMinute: ${OPENWHISK_TRIGGER_PER_MINUTE:-999} time: limit-min: "${OPENWHISK_TIME_LIMIT_MIN:-100ms}" @@ -118,8 +132,8 @@ spec: limit-std: "${OPENWHISK_ACTION_MEMORY_LIMIT_STD:-256m}" limit-max: 
"${OPENWHISK_ACTION_MEMORY_LIMIT_MAX:-2048m}" loadbalancer: - blackbox-fraction : "10%" - timeout-factor: 2 + blackbox-fraction : "${OPENWHISK_LB_BLACKBOX_FRACTION:-100%}" + timeout-factor: 2 controller: javaOpts: "$OPENWHISK_CONTROLLER_JAVA_OPTS" loggingLevel: "${OPENWHISK_CONTROLLER_LOGGINGLEVEL:-INFO}" @@ -145,7 +159,7 @@ spec: cpu-req: "500m" cpu-lim: "1" mem-req: "$OPENWHISK_INVOKER_RES_MIN_MEM" - mem-lim: "$OPENWHISK_INVOKER_RES_MAX_MEM" + mem-lim: "$OPENWHISK_INVOKER_RES_MAX_MEM" redis: persistence-enabled: ${REDIS_PERSISTENCE_ENABLED:-true} volume-size: ${STORAGE_SIZE_REDIS:-25} @@ -157,7 +171,7 @@ spec: mongodb: host: mongodb volume-size: ${STORAGE_SIZE_MONGODB:-50} - admin: + admin: user: whisk_admin password: $SECRET_MONGODB_ADMIN nuvolaris: @@ -174,14 +188,14 @@ spec: volume-size: ${STORAGE_SIZE_MINIO:-50} admin: user: minioadmin - password: $SECRET_MINIO_ADMIN + password: $SECRET_MINIO_ADMIN nuvolaris: user: nuvolaris password: $SECRET_MINIO_NUVOLARIS - postgres: + postgres: volume-size: ${STORAGE_SIZE_POSTGRES:-50} replicas: ${POSTGRES_CONFIG_REPLICAS:-2} - admin: + admin: password: $SECRET_POSTGRES_ADMIN replica-password: $SECRET_POSTGRES_REPLICA nuvolaris: @@ -196,15 +210,70 @@ spec: alert-manager: enabled: ${OPERATOR_COMPONENT_AM:-false} volume-size: ${STORAGE_SIZE_MONITORING:-30} - slack: + slack: enabled: ${OPERATOR_CONFIG_ALERTSLACK:-false} - default: true + default: true slack_channel_name: "$OPERATOR_CONFIG_SLACK_CHANNELNAME" slack_api_url: "$OPERATOR_CONFIG_SLACK_APIURL" - gmail: + gmail: enabled: ${OPERATOR_CONFIG_ALERTGMAIL:-false} default: false from: $OPERATOR_CONFIG_EMAIL_FROM to: $OPERATOR_CONFIG_EMAIL_TO username: $OPERATOR_CONFIG_GMAIL_USERNAME - password: $OPERATOR_CONFIG_GMAIL_PASSWORD + password: $OPERATOR_CONFIG_GMAIL_PASSWORD + etcd: + volume-size: ${STORAGE_SIZE_ETCD:-25} + replicas: ${ETCD_CONFIG_REPLICAS:-3} + auto-compaction-retention: "${ETCD_AUTO_COMPACTION_RETENTION:-1}" + quota-backend-bytes: 
${ETCD_QUOTA_BACKEND_BYTES:-2147483648} + root: + password: $SECRET_ETCD_ROOT + milvus: + volume-size: + cluster: ${STORAGE_SIZE_MILVUS_CLUSTER:-20} + zookeeper: ${STORAGE_SIZE_MILVUS_ZOOKEEPER:-10} + journal: ${STORAGE_SIZE_MILVUS_PULSAR_JOURNAL:-25} + ledgers: ${STORAGE_SIZE_MILVUS_PULSAR_LEDGERS:-50} + replicas: ${MILVUS_CONFIG_REPLICAS:-1} + proxy: + max-role-num: ${PROXY_MILVUS_MAX_ROLE_NUM:-100} + max-user-num: ${PROXY_MILVUS_MAX_USER_NUM:-100} + root-coord: + max-database-num: ${ROOTCOORD_MILVUS_DATABASE_NUM:-64} + password: + root: $SECRET_MILVUS_ROOT + s3: $SECRET_MILVUS_S3 + etcd: $SECRET_ETCD_ROOT + nuvolaris: + password: $SECRET_MILVUS_NUVOLARIS + registry: + mode: ${REGISTRY_CONFIG_MODE:-internal} + volume-size: ${REGISTRY_CONFIG_VOLUME_SIZE:-50} + auth: + username: ${REGISTRY_CONFIG_USERNAME:-opsuser} + password: ${REGISTRY_CONFIG_SECRET_PUSH_PULL:-changeme-registry} + hostname: ${REGISTRY_CONFIG_HOSTNAME:-auto} + ingress: + enabled: ${REGISTRY_CONFIG_INGRESS_ENABLED:-false} + seaweedfs: + volume-size: ${STORAGE_SIZE_SEAWEEDFS:-60} + nuvolaris: + user: nuvolaris + password: ${SECRET_SEAWEEDFS_NUVOLARIS:-changeme-seaweedfs} + ingress: + s3-enabled: ${SEAWEEDFS_CONFIG_INGRESS_S3_ENABLED:-true} + s3-hostname: ${SEAWEEDFS_CONFIG_INGRESS_S3_HOSTNAME:-auto} + console-enabled: ${SEAWEEDFS_CONFIG_INGRESS_CONSOLE_ENABLED:-false} + console-hostname: ${SEAWEEDFS_CONFIG_INGRESS_CONSOLE_HOSTNAME:-auto} + spark: + enabled: ${OPERATOR_COMPONENT_SPARK:-false} + mode: standalone + image: apache/spark:3.5.0 + history: + enabled: ${SPARK_HISTORY_ENABLED:-true} + backend: s3a + s3a: + bucket: ${SPARK_HISTORY_BUCKET:-spark-history} + endpoint: http://minio.nuvolaris.svc.cluster.local:9000 + secretRef: nuvolaris-minio