E2E Matrix Tests (nested clusters) #4010
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # Copyright 2025 Flant JSC | |
| # | |
| # Licensed under the Apache License, Version 2.0 (the "License"); | |
| # you may not use this file except in compliance with the License. | |
| # You may obtain a copy of the License at | |
| # | |
| # http://www.apache.org/licenses/LICENSE-2.0 | |
| # | |
| # Unless required by applicable law or agreed to in writing, software | |
| # distributed under the License is distributed on an "AS IS" BASIS, | |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
| # See the License for the specific language governing permissions and | |
| # limitations under the License. | |
| name: E2E Matrix Tests (nested clusters) | |
| on: | |
| workflow_dispatch: | |
| schedule: | |
| - cron: "40 4 * * *" | |
| concurrency: | |
| group: "${{ github.workflow }}-${{ github.event.number || github.ref }}" | |
| cancel-in-progress: true | |
| defaults: | |
| run: | |
| shell: bash | |
| jobs: | |
| cleanup-nested-clusters: | |
| name: Cleanup nested clusters | |
| runs-on: ubuntu-latest | |
| steps: | |
| - name: Configure kubectl via azure/k8s-set-context@v4 | |
| uses: azure/k8s-set-context@v4 | |
| with: | |
| method: kubeconfig | |
| context: e2e-cluster-nightly-e2e-virt-sa | |
| kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} | |
| - name: Delete nested clusters | |
| run: | | |
# Nightly cleanup: delete e2e resources (label test=nightly-e2e) older than the
# retention window. Resources created on Friday get a longer window so they
# survive the weekend.
set -euo pipefail

current_date_seconds="$(date -u +%s)"
FORMAT="%-63s %22s\n"
# 47h = ~2 days with CI delay; 71h = ~3 days for Friday clusters
KEEP_HOURS=47
FRIDAY_KEEP_HOURS=71

# Emit one compact JSON object per labeled resource: {name, created_at}.
collect_items_json() {
  local resource="$1"
  kubectl get "${resource}" -l test=nightly-e2e -o json \
    | jq -c '.items[] | {name: .metadata.name, created_at: .metadata.creationTimestamp}'
}

# Decide whether a resource created at $1 (RFC3339 timestamp) is still within
# its retention window. Prints "keep" or "delete"; always returns 0.
should_keep() {
  local created_at="$1"
  local resource_created_at_seconds age_seconds weekday_of_day
  resource_created_at_seconds="$(date -d "${created_at}" -u +%s)"
  age_seconds="$(( current_date_seconds - resource_created_at_seconds ))"
  weekday_of_day="$(date -d "${created_at}" -u +%u)"   # 1=Mon .. 7=Sun
  if [ "${age_seconds}" -lt "$(( KEEP_HOURS * 3600 ))" ]; then
    echo "keep"
    return 0
  fi
  # Resources created on Friday (weekday 5) get the extended window.
  if [ "${weekday_of_day}" -eq 5 ] && [ "${age_seconds}" -lt "$(( FRIDAY_KEEP_HOURS * 3600 ))" ]; then
    echo "keep"
    return 0
  fi
  echo "delete"
  return 0
}

# Delete all expired resources of the given kind; best-effort, never fails the job.
cleanup_kind() {
  local kind="$1"
  local item name created_at decision
  echo "[INFO] Process ${kind} with label test=nightly-e2e"
  collect_items_json "${kind}" | while read -r item; do
    name="$(jq -r '.name' <<<"${item}")"
    created_at="$(jq -r '.created_at' <<<"${item}")"
    [ -z "${name}" ] && continue
    decision="$(should_keep "${created_at}")"
    if [ "${decision}" = "keep" ]; then
      printf "${FORMAT}" "[INFO] Keep ${kind}/${name}:" "created_at ${created_at}"
      continue
    fi
    printf "${FORMAT}" "[INFO] Delete ${kind}/${name}:" "created_at ${created_at}"
    kubectl delete "${kind}" "${name}" --timeout=300s || true
  done || true
}

cleanup_kind "namespaces"
echo " "
cleanup_kind "vmclass"
| power-off-vms-for-nested: | |
| name: Power off VMs for nested clusters | |
| needs: cleanup-nested-clusters | |
| runs-on: ubuntu-latest | |
| steps: | |
| - name: Configure kubectl via azure/k8s-set-context@v4 | |
| uses: azure/k8s-set-context@v4 | |
| with: | |
| method: kubeconfig | |
| context: e2e-cluster-nightly-e2e-virt-sa | |
| kubeconfig: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} | |
| - name: Power off VMs to free resources for nested cluster setup | |
| run: | | |
set -euo pipefail
# Constants (nested cluster: 1 master + 3 workers x2)
REQUIRED_MEM_GI=86
REQUIRED_CPU=26
MIN_MEM_GI_PER_NODE=12
MIN_CPU_PER_NODE=4
MIN_NODES_FOR_PLACEMENT=3

# Helpers: Kubernetes quantity -> numeric (portable, no bash 4+)

# Convert a Kubernetes memory quantity (Gi/Mi/Ki suffix or plain bytes) to GiB.
# Unparseable input yields "0".
mem_to_gi() {
  local quantity="$1" normalized
  normalized=$(echo "$quantity" | tr '[:upper:]' '[:lower:]')
  if [[ "$normalized" =~ ^([0-9]+\.?[0-9]*)gi?$ ]]; then
    echo "${BASH_REMATCH[1]}"
    return
  fi
  if [[ "$normalized" =~ ^([0-9]+\.?[0-9]*)mi?$ ]]; then
    echo "scale=4; ${BASH_REMATCH[1]} / 1024" | bc
    return
  fi
  if [[ "$normalized" =~ ^([0-9]+\.?[0-9]*)ki?$ ]]; then
    echo "scale=6; ${BASH_REMATCH[1]} / 1024 / 1024" | bc
    return
  fi
  if [[ "$quantity" =~ ^[0-9]+\.?[0-9]*$ ]]; then
    echo "scale=6; $quantity / 1024 / 1024 / 1024" | bc
    return
  fi
  echo "0"
}

# Convert a Kubernetes CPU quantity ("500m" millicores or whole cores) to cores.
cpu_to_cores() {
  local quantity="${1:-0}" normalized
  normalized=$(echo "$quantity" | tr '[:upper:]' '[:lower:]')
  if [[ "$normalized" == *m ]]; then
    echo "scale=4; ${quantity%[mM]} / 1000" | bc
  else
    echo "${quantity:-0}"
  fi
}

# Compare two numbers (bc outputs 0 or 1; (( )) treats 0 as false)
float_ge() { (( $(echo "$1 >= $2" | bc) )); }
float_gt() { (( $(echo "$1 > $2" | bc) )); }
float_le() { (( $(echo "$1 <= $2" | bc) )); }
float_lt() { (( $(echo "$1 < $2" | bc) )); }
| # Gather free resources like the scheduler: allocatable - sum(pod requests) per node. | |
| worker_nodes=$(kubectl get nodes -l node-role.kubernetes.io/worker -o jsonpath='{.items[*].metadata.name}') | |
| gather_node_resources() { | |
| available_mem_gi=0 | |
| available_cpu=0 | |
| nodes_meeting_min=0 | |
| for node in $worker_nodes; do | |
| [[ -n "$node" ]] || continue | |
| node_json=$(kubectl get node "$node" -o json 2>/dev/null) || true | |
| if [[ -z "$node_json" ]]; then | |
| echo "[WARN] Node $node: could not get node spec, skipping" | |
| continue | |
| fi | |
| alloc_mem_gi=$(mem_to_gi "$(echo "$node_json" | jq -r '.status.allocatable.memory // "0"')") | |
| alloc_cpu=$(cpu_to_cores "$(echo "$node_json" | jq -r '.status.allocatable.cpu // "0"')") | |
| pods_json=$(kubectl get pods -A --field-selector spec.nodeName="$node" -o json 2>/dev/null) || true | |
| requested_mem_gi=0 | |
| requested_cpu=0 | |
| if [[ -n "$pods_json" ]]; then | |
| while read -r qty; do | |
| [[ -z "$qty" ]] && continue | |
| requested_mem_gi=$(echo "$requested_mem_gi + $(mem_to_gi "$qty")" | bc) | |
| done < <(echo "$pods_json" | jq -r ' | |
| .items[] | |
| | select(.status.phase == "Running" or .status.phase == "Pending") | |
| | [(.spec.containers[]? | try .resources.requests.memory catch null), (.spec.initContainers[]? | try .resources.requests.memory catch null)] | |
| | .[] | . // "0" | |
| ') | |
| while read -r qty; do | |
| [[ -z "$qty" ]] && continue | |
| requested_cpu=$(echo "$requested_cpu + $(cpu_to_cores "$qty")" | bc) | |
| done < <(echo "$pods_json" | jq -r ' | |
| .items[] | |
| | select(.status.phase == "Running" or .status.phase == "Pending") | |
| | [(.spec.containers[]? | try .resources.requests.cpu catch null), (.spec.initContainers[]? | try .resources.requests.cpu catch null)] | |
| | .[] | . // "0" | |
| ') | |
| fi | |
| node_free_mem=$(echo "x = $alloc_mem_gi - $requested_mem_gi; if (x < 0) 0 else x" | bc 2>/dev/null || echo "0") | |
| node_free_cpu=$(echo "x = $alloc_cpu - $requested_cpu; if (x < 0) 0 else x" | bc 2>/dev/null || echo "0") | |
| available_mem_gi=$(echo "$available_mem_gi + $node_free_mem" | bc) | |
| available_cpu=$(echo "$available_cpu + $node_free_cpu" | bc) | |
| node_ok_mem=$(echo "$node_free_mem >= $MIN_MEM_GI_PER_NODE" | bc) | |
| node_ok_cpu=$(echo "$node_free_cpu >= $MIN_CPU_PER_NODE" | bc) | |
| if [[ "$node_ok_mem" -eq 1 && "$node_ok_cpu" -eq 1 ]]; then | |
| nodes_meeting_min=$((nodes_meeting_min + 1)) | |
| else | |
| echo "[INFO] Node $node: does not meet placement min — free ${node_free_mem} Gi RAM, ${node_free_cpu} CPU (required: >= ${MIN_MEM_GI_PER_NODE} Gi, >= ${MIN_CPU_PER_NODE} CPU)" | |
| fi | |
| done | |
| } | |
gather_node_resources
echo "[INFO] Workers: free ${available_mem_gi} Gi RAM, ${available_cpu} CPU; nodes with enough free resources for placement: ${nodes_meeting_min} (need at least ${MIN_NODES_FOR_PLACEMENT})"
echo "[INFO] Required: ${REQUIRED_MEM_GI} Gi, ${REQUIRED_CPU} CPU; need >= ${MIN_NODES_FOR_PLACEMENT} nodes with >= ${MIN_MEM_GI_PER_NODE} Gi and >= ${MIN_CPU_PER_NODE} CPU"
echo " "
# Deficits fall back to the full requirement if bc fails for any reason.
deficit_mem=$(echo "$REQUIRED_MEM_GI - $available_mem_gi" | bc 2>/dev/null || echo "$REQUIRED_MEM_GI")
deficit_cpu=$(echo "$REQUIRED_CPU - $available_cpu" | bc 2>/dev/null || echo "$REQUIRED_CPU")
# Check for sufficient free resources and node availability to proceed with placement
total_sufficient=false
placement_sufficient=false
float_le "$deficit_mem" 0 && float_le "$deficit_cpu" 0 && total_sufficient=true
[[ $nodes_meeting_min -ge $MIN_NODES_FOR_PLACEMENT ]] && placement_sufficient=true
if [[ "$total_sufficient" == true && "$placement_sufficient" == true ]]; then
  echo "[INFO] Resources sufficient (total + placement), no VMs to power off"
  exit 0
fi
if [[ "$total_sufficient" == true ]]; then
  echo "[INFO] Cluster has enough free memory and cpu."
else
  shortage_parts=""
  float_gt "$deficit_mem" 0 && shortage_parts="${deficit_mem} Gi RAM"
  float_gt "$deficit_cpu" 0 && shortage_parts="${shortage_parts:+$shortage_parts, }${deficit_cpu} CPU"
  echo "[INFO] Resources shortage: need to free ${shortage_parts}. Proceed with power off some VMs to free cluster resources."
fi
if [[ "$placement_sufficient" == true ]]; then
  echo "[INFO] Cluster has enough available nodes."
else
  echo "[INFO] Available nodes shortage: only ${nodes_meeting_min} node(s) meet free resources requirement, expect at least ${MIN_NODES_FOR_PLACEMENT} available nodes. Proceed with power off some VMs to free resources."
fi
echo "[Note] Will ignore VMs in 'nightly-e2e-*', 'static-cse' namespaces, and VMs with the 'e2e-cluster/do-not-stop-vm-on-e2e-run' label."
echo "[INFO] Power off candidates sorted by memory (largest first); stop when enough resources are freed."
# Power off VMs until we have enough (exclude nightly-e2e-*, static-cse, do-not-stop)
# List candidate VMs as TSV: namespace, name, memory, cores, coreFraction.
# NOTE: the namespace filter is anchored so only the 'nightly-e2e-' prefix and the
# exact 'static-cse' namespace are excluded; previously 'static-cse' matched
# anywhere inside a namespace name and could skip unrelated namespaces.
get_vms_candidates() {
  kubectl get vm -A -o json | jq -r '
    .items[]
    | select(.metadata.namespace | test("^nightly-e2e-|^static-cse$") | not)
    | select(.metadata.labels | tostring | test("e2e-cluster/do-not-stop-vm-on-e2e-run") | not)
    | select(.spec.runPolicy != "AlwaysOff")
    | [.metadata.namespace, .metadata.name, (.spec.memory.size // "0"), (.spec.cpu.cores // 0), (.spec.cpu.coreFraction // "100%")]
    | @tsv
  '
}
# Sort by memory descending (largest first) so we free the most with fewer power-offs
sort_by_mem_desc() {
  local ns name mem_qty cores core_frac mem_gi
  while IFS=$'\t' read -r ns name mem_qty cores core_frac; do
    [[ -n "$ns" ]] || continue
    mem_gi=$(mem_to_gi "$mem_qty")
    printf '%s\t%s\t%s\t%s\t%s\t%s\n' "$mem_gi" "$ns" "$name" "$mem_qty" "$cores" "$core_frac"
  done | sort -t$'\t' -k1,1 -rn
}
# Keep powering off while: placement not met, or memory/CPU deficit not yet covered
still_need_to_free() {
  if ! $placement_sufficient; then return 0; fi
  if float_gt "$deficit_mem" 0 && float_lt "$freed_mem" "$deficit_mem"; then return 0; fi
  if float_gt "$deficit_cpu" 0 && float_lt "$freed_cpu" "$deficit_cpu"; then return 0; fi
  return 1
}
freed_mem=0
freed_cpu=0
# Walk the candidates largest-first, powering off until requirements are met.
while IFS=$'\t' read -r vm_mem_gi vm_ns vm_name raw_mem vm_cores vm_frac; do
  [[ -n "$vm_ns" ]] || continue
  still_need_to_free || break
  # coreFraction like "50%" scales the effective CPU; default is 100%.
  fraction_pct=100
  [[ "$vm_frac" =~ ^([0-9]+)%$ ]] && fraction_pct="${BASH_REMATCH[1]}"
  vm_cpu=$(echo "scale=2; $vm_cores * $fraction_pct / 100" | bc)
  echo "[INFO] Powering off vm $vm_ns/$vm_name (${vm_mem_gi} Gi, ${vm_cpu} CPU)"
  kubectl patch vm -n "$vm_ns" "$vm_name" --type=merge -p '{"spec":{"runPolicy":"AlwaysOff"}}' || true
  freed_mem=$(echo "$freed_mem + $vm_mem_gi" | bc)
  freed_cpu=$(echo "$freed_cpu + $vm_cpu" | bc)
  # Re-check placement after each power-off (pods may take a few seconds to disappear)
  if ! $placement_sufficient; then
    kubectl wait --for=jsonpath='{.status.phase}'=Stopped vm -n "$vm_ns" "$vm_name" --timeout=90s || true
    gather_node_resources
    if [[ $nodes_meeting_min -ge $MIN_NODES_FOR_PLACEMENT ]]; then
      placement_sufficient=true
      echo "[INFO] Placement now sufficient: ${nodes_meeting_min} nodes with >= ${MIN_MEM_GI_PER_NODE} Gi and >= ${MIN_CPU_PER_NODE} CPU"
    fi
  fi
done < <(get_vms_candidates | sort_by_mem_desc)
echo "[INFO] Freed: ${freed_mem} Gi RAM, ${freed_cpu} CPU"
if still_need_to_free; then
  echo "[ERROR] Stopping VMs did not free enough resources. Human intervention is required."
  exit 1
fi
| set-vars: | |
| name: Set vars | |
| needs: power-off-vms-for-nested | |
| runs-on: ubuntu-latest | |
| outputs: | |
| date_start: ${{ steps.vars.outputs.date-start }} | |
| randuuid4c: ${{ steps.vars.outputs.randuuid4c }} | |
| steps: | |
| - name: Set vars | |
| id: vars | |
| run: | | |
| echo "date-start=$(date +%Y%m%d-%H%M%S)" >> $GITHUB_OUTPUT | |
| echo "randuuid4c=$(openssl rand -hex 2)" >> $GITHUB_OUTPUT | |
| e2e-replicated: | |
| name: E2E Pipeline (Replicated) | |
| needs: | |
| - set-vars | |
| uses: ./.github/workflows/e2e-reusable-pipeline.yml | |
| with: | |
| storage_type: replicated | |
| nested_storageclass_name: nested-thin-r1 | |
| branch: main | |
| virtualization_tag: main | |
| deckhouse_channel: alpha | |
| default_user: cloud | |
| go_version: "1.24.13" | |
| e2e_timeout: "3.5h" | |
| date_start: ${{ needs.set-vars.outputs.date_start }} | |
| randuuid4c: ${{ needs.set-vars.outputs.randuuid4c }} | |
| cluster_config_workers_memory: "9Gi" | |
| cluster_config_k8s_version: "1.34" | |
| secrets: | |
| DEV_REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} | |
| VIRT_E2E_NIGHTLY_SA_TOKEN: ${{ secrets.VIRT_E2E_NIGHTLY_SA_TOKEN }} | |
| PROD_IO_REGISTRY_DOCKER_CFG: ${{ secrets.PROD_IO_REGISTRY_DOCKER_CFG }} | |
| BOOTSTRAP_DEV_PROXY: ${{ secrets.BOOTSTRAP_DEV_PROXY }} | |
| report-to-channel: | |
| runs-on: ubuntu-latest | |
| name: End-to-End tests report | |
| needs: | |
| - e2e-replicated | |
| if: ${{ always()}} | |
| env: | |
| STORAGE_TYPES: '["replicated"]' | |
| steps: | |
| - uses: actions/checkout@v4 | |
| - name: Download E2E report artifacts | |
| uses: actions/download-artifact@v5 | |
| continue-on-error: true | |
| id: download-artifacts-pattern | |
| with: | |
| pattern: "e2e-report-*" | |
| path: downloaded-artifacts/ | |
| merge-multiple: false | |
| - name: Send results to channel | |
| run: | | |
# Map storage types to CSI names
# Unknown storage types pass through unchanged.
get_csi_name() {
  local storage_type=$1
  if [ "$storage_type" = "replicated" ]; then
    echo "replicated.csi.storage.deckhouse.io"
  else
    echo "$storage_type"
  fi
}
# Function to load and parse report from artifact
# Outputs: file content to stdout, debug messages to stderr
# Works with pattern-based artifact download (e2e-report-*)
# Artifacts are organized as: downloaded-artifacts/e2e-report-<storage_type>-<run_id>/e2e_report_<storage_type>.json
load_report_from_artifact() {
  local storage_type=$1
  local search_root="downloaded-artifacts/"
  local matched_dir found_file
  echo "[INFO] Searching for report for storage type: $storage_type" >&2
  echo "[DEBUG] Base path: $search_root" >&2
  if [ ! -d "$search_root" ]; then
    echo "[WARN] Base path does not exist: $search_root" >&2
    return 1
  fi
  # First, search in artifact directories matching pattern: e2e-report-<storage_type>-*
  # Pattern downloads create subdirectories named after the artifact
  # e.g., downloaded-artifacts/e2e-report-replicated-<run_id>/e2e_report_replicated.json
  echo "[DEBUG] Searching in artifact directories matching pattern: e2e-report-${storage_type}-*" >&2
  matched_dir=$(find "$search_root" -type d -name "e2e-report-${storage_type}-*" 2>/dev/null | head -1)
  if [ -n "$matched_dir" ]; then
    echo "[DEBUG] Found artifact dir: $matched_dir" >&2
    found_file=$(find "$matched_dir" -name "e2e_report_*.json" -type f 2>/dev/null | head -1)
    if [ -n "$found_file" ] && [ -f "$found_file" ]; then
      echo "[INFO] Found report file in artifact dir: $found_file" >&2
      cat "$found_file"
      return 0
    fi
  fi
  # Fallback: search for file by name pattern anywhere in base_path
  echo "[DEBUG] Searching for file: e2e_report_${storage_type}.json" >&2
  found_file=$(find "$search_root" -type f -name "e2e_report_${storage_type}.json" 2>/dev/null | head -1)
  if [ -n "$found_file" ] && [ -f "$found_file" ]; then
    echo "[INFO] Found report file by name: $found_file" >&2
    cat "$found_file"
    return 0
  fi
  echo "[WARN] Could not load report artifact for $storage_type" >&2
  return 1
}
# Function to create failure summary JSON (fallback)
# Builds a report object for a storage type whose pipeline failed at stage $2,
# so the channel table always gets a row even without a real report artifact.
create_failure_summary() {
  local storage_type=$1
  local stage=$2
  local run_id=$3
  local csi report_date report_time branch link status_msg
  csi=$(get_csi_name "$storage_type")
  report_date=$(date +"%Y-%m-%d")
  report_time=$(date +"%H:%M:%S")
  branch="${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}"
  link="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${run_id:-${GITHUB_RUN_ID}}"
  # Map stage to status message
  case "$stage" in
    "bootstrap") status_msg=":x: BOOTSTRAP CLUSTER FAILED" ;;
    "storage-setup") status_msg=":x: STORAGE SETUP FAILED" ;;
    "virtualization-setup") status_msg=":x: VIRTUALIZATION SETUP FAILED" ;;
    "e2e-test") status_msg=":x: E2E TEST FAILED" ;;
    *) status_msg=":question: UNKNOWN" ;;
  esac
  jq -n \
    --arg csi "$csi" \
    --arg date "$report_date" \
    --arg time "$report_time" \
    --arg branch "$branch" \
    --arg status "$status_msg" \
    --arg link "$link" \
    '{CSI: $csi, Date: $date, StartTime: $time, Branch: $branch, Status: $status, Passed: 0, Failed: 0, Pending: 0, Skipped: 0, Link: $link}'
}
# Parse summary JSON and add to table
# Appends one markdown row for storage type $2 to the global markdown_table.
parse_summary() {
  local summary_json=$1
  local storage_type=$2
  if [ -z "$summary_json" ] || [ "$summary_json" == "null" ] || [ "$summary_json" == "" ]; then
    echo "Warning: Empty summary for $storage_type"
    return
  fi
  # Try to parse as JSON (handle both JSON string and already parsed JSON)
  if ! echo "$summary_json" | jq empty 2>/dev/null; then
    echo "Warning: Invalid JSON for $storage_type: $summary_json"
    echo "[DEBUG] json: $summary_json"
    return
  fi
  # Parse JSON fields
  csi_raw=$(echo "$summary_json" | jq -r '.CSI // empty' 2>/dev/null)
  case "$csi_raw" in
    ""|"null") csi=$(get_csi_name "$storage_type") ;;
    *) csi="$csi_raw" ;;
  esac
  date=$(echo "$summary_json" | jq -r '.Date // ""' 2>/dev/null)
  time=$(echo "$summary_json" | jq -r '.StartTime // ""' 2>/dev/null)
  branch=$(echo "$summary_json" | jq -r '.Branch // ""' 2>/dev/null)
  status=$(echo "$summary_json" | jq -r '.Status // ":question: UNKNOWN"' 2>/dev/null)
  passed=$(echo "$summary_json" | jq -r '.Passed // 0' 2>/dev/null)
  failed=$(echo "$summary_json" | jq -r '.Failed // 0' 2>/dev/null)
  pending=$(echo "$summary_json" | jq -r '.Pending // 0' 2>/dev/null)
  skipped=$(echo "$summary_json" | jq -r '.Skipped // 0' 2>/dev/null)
  link=$(echo "$summary_json" | jq -r '.Link // ""' 2>/dev/null)
  # Set defaults if empty
  : "${passed:=0}" "${failed:=0}" "${pending:=0}" "${skipped:=0}"
  [ -z "$status" ] && status=":question: UNKNOWN"
  # Format link - use CSI name as fallback if link is empty
  if [ -n "$link" ]; then
    link_text="[:link: $csi]($link)"
  else
    link_text="$csi"
  fi
  # Add row to table
  markdown_table+="| $link_text | $status | $passed | $failed | $pending | $skipped | $date | $time | $branch |\n"
}
# Initialize markdown table
echo "[INFO] Generate markdown table"
markdown_table=""
header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n"
separator="|---|---|---|---|---|---|---|---|---|\n"
markdown_table+="$header"
markdown_table+="$separator"
# Get current date for header
DATE=$(date +"%Y-%m-%d")
COMBINED_SUMMARY="## :dvp: **DVP | E2E on a nested cluster | $DATE**\n\n"
echo "[INFO] Get storage types"
readarray -t storage_types < <(echo "$STORAGE_TYPES" | jq -r '.[]')
echo "[INFO] Storage types: " "${storage_types[@]}"
echo "[INFO] Generate summary for each storage type"
for storage in "${storage_types[@]}"; do
  echo "[INFO] Processing $storage"
  # Try to load report from artifact
  # Debug messages go to stderr (visible in logs), JSON content goes to stdout
  echo "[INFO] Attempting to load report for $storage"
  report_payload=$(load_report_from_artifact "$storage" || true)
  if [ -n "$report_payload" ]; then
    # Discard anything that is not valid JSON before going further.
    if echo "$report_payload" | jq empty 2>/dev/null; then
      echo "[INFO] Report is valid JSON for $storage"
    else
      echo "[WARN] Report is not valid JSON for $storage"
      echo "[DEBUG] Raw report content (first 200 chars):"
      echo "$report_payload" | head -c 200
      echo ""
      report_payload=""
    fi
  fi
  if [ -n "$report_payload" ] && echo "$report_payload" | jq empty 2>/dev/null; then
    # Extract report data from structured file
    report_body=$(echo "$report_payload" | jq -c '.report // empty')
    failed_stage=$(echo "$report_payload" | jq -r '.failed_stage // empty')
    workflow_run_id=$(echo "$report_payload" | jq -r '.workflow_run_id // empty')
    echo "[INFO] Loaded report for $storage (failed_stage: ${failed_stage}, run_id: ${workflow_run_id})"
    # Validate and parse report
    if [ -n "$report_body" ] && [ "$report_body" != "" ] && [ "$report_body" != "null" ]; then
      if echo "$report_body" | jq empty 2>/dev/null; then
        echo "[INFO] Found valid report for $storage"
        parse_summary "$report_body" "$storage"
      else
        echo "[WARN] Invalid report JSON for $storage, using failed stage info"
        # Fallback to failed stage
        if [ -n "$failed_stage" ] && [ "$failed_stage" != "" ] && [ "$failed_stage" != "success" ]; then
          fallback_summary=$(create_failure_summary "$storage" "$failed_stage" "$workflow_run_id")
          parse_summary "$fallback_summary" "$storage"
        else
          csi=$(get_csi_name "$storage")
          markdown_table+="| $csi | :warning: INVALID REPORT | 0 | 0 | 0 | 0 | — | — | — |\n"
        fi
      fi
    else
      # No report in structured file, use failed stage
      if [ -n "$failed_stage" ] && [ "$failed_stage" != "" ] && [ "$failed_stage" != "success" ]; then
        echo "[INFO] Stage '$failed_stage' failed for $storage"
        fallback_summary=$(create_failure_summary "$storage" "$failed_stage" "$workflow_run_id")
        parse_summary "$fallback_summary" "$storage"
      else
        csi=$(get_csi_name "$storage")
        markdown_table+="| $csi | :warning: NO REPORT | 0 | 0 | 0 | 0 | — | — | — |\n"
      fi
    fi
  else
    # Artifact not found or invalid, show warning
    echo "[WARN] Could not load report artifact for $storage"
    csi=$(get_csi_name "$storage")
    markdown_table+="| $csi | :warning: ARTIFACT NOT FOUND | 0 | 0 | 0 | 0 | — | — | — |\n"
  fi
done
echo "[INFO] Combined summary"
COMBINED_SUMMARY+="${markdown_table}\n"
echo -e "$COMBINED_SUMMARY"
# Send to channel if webhook is configured
echo "[INFO] Send to webhook"
if [ -n "$LOOP_WEBHOOK_URL" ]; then
  curl --request POST --header 'Content-Type: application/json' --data "{\"text\": \"${COMBINED_SUMMARY}\"}" "$LOOP_WEBHOOK_URL"
fi
| env: | |
| LOOP_WEBHOOK_URL: ${{ secrets.LOOP_WEBHOOK_URL }} |