# SpecCursor qualification & optimization CI pipeline (GitHub Actions).
# NOTE(review): this file was pasted from the GitHub web UI ("Skip to
# content" chrome, run title "feat/initial commit #2") with all YAML
# indentation stripped; structure below is restored from the workflow schema.
name: SpecCursor Qualification & Optimization Suite

# NOTE: `on` is a boolean-looking key under YAML 1.1; GitHub's parser accepts
# it, but generic YAML tooling (yamllint `truthy`) may read the key as `true`.
on:
  push:
    branches: [main, develop, 'release/*']
    paths:
      - 'apps/**'
      - 'packages/**'
      - 'workers/**'
      - 'scripts/**'
      - 'tests/**'
      - 'load/**'
      - 'chaos/**'
      - 'security/**'
      - '.github/workflows/**'
  pull_request:
    branches: [main, develop, 'release/*']
    paths:
      - 'apps/**'
      - 'packages/**'
      - 'workers/**'
      - 'scripts/**'
      - 'tests/**'
      - 'load/**'
      - 'chaos/**'
      - 'security/**'
      - '.github/workflows/**'
  workflow_dispatch:
    inputs:
      stage:
        description: 'Specific stage to run'
        required: false
        type: choice
        # Default added so a bare manual dispatch runs every stage instead of
        # dispatching with an empty choice.
        default: all
        options:
          - all
          - static-analysis
          - unit-tests
          - property-tests
          - integration-tests
          - load-tests
          - chaos-tests
          - security-scans
          - vulnerability-sbom
          - observability
          - performance
          - cost-budgets
          - deployment-drill

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  K6_VERSION: v0.51.0
  OTEL_COLLECTOR_VERSION: v0.130.1

jobs:
# Stage 1: Static Analysis
static-analysis:
name: Stage 1 - Static Analysis
runs-on: ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
os: [ubuntu-22.04]
node: [18, 20]
rust: [1.78, nightly]
go: [1.22]
python: [3.12]
lean: [4.20.0]
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node }}
cache: 'npm'
- name: Setup Rust ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
override: true
- name: Setup Python ${{ matrix.python }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
- name: Setup Go ${{ matrix.go }}
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
- name: Setup Lean ${{ matrix.lean }}
uses: leanprover/lean4@v2
with:
lean-version: ${{ matrix.lean }}
- name: Install pnpm
run: npm install -g pnpm@latest
- name: Install dependencies
run: |
pnpm install --frozen-lockfile
if [ -f "Cargo.toml" ]; then cargo fetch; fi
if [ -f "requirements.txt" ]; then pip install -r requirements.txt; fi
if [ -f "go.mod" ]; then go mod download; fi
# TypeScript/JavaScript Analysis
- name: Run ESLint with complexity limits
run: |
pnpm lint
# Check cyclomatic complexity ≤ 15
npx eslint --plugin complexity --rule 'complexity/complexity: [error, 15]' apps/ packages/
- name: Run TypeScript type check
run: pnpm type-check
- name: Run Prettier check
run: pnpm format:check
# Rust Analysis
- name: Run Rust clippy with strict settings
run: |
if [ -f "Cargo.toml" ]; then
cargo clippy --all-targets --all-features -- -D warnings
# Check complexity with cargo-geiger
cargo install cargo-geiger
cargo geiger --output json > geiger-report.json
fi
# Go Analysis
- name: Run Go static analysis
run: |
if [ -f "go.mod" ]; then
go vet ./...
# Install and run gocyclo for complexity
go install github.com/fzipp/gocyclo/cmd/gocyclo@latest
gocyclo -over 15 . || echo "Complexity check passed"
fi
# Python Analysis
- name: Run Python static analysis
run: |
if [ -f "requirements.txt" ]; then
pip install ruff mypy
ruff check . --select E9,F63,F7,F82
ruff format --check .
mypy --strict .
fi
# Lean Analysis
- name: Run Lean checker
run: |
if [ -f "lakefile.lean" ]; then
lake build
lake exe cache get
leanchecker lean/speccursor.lean
fi
- name: Upload static analysis results
uses: actions/upload-artifact@v4
if: always()
with:
name: static-analysis-${{ matrix.node }}-${{ matrix.rust }}-${{ matrix.go }}-${{ matrix.python }}
path: |
geiger-report.json
eslint-report.json
mypy-report.json
# Stage 2: Unit Tests
unit-tests:
name: Stage 2 - Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 45
needs: static-analysis
strategy:
matrix:
os: [ubuntu-22.04]
node: [18, 20]
rust: [1.78, nightly]
go: [1.22]
python: [3.12]
lean: [4.20.0]
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node }}
cache: 'npm'
- name: Setup Rust ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
override: true
- name: Setup Python ${{ matrix.python }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
- name: Setup Go ${{ matrix.go }}
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
- name: Setup Lean ${{ matrix.lean }}
uses: leanprover/lean4@v2
with:
lean-version: ${{ matrix.lean }}
- name: Install dependencies
run: |
pnpm install --frozen-lockfile
if [ -f "Cargo.toml" ]; then cargo fetch; fi
if [ -f "requirements.txt" ]; then pip install -r requirements.txt; fi
if [ -f "go.mod" ]; then go mod download; fi
# Node.js Tests
- name: Run Node.js unit tests
if: matrix.node
run: |
pnpm test:unit
pnpm test:coverage
env:
NODE_ENV: test
COVERAGE_THRESHOLD: 95
# Rust Tests
- name: Run Rust unit tests
if: matrix.rust
run: |
if [ -f "Cargo.toml" ]; then
cargo test --verbose
cargo tarpaulin --out Html --output-dir coverage
fi
# Python Tests
- name: Run Python unit tests
if: matrix.python
run: |
if [ -f "requirements.txt" ]; then
python -m pytest tests/unit/ --cov=. --cov-report=html --cov-report=term-missing --cov-fail-under=95
fi
# Go Tests
- name: Run Go unit tests
if: matrix.go
run: |
if [ -f "go.mod" ]; then
go test -v -coverprofile=coverage.out ./...
go tool cover -html=coverage.out -o coverage.html
# Check coverage threshold
COVERAGE=$(go tool cover -func=coverage.out | grep total | awk '{print $3}' | sed 's/%//')
if (( $(echo "$COVERAGE < 95" | bc -l) )); then
echo "Coverage $COVERAGE% is below 95% threshold"
exit 1
fi
fi
# Lean Tests
- name: Run Lean tests
if: matrix.lean
run: |
if [ -f "lakefile.lean" ]; then
lake env lean --run lean/test_runner.lean
fi
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
file: ./coverage/lcov.info
flags: ${{ matrix.node }}-${{ matrix.rust }}-${{ matrix.go }}-${{ matrix.python }}
name: coverage-${{ matrix.node }}-${{ matrix.rust }}-${{ matrix.go }}-${{ matrix.python }}
fail_ci_if_error: true
- name: Check coverage delta
run: |
# Fail if coverage drops by more than 2%
if [ "${{ needs.unit-tests.outputs.coverage_delta }}" -lt -2 ]; then
echo "Coverage dropped by more than 2%"
exit 1
fi
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: unit-test-results-${{ matrix.node }}-${{ matrix.rust }}-${{ matrix.go }}-${{ matrix.python }}
path: |
coverage/
test-results.json
*.xml
# Stage 3: Property-Based Tests
property-tests:
name: Stage 3 - Property-Based Tests
runs-on: ubuntu-latest
timeout-minutes: 60
needs: unit-tests
strategy:
matrix:
os: [ubuntu-22.04]
node: [20]
rust: [1.78]
go: [1.22]
python: [3.12]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js ${{ matrix.node }}
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node }}
cache: 'npm'
- name: Setup Rust ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
override: true
- name: Setup Python ${{ matrix.python }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python }}
- name: Setup Go ${{ matrix.go }}
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go }}
- name: Install dependencies
run: |
pnpm install --frozen-lockfile
if [ -f "Cargo.toml" ]; then cargo fetch; fi
if [ -f "requirements.txt" ]; then pip install -r requirements.txt; fi
if [ -f "go.mod" ]; then go mod download; fi
# Node.js Property Tests (fast-check)
- name: Run Node.js property tests
if: matrix.node
run: |
pnpm test:property
env:
NODE_ENV: test
FAST_CHECK_RUNS: 1000
# Rust Property Tests (proptest)
- name: Run Rust property tests
if: matrix.rust
run: |
if [ -f "Cargo.toml" ]; then
cargo test --test property_tests -- --nocapture
fi
# Python Property Tests (Hypothesis)
- name: Run Python property tests
if: matrix.python
run: |
if [ -f "requirements.txt" ]; then
python -m pytest tests/property/ --hypothesis-profile=ci
fi
# Go Property Tests (gopter)
- name: Run Go property tests
if: matrix.go
run: |
if [ -f "go.mod" ]; then
go test -v ./tests/property/ -tags=property
fi
- name: Upload property test results
uses: actions/upload-artifact@v4
if: always()
with:
name: property-test-results-${{ matrix.node }}-${{ matrix.rust }}-${{ matrix.go }}-${{ matrix.python }}
path: |
property-test-results/
*.json
# Stage 4: Integration & E2E Tests
integration-e2e:
name: Stage 4 - Integration & E2E Tests
runs-on: ubuntu-latest
timeout-minutes: 120
needs: property-tests
strategy:
matrix:
scenario: [basic-upgrade, ai-patch, formal-proof, full-workflow, dependency-bump, test-failure, claude-patch, lean-reproof, green-build, branch-merge]
services:
postgres:
image: postgres:15
env:
POSTGRES_PASSWORD: postgres
POSTGRES_DB: speccursor_test
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
redis:
image: redis:7-alpine
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 6379:6379
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
- name: Setup Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Setup Lean
uses: leanprover/lean4@v2
with:
lean-version: '4.20.0'
- name: Install dependencies
run: |
npm install -g pnpm@latest
pnpm install --frozen-lockfile
if [ -f "Cargo.toml" ]; then cargo fetch; fi
if [ -f "lakefile.lean" ]; then lake build; fi
- name: Run integration test scenario
run: |
pnpm test:integration --scenario ${{ matrix.scenario }}
env:
DATABASE_URL: postgresql://postgres:postgres@localhost:5432/speccursor_test
REDIS_URL: redis://localhost:6379
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
MORPH_API_KEY: ${{ secrets.MORPH_API_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Run E2E workflow test
run: |
# Simulate full workflow: bump dependency → failing test → Claude patch → Lean re-proof → green build → merge
pnpm test:e2e --workflow ${{ matrix.scenario }}
env:
DATABASE_URL: postgresql://postgres:postgres@localhost:5432/speccursor_test
REDIS_URL: redis://localhost:6379
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
MORPH_API_KEY: ${{ secrets.MORPH_API_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload integration test results
uses: actions/upload-artifact@v4
if: always()
with:
name: integration-e2e-results-${{ matrix.scenario }}
path: |
integration-test-results/
e2e-test-results/
screenshots/
# Stage 5: Load & Stress Tests
load-tests:
name: Stage 5 - Load & Stress Tests
runs-on: ubuntu-latest
timeout-minutes: 90
needs: integration-e2e
strategy:
matrix:
scenario: [upgrade-workflow, ai-patch-generation, formal-verification, full-pipeline]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup k6
uses: grafana/k6-action@v0.3.0
with:
filename: load/${{ matrix.scenario }}.js
flags: --summary-export=load-results-${{ matrix.scenario }}.json
- name: Start test infrastructure
run: |
docker-compose -f docker-compose.test.yml up -d
# Wait for services to be ready
sleep 30
- name: Run k6 load test
run: |
k6 run --out json=load-results-${{ matrix.scenario }}.json \
--summary-export=load-summary-${{ matrix.scenario }}.json \
load/${{ matrix.scenario }}.js
env:
K6_BROKER_URL: http://localhost:8080
K6_DATABASE_URL: postgresql://postgres:postgres@localhost:5432/speccursor_test
K6_REDIS_URL: redis://localhost:6379
- name: Parse and validate results
run: |
node scripts/parse-load-results.js \
--input load-results-${{ matrix.scenario }}.json \
--budget load/budgets.json \
--scenario ${{ matrix.scenario }}
- name: Upload load test results
uses: actions/upload-artifact@v4
if: always()
with:
name: load-test-results-${{ matrix.scenario }}
path: |
load-results-${{ matrix.scenario }}.json
load-summary-${{ matrix.scenario }}.json
load/
# Stage 6: Chaos & Resilience Tests
chaos-tests:
name: Stage 6 - Chaos & Resilience Tests
runs-on: ubuntu-latest
timeout-minutes: 120
needs: load-tests
strategy:
matrix:
experiment: [worker-failure, redis-failure, network-partition, database-failure, memory-pressure]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Kubernetes
uses: azure/setup-kubectl@v3
with:
version: 'latest'
- name: Setup Chaos Mesh
run: |
helm repo add chaos-mesh https://charts.chaos-mesh.org
helm repo update
helm install chaos-mesh chaos-mesh/chaos-mesh --namespace chaos-testing --create-namespace
- name: Deploy test application
run: |
kubectl apply -f chaos/manifests/test-app.yaml
kubectl wait --for=condition=ready pod -l app=speccursor-test --timeout=300s
- name: Run chaos experiment
run: |
kubectl apply -f chaos/experiments/${{ matrix.experiment }}.yaml
# Wait for experiment to complete
sleep 60
- name: Verify resilience
run: |
# Check if system self-healed within 60s
node scripts/verify-resilience.js \
--experiment ${{ matrix.experiment }} \
--timeout 60 \
--no-lost-jobs
- name: Cleanup chaos experiment
run: |
kubectl delete -f chaos/experiments/${{ matrix.experiment }}.yaml || true
- name: Upload chaos test results
uses: actions/upload-artifact@v4
if: always()
with:
name: chaos-test-results-${{ matrix.experiment }}
path: |
chaos-results/
*.log
# Stage 7: Security Scans
security-scans:
name: Stage 7 - Security Scans
runs-on: ubuntu-latest
timeout-minutes: 60
needs: chaos-tests
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
# CodeQL Analysis
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: javascript, python, go
queries: security-extended,security-and-quality
config-file: security/codeql-config.yml
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"
# Semgrep Analysis
- name: Run Semgrep
uses: returntocorp/semgrep-action@v1
with:
config: >-
p/security-audit
p/oss-security-high
p/secrets
security/custom-rules.yml
# Trivy Vulnerability Scanner
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
scan-type: 'fs'
scan-ref: '.'
format: 'sarif'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
# Gitleaks
- name: Run Gitleaks
uses: gitleaks/gitleaks-action@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Custom Security Rules
- name: Run custom security checks
run: |
node security/custom-checks.js \
--ssrf-check \
--deserialization-check \
--injection-check
- name: Upload security scan results
uses: actions/upload-artifact@v4
if: always()
with:
name: security-scan-results
path: |
trivy-results.sarif
semgrep-results.json
codeql-results.sarif
security/
# Stage 8: Vulnerability & SBOM
vulnerability-sbom:
name: Stage 8 - Vulnerability & SBOM
runs-on: ubuntu-latest
timeout-minutes: 45
needs: security-scans
steps:
- name: Checkout code
uses: actions/checkout@v4
# Generate SBOM
- name: Generate SPDX SBOM
uses: anchore/sbom-action@v0
with:
image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
format: spdx-json
output-file: sbom.json
# Sign SBOM
- name: Sign SBOM with Sigstore
uses: sigstore/cosign-installer@v3
with:
cosign-release: 'v2.1.1'
- name: Sign the SBOM
run: |
cosign sign --yes sbom.json
# Verify supply chain
- name: Verify supply chain attestations
run: |
sigstore verify sbom.json \
--certificate-identity-regexp ".*" \
--certificate-oidc-issuer-regexp ".*"
# Upload SBOM
- name: Upload SBOM
uses: actions/upload-artifact@v4
with:
name: sbom
path: sbom.json
# Vulnerability assessment
- name: Assess vulnerabilities
run: |
node security/assess-vulnerabilities.js \
--sbom sbom.json \
--threshold HIGH \
--fail-on-critical
# Stage 9: Observability Assertions
observability:
name: Stage 9 - Observability Assertions
runs-on: ubuntu-latest
timeout-minutes: 60
needs: vulnerability-sbom
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Start observability stack
run: |
docker-compose -f docker-compose.observability.yml up -d
sleep 30
# Check metrics endpoint
- name: Verify metrics endpoint
run: |
curl -f http://localhost:9090/metrics | grep -E "(upgrade_duration_seconds|proof_latency_seconds|ai_tokens_total)" || exit 1
# Test Alertmanager rules
- name: Test Alertmanager rules
run: |
promtool test rules config/alertmanager-rules.yaml
# Synthetic alert test
- name: Run synthetic alert test
run: |
node scripts/test-alerts.js \
--alertmanager-url http://localhost:9093 \
--test-alerts
# Verify OpenTelemetry
- name: Verify OpenTelemetry collector
run: |
curl -f http://localhost:4317/ | grep -q "OpenTelemetry Collector" || exit 1
# Check Grafana dashboards
- name: Verify Grafana dashboards
run: |
curl -f -u admin:admin http://localhost:3000/api/dashboards | jq '.dashboards | length > 0' || exit 1
- name: Upload observability results
uses: actions/upload-artifact@v4
if: always()
with:
name: observability-results
path: |
observability-test-results/
*.log
# Stage 10: Performance Profiling
performance:
name: Stage 10 - Performance Profiling
runs-on: ubuntu-latest
timeout-minutes: 90
needs: observability
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
- name: Setup Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '1.22'
# Node.js profiling
- name: Profile Node.js performance
run: |
npm install -g 0x
0x --output flamegraph-nodejs.html apps/github-app/src/index.ts
# Rust profiling
- name: Profile Rust performance
run: |
if [ -f "Cargo.toml" ]; then
cargo install flamegraph
cargo flamegraph --bin rust-worker
fi
# Go profiling
- name: Profile Go performance
run: |
if [ -f "go.mod" ]; then
go test -cpuprofile=cpu.prof -memprofile=mem.prof ./...
go tool pprof -svg cpu.prof > cpu-profile.svg
go tool pprof -svg mem.prof > mem-profile.svg
fi
# Check resource usage
- name: Check resource usage
run: |
node scripts/check-performance.js \
--cpu-threshold 70 \
--memory-leak-threshold 0.5 \
--connection-pool-check
- name: Upload performance results
uses: actions/upload-artifact@v4
if: always()
with:
name: performance-results
path: |
flamegraph-nodejs.html
cpu-profile.svg
mem-profile.svg
performance-metrics.json
# Stage 11: Cost & Latency Budgets
cost-budgets:
name: Stage 11 - Cost & Latency Budgets
runs-on: ubuntu-latest
timeout-minutes: 60
needs: performance
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
# Simulate 10x daily volume
- name: Run cost simulation
run: |
node scripts/simulate-costs.js \
--daily-upgrades 10000 \
--claude-tokens \
--ecr-egress \
--output cost-simulation.json
# Check against budget
- name: Check cost budget
run: |
node scripts/check-budget.js \
--simulation cost-simulation.json \
--budget docs/budget.md \
--threshold 120
# Latency budget check
- name: Check latency budget
run: |
node scripts/check-latency.js \
--p95-threshold 3000 \
--error-rate-threshold 0.1
- name: Upload cost analysis
uses: actions/upload-artifact@v4
if: always()
with:
name: cost-analysis
path: |
cost-simulation.json
budget-report.json
latency-report.json
# Stage 12: Deployment Drill
deployment-drill:
name: Stage 12 - Deployment Drill
runs-on: ubuntu-latest
timeout-minutes: 120
needs: cost-budgets
environment: staging
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Terraform
uses: hashicorp/setup-terraform@v3
with:
terraform_version: '1.5.0'
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-west-2
# Blue/Green deployment
- name: Deploy blue environment
run: |
cd terraform/staging
terraform apply -var="environment=blue" -var="image_tag=${{ github.sha }}" -auto-approve
- name: Run smoke tests on blue
run: |
pnpm test:smoke --base-url ${{ steps.deploy.outputs.blue_url }}
- name: Switch traffic to blue
run: |
cd terraform/staging
terraform apply -var="active_environment=blue" -auto-approve
# Canary deployment
- name: Deploy canary
run: |
cd terraform/staging
terraform apply -var="canary_percentage=10" -var="image_tag=${{ github.sha }}" -auto-approve
- name: Monitor canary for 30 minutes
run: |
node scripts/monitor-canary.js \
--duration 1800 \
--slo-threshold 0.99 \
--auto-promote
# Rollback simulation
- name: Simulate rollback
run: |
node scripts/simulate-rollback.js \
--previous-version v1.0.0 \
--rollback-time 300
- name: Upload deployment results
uses: actions/upload-artifact@v4
if: always()
with:
name: deployment-drill-results
path: |
deployment-logs/
canary-metrics.json
rollback-test.json
# Final Qualification Gate
qualification-gate:
name: Qualification Gate
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [static-analysis, unit-tests, property-tests, integration-e2e, load-tests, chaos-tests, security-scans, vulnerability-sbom, observability, performance, cost-budgets, deployment-drill]
if: always()
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
- name: Evaluate qualification criteria
run: |
node scripts/evaluate-qualification.js \
--static-analysis ${{ needs.static-analysis.result }} \
--unit-tests ${{ needs.unit-tests.result }} \
--property-tests ${{ needs.property-tests.result }} \
--integration-tests ${{ needs.integration-e2e.result }} \
--load-tests ${{ needs.load-tests.result }} \
--chaos-tests ${{ needs.chaos-tests.result }} \
--security-scans ${{ needs.security-scans.result }} \
--vulnerability-sbom ${{ needs.vulnerability-sbom.result }} \
--observability ${{ needs.observability.result }} \
--performance ${{ needs.performance.result }} \
--cost-budgets ${{ needs.cost-budgets.result }} \
--deployment-drill ${{ needs.deployment-drill.result }}
- name: Sign release candidate
if: success()
run: |
# Sign Docker image with cosign
cosign sign --yes ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
# Create release candidate tag
git tag v${{ github.run_number }}-rc.1
git push origin v${{ github.run_number }}-rc.1
- name: Create qualification report
run: |
node scripts/create-qualification-report.js \
--output qualification-report.md \
--all-stages
- name: Upload qualification report
uses: actions/upload-artifact@v4
with:
name: qualification-report
path: qualification-report.md
- name: Comment on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const report = fs.readFileSync('qualification-report.md', 'utf8');
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## Qualification Report\n\n${report}`
});
# Required status checks for branch protection
# These must pass before merging PRs to main/develop/release/*
# Configure in repository settings > Branches > Branch protection rules