diff --git a/.github/actions/k3s-setup/action.yml b/.github/actions/k3s-setup/action.yml new file mode 100644 index 00000000..d21c4a43 --- /dev/null +++ b/.github/actions/k3s-setup/action.yml @@ -0,0 +1,57 @@ +name: 'K3s Setup' +description: 'Install k3s and create kubeconfig for Docker containers' + +inputs: + namespace: + description: 'Kubernetes namespace to create' + required: false + default: 'integr8scode' + kubeconfig-path: + description: 'Path to write the Docker-accessible kubeconfig' + required: false + default: 'backend/kubeconfig.yaml' + +outputs: + kubeconfig: + description: 'Path to the kubeconfig file for Docker containers' + value: ${{ inputs.kubeconfig-path }} + +runs: + using: 'composite' + steps: + - name: Install k3s + shell: bash + run: | + # --bind-address 0.0.0.0: Listen on all interfaces so Docker containers can reach it + # --tls-san host.docker.internal: Include in cert SANs for Docker container access + curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik --bind-address 0.0.0.0 --tls-san host.docker.internal" sh - + mkdir -p /home/runner/.kube + sudo k3s kubectl config view --raw > /home/runner/.kube/config + sudo chmod 600 /home/runner/.kube/config + + - name: Wait for k3s to be ready + shell: bash + run: | + export KUBECONFIG=/home/runner/.kube/config + timeout 90 bash -c 'until kubectl cluster-info; do sleep 5; done' + + - name: Create namespace + shell: bash + env: + NAMESPACE: ${{ inputs.namespace }} + run: | + export KUBECONFIG=/home/runner/.kube/config + kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f - + + - name: Create kubeconfig for Docker containers + shell: bash + env: + KUBECONFIG_PATH: ${{ inputs.kubeconfig-path }} + run: | + # Replace localhost/0.0.0.0 with host.docker.internal for container access + # (k3s may use 0.0.0.0 when started with --bind-address 0.0.0.0) + sed -E 's#https://(127\.0\.0\.1|0\.0\.0\.0):6443#https://host.docker.internal:6443#g' \ + /home/runner/.kube/config > "$KUBECONFIG_PATH" + chmod 644 "$KUBECONFIG_PATH" + echo "Kubeconfig written to $KUBECONFIG_PATH" + echo "Server URL: $(grep server "$KUBECONFIG_PATH" | head -1)" diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml deleted file mode 100644 index ece0f874..00000000 --- a/.github/workflows/backend-ci.yml +++ /dev/null @@ -1,204 +0,0 @@ -name: Backend CI - -on: - push: - branches: [main, dev] - paths: - - 'backend/**' - - '.github/workflows/backend-ci.yml' - - 'docker-compose.ci.yaml' - pull_request: - branches: [main, dev] - paths: - - 'backend/**' - - '.github/workflows/backend-ci.yml' - - 'docker-compose.ci.yaml' - workflow_dispatch: - -# Pin image versions for cache key consistency -env: - MONGO_IMAGE: mongo:8.0 - REDIS_IMAGE: redis:7-alpine - KAFKA_IMAGE: apache/kafka:3.9.0 - SCHEMA_REGISTRY_IMAGE: confluentinc/cp-schema-registry:7.5.0 - -jobs: - unit: - name: Unit Tests - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v6 - - - name: Set up uv - uses: astral-sh/setup-uv@v7 - with: - enable-cache: true - cache-dependency-glob: "backend/uv.lock" - - - name: Install Python dependencies - run: | - cd backend - uv python install 3.12 - uv sync --frozen - - - name: Run unit tests - timeout-minutes: 5 - run: | - cd backend - uv run pytest tests/unit -v -rs \ - --durations=0 \ - --cov=app \ - --cov-report=xml --cov-report=term - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v5 - if: always() - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: 
backend/coverage.xml - flags: backend-unit - name: backend-unit-coverage - fail_ci_if_error: false - verbose: true - - integration: - name: Integration Tests - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v6 - - - name: Cache and load Docker images - uses: ./.github/actions/docker-cache - with: - images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }} - - - name: Set up uv - uses: astral-sh/setup-uv@v7 - with: - enable-cache: true - cache-dependency-glob: "backend/uv.lock" - - - name: Install Python dependencies - run: | - cd backend - uv python install 3.12 - uv sync --frozen - - - name: Start infrastructure services - run: | - docker compose -f docker-compose.ci.yaml up -d --wait --wait-timeout 120 - docker compose -f docker-compose.ci.yaml ps - - - name: Run integration tests - timeout-minutes: 10 - run: | - cd backend - uv run pytest tests/integration -v -rs \ - --durations=0 \ - --cov=app \ - --cov-report=xml --cov-report=term - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v5 - if: always() - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: backend/coverage.xml - flags: backend-integration - name: backend-integration-coverage - fail_ci_if_error: false - verbose: true - - - name: Collect logs - if: failure() - run: | - mkdir -p logs - docker compose -f docker-compose.ci.yaml logs > logs/docker-compose.log 2>&1 - docker compose -f docker-compose.ci.yaml logs kafka > logs/kafka.log 2>&1 - docker compose -f docker-compose.ci.yaml logs schema-registry > logs/schema-registry.log 2>&1 - - - name: Upload logs - if: failure() - uses: actions/upload-artifact@v6 - with: - name: backend-logs - path: logs/ - - e2e: - name: E2E Tests - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v6 - - - name: Cache and load Docker images - uses: ./.github/actions/docker-cache - with: - images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }} - - - name: Set up uv - uses: astral-sh/setup-uv@v7 - with: - enable-cache: true - cache-dependency-glob: "backend/uv.lock" - - - name: Install Python dependencies - run: | - cd backend - uv python install 3.12 - uv sync --frozen - - - name: Start infrastructure services - run: | - docker compose -f docker-compose.ci.yaml up -d --wait --wait-timeout 120 - docker compose -f docker-compose.ci.yaml ps - - - name: Setup Kubernetes (k3s) - run: | - curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik" sh - - mkdir -p /home/runner/.kube - sudo k3s kubectl config view --raw > /home/runner/.kube/config - sudo chmod 600 /home/runner/.kube/config - export KUBECONFIG=/home/runner/.kube/config - timeout 90 bash -c 'until sudo k3s kubectl cluster-info; do sleep 5; done' - kubectl create namespace integr8scode --dry-run=client -o yaml | kubectl apply -f - - - - name: Run E2E tests - timeout-minutes: 10 - env: - KUBECONFIG: /home/runner/.kube/config - K8S_NAMESPACE: integr8scode - run: | - cd backend - uv run pytest tests/e2e -v -rs \ - --durations=0 \ - --cov=app \ - --cov-report=xml --cov-report=term - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v5 - if: always() - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: backend/coverage.xml - flags: backend-e2e - name: backend-e2e-coverage - fail_ci_if_error: false - verbose: true - - - name: Collect logs - if: failure() - run: | - mkdir -p logs - docker compose -f docker-compose.ci.yaml logs > logs/docker-compose.log 2>&1 - kubectl get 
events --sort-by='.metadata.creationTimestamp' -A > logs/k8s-events.log 2>&1 || true - kubectl describe pods -A > logs/k8s-describe-pods.log 2>&1 || true - - - name: Upload logs - if: failure() - uses: actions/upload-artifact@v6 - with: - name: k8s-logs - path: logs/ diff --git a/.github/workflows/frontend-ci.yml b/.github/workflows/frontend-ci.yml index c36fff8a..fe29a033 100644 --- a/.github/workflows/frontend-ci.yml +++ b/.github/workflows/frontend-ci.yml @@ -6,18 +6,16 @@ on: paths: - 'frontend/**' - '.github/workflows/frontend-ci.yml' - - 'docker-compose.ci.yaml' pull_request: branches: [main, dev] paths: - 'frontend/**' - '.github/workflows/frontend-ci.yml' - - 'docker-compose.ci.yaml' workflow_dispatch: jobs: - unit: - name: Unit Tests + quality: + name: Lint & Type Check runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 @@ -33,167 +31,10 @@ jobs: working-directory: frontend run: npm ci - - name: Run unit tests with coverage + - name: Run ESLint working-directory: frontend - run: npm run test:coverage + run: npm run lint - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v5 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: frontend/coverage/lcov.info - flags: frontend - name: frontend-coverage - fail_ci_if_error: false - verbose: true - - e2e: - name: E2E Tests - needs: unit - runs-on: ubuntu-latest - - # Local registry for buildx to reference base image (docker-container driver is isolated) - services: - registry: - image: registry:2 - ports: - - 5000:5000 - - env: - MONGO_IMAGE: mongo:8.0 - REDIS_IMAGE: redis:7-alpine - KAFKA_IMAGE: apache/kafka:3.9.0 - SCHEMA_REGISTRY_IMAGE: confluentinc/cp-schema-registry:7.5.0 - - steps: - - uses: actions/checkout@v6 - - - name: Cache and load Docker images - uses: ./.github/actions/docker-cache - with: - images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }} - - - name: Setup Node.js - uses: actions/setup-node@v6 - with: - node-version: '22' - cache: 'npm' - cache-dependency-path: frontend/package-lock.json - - - name: Install dependencies - working-directory: frontend - run: npm ci - - - name: Install Playwright browsers + - name: Run svelte-check working-directory: frontend - run: npx playwright install chromium - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver-opts: network=host - - - name: Setup Kubernetes (k3s) - run: | - curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik --tls-san host.docker.internal" sh - - mkdir -p /home/runner/.kube - sudo k3s kubectl config view --raw > /home/runner/.kube/config - sudo chmod 600 /home/runner/.kube/config - export KUBECONFIG=/home/runner/.kube/config - timeout 90 bash -c 'until sudo k3s kubectl cluster-info; do sleep 5; done' - - - name: Create kubeconfig for Docker containers - run: | - # Copy k3s kubeconfig with host.docker.internal for container networking - sed 's|https://127.0.0.1:6443|https://host.docker.internal:6443|g' \ - /home/runner/.kube/config > backend/kubeconfig.yaml - chmod 644 backend/kubeconfig.yaml - - # Build images with GitHub Actions cache for faster subsequent builds - # Base image pushed to local registry so buildx can reference it - - name: Build and push base image - uses: docker/build-push-action@v6 - with: - context: ./backend - file: ./backend/Dockerfile.base - push: true - tags: localhost:5000/integr8scode-base:latest - cache-from: type=gha,scope=backend-base - cache-to: type=gha,mode=max,scope=backend-base - - # Pull base to Docker 
daemon (needed for docker-compose) - - name: Load base image to Docker daemon - run: | - docker pull localhost:5000/integr8scode-base:latest - docker tag localhost:5000/integr8scode-base:latest integr8scode-base:latest - - - name: Build backend image - uses: docker/build-push-action@v6 - with: - context: ./backend - file: ./backend/Dockerfile - load: true - tags: integr8scode-backend:latest - build-contexts: | - base=docker-image://localhost:5000/integr8scode-base:latest - cache-from: type=gha,scope=backend - cache-to: type=gha,mode=max,scope=backend - - - name: Build cert-generator image - uses: docker/build-push-action@v6 - with: - context: ./cert-generator - file: ./cert-generator/Dockerfile - load: true - tags: integr8scode-cert-generator:latest - cache-from: type=gha,scope=cert-generator - cache-to: type=gha,mode=max,scope=cert-generator - - - name: Build frontend image - uses: docker/build-push-action@v6 - with: - context: ./frontend - file: ./frontend/Dockerfile - load: true - tags: integr8scode-frontend:latest - cache-from: type=gha,scope=frontend - cache-to: type=gha,mode=max,scope=frontend - - - name: Start full stack - run: | - docker compose -f docker-compose.ci.yaml --profile full up -d --wait --wait-timeout 300 - docker compose -f docker-compose.ci.yaml ps - - - name: Seed test users - run: | - docker compose -f docker-compose.ci.yaml exec -T backend uv run python scripts/seed_users.py - - - name: Run E2E tests - working-directory: frontend - env: - CI: true - run: npx playwright test --reporter=html - - - name: Upload Playwright report - uses: actions/upload-artifact@v6 - if: always() - with: - name: playwright-report - path: frontend/playwright-report/ - - - name: Collect logs - if: failure() - run: | - mkdir -p logs - docker compose -f docker-compose.ci.yaml logs > logs/docker-compose.log 2>&1 - docker compose -f docker-compose.ci.yaml logs backend > logs/backend.log 2>&1 - docker compose -f docker-compose.ci.yaml logs frontend > logs/frontend.log 2>&1 - docker compose -f docker-compose.ci.yaml logs kafka > logs/kafka.log 2>&1 - kubectl get events --sort-by='.metadata.creationTimestamp' -A > logs/k8s-events.log 2>&1 || true - - - name: Upload logs - if: failure() - uses: actions/upload-artifact@v6 - with: - name: frontend-e2e-logs - path: logs/ + run: npm run check diff --git a/.github/workflows/stack-tests.yml b/.github/workflows/stack-tests.yml new file mode 100644 index 00000000..8711ea9f --- /dev/null +++ b/.github/workflows/stack-tests.yml @@ -0,0 +1,418 @@ +name: Stack Tests + +on: + push: + branches: [main, dev] + paths: + - 'backend/**' + - 'frontend/**' + - 'docker-compose.yaml' + - 'deploy.sh' + - '.github/workflows/stack-tests.yml' + - '.github/actions/**' + pull_request: + branches: [main, dev] + paths: + - 'backend/**' + - 'frontend/**' + - 'docker-compose.yaml' + - 'deploy.sh' + - '.github/workflows/stack-tests.yml' + - '.github/actions/**' + workflow_dispatch: + +env: + MONGO_IMAGE: mongo:8.0 + REDIS_IMAGE: redis:7-alpine + KAFKA_IMAGE: confluentinc/cp-kafka:7.8.2 + ZOOKEEPER_IMAGE: confluentinc/cp-zookeeper:7.8.2 + SCHEMA_REGISTRY_IMAGE: confluentinc/cp-schema-registry:7.8.2 + +jobs: + # Fast unit tests (no infrastructure needed) + backend-unit: + name: Backend Unit Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Set up uv + uses: astral-sh/setup-uv@v7 + with: + enable-cache: true + cache-dependency-glob: "backend/uv.lock" + + - name: Install Python dependencies + run: | + cd backend + uv python install 3.12 + uv sync 
--frozen + + - name: Run unit tests + timeout-minutes: 5 + run: | + cd backend + uv run pytest tests/unit -v -rs \ + --durations=0 \ + --cov=app \ + --cov-report=xml --cov-report=term + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + if: always() + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: backend/coverage.xml + flags: backend-unit + name: backend-unit-coverage + fail_ci_if_error: false + verbose: true + + frontend-unit: + name: Frontend Unit Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version: '22' + cache: 'npm' + cache-dependency-path: frontend/package-lock.json + + - name: Install dependencies + working-directory: frontend + run: npm ci + + - name: Run unit tests with coverage + working-directory: frontend + run: npm run test:coverage + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: frontend/coverage/lcov.info + flags: frontend-unit + name: frontend-unit-coverage + fail_ci_if_error: false + verbose: true + + # Build all images once, cache for test jobs + build-images: + name: Build Images + needs: [backend-unit, frontend-unit] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Cache base image + uses: actions/cache@v4 + id: base-cache + with: + path: /tmp/base-image.tar.zst + key: base-image-${{ runner.os }}-${{ hashFiles('backend/Dockerfile.base', 'backend/pyproject.toml', 'backend/uv.lock') }} + + - name: Load base image from cache + if: steps.base-cache.outputs.cache-hit == 'true' + run: zstd -d -c /tmp/base-image.tar.zst | docker load + + - name: Build base image + if: steps.base-cache.outputs.cache-hit != 'true' + uses: docker/build-push-action@v6 + with: + context: ./backend + file: ./backend/Dockerfile.base + load: true + tags: integr8scode-base:latest + cache-from: type=gha,scope=backend-base + cache-to: type=gha,mode=max,scope=backend-base + + - name: Save base image to cache + if: steps.base-cache.outputs.cache-hit != 'true' + run: docker save integr8scode-base:latest | zstd -T0 -3 > /tmp/base-image.tar.zst + + - name: Build all images + run: | + docker build -t integr8scode-backend:latest --build-context base=docker-image://integr8scode-base:latest -f ./backend/Dockerfile ./backend + docker build -t integr8scode-coordinator:latest -f backend/workers/Dockerfile.coordinator --build-context base=docker-image://integr8scode-base:latest ./backend + docker build -t integr8scode-k8s-worker:latest -f backend/workers/Dockerfile.k8s_worker --build-context base=docker-image://integr8scode-base:latest ./backend + docker build -t integr8scode-pod-monitor:latest -f backend/workers/Dockerfile.pod_monitor --build-context base=docker-image://integr8scode-base:latest ./backend + docker build -t integr8scode-result-processor:latest -f backend/workers/Dockerfile.result_processor --build-context base=docker-image://integr8scode-base:latest ./backend + docker build -t integr8scode-saga-orchestrator:latest -f backend/workers/Dockerfile.saga_orchestrator --build-context base=docker-image://integr8scode-base:latest ./backend + + - name: Build cert-generator image + uses: docker/build-push-action@v6 + with: + context: ./cert-generator + file: ./cert-generator/Dockerfile + load: true + tags: integr8scode-cert-generator:latest + cache-from: type=gha,scope=cert-generator + cache-to: 
type=gha,mode=max,scope=cert-generator + + - name: Build frontend image + uses: docker/build-push-action@v6 + with: + context: ./frontend + file: ./frontend/Dockerfile + load: true + tags: integr8scode-frontend:latest + cache-from: type=gha,scope=frontend + cache-to: type=gha,mode=max,scope=frontend + + - name: Save all images + run: | + docker save \ + integr8scode-backend:latest \ + integr8scode-coordinator:latest \ + integr8scode-k8s-worker:latest \ + integr8scode-pod-monitor:latest \ + integr8scode-result-processor:latest \ + integr8scode-saga-orchestrator:latest \ + integr8scode-cert-generator:latest \ + integr8scode-frontend:latest \ + | zstd -T0 -3 > /tmp/all-images.tar.zst + + - name: Upload images artifact + uses: actions/upload-artifact@v6 + with: + name: docker-images + path: /tmp/all-images.tar.zst + retention-days: 1 + + # Three parallel test jobs + backend-integration: + name: Backend Integration Tests + needs: [build-images] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Cache and load Docker images + uses: ./.github/actions/docker-cache + with: + images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.ZOOKEEPER_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }} + + - name: Download built images + uses: actions/download-artifact@v7 + with: + name: docker-images + path: /tmp + + - name: Load built images + run: zstd -d -c /tmp/all-images.tar.zst | docker load + + - name: Setup k3s + uses: ./.github/actions/k3s-setup + + - name: Use test environment config + run: cp backend/.env.test backend/.env + + - name: Start stack + run: ./deploy.sh dev --wait + + - name: Run integration tests + timeout-minutes: 10 + run: | + docker compose exec -T -e TEST_RUN_ID=integration backend \ + uv run pytest tests/integration -v -rs \ + --durations=0 \ + --cov=app \ + --cov-report=xml:coverage-integration.xml \ + --cov-report=term + + - name: Copy coverage + if: always() + run: docker compose cp backend:/app/coverage-integration.xml backend/coverage-integration.xml || true + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + if: always() + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: backend/coverage-integration.xml + flags: backend-integration + name: backend-integration-coverage + fail_ci_if_error: false + + - name: Collect logs on failure + if: failure() + run: | + mkdir -p logs + docker compose logs > logs/docker-compose.log 2>&1 + docker compose logs backend > logs/backend.log 2>&1 + docker compose logs kafka > logs/kafka.log 2>&1 + + - name: Upload logs + if: failure() + uses: actions/upload-artifact@v6 + with: + name: backend-integration-logs + path: logs/ + + backend-e2e: + name: Backend E2E Tests + needs: [build-images] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Cache and load Docker images + uses: ./.github/actions/docker-cache + with: + images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.ZOOKEEPER_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }} + + - name: Download built images + uses: actions/download-artifact@v7 + with: + name: docker-images + path: /tmp + + - name: Load built images + run: zstd -d -c /tmp/all-images.tar.zst | docker load + + - name: Setup k3s + uses: ./.github/actions/k3s-setup + + - name: Use test environment config + run: cp backend/.env.test backend/.env + + - name: Start stack + run: ./deploy.sh dev --wait + + - name: Seed test users + run: docker compose exec -T backend uv run python scripts/seed_users.py + + - name: Run 
E2E tests + timeout-minutes: 10 + run: | + docker compose exec -T -e TEST_RUN_ID=e2e backend \ + uv run pytest tests/e2e -v -rs \ + --durations=0 \ + --cov=app \ + --cov-report=xml:coverage-e2e.xml \ + --cov-report=term + + - name: Copy coverage + if: always() + run: docker compose cp backend:/app/coverage-e2e.xml backend/coverage-e2e.xml || true + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + if: always() + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: backend/coverage-e2e.xml + flags: backend-e2e + name: backend-e2e-coverage + fail_ci_if_error: false + + - name: Collect logs on failure + if: failure() + run: | + mkdir -p logs + docker compose logs > logs/docker-compose.log 2>&1 + docker compose logs backend > logs/backend.log 2>&1 + docker compose logs kafka > logs/kafka.log 2>&1 + docker compose logs coordinator > logs/coordinator.log 2>&1 || true + docker compose logs k8s-worker > logs/k8s-worker.log 2>&1 || true + kubectl get events --sort-by='.metadata.creationTimestamp' -A > logs/k8s-events.log 2>&1 || true + + - name: Upload logs + if: failure() + uses: actions/upload-artifact@v6 + with: + name: backend-e2e-logs + path: logs/ + + frontend-e2e: + name: Frontend E2E Tests + needs: [build-images] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version: '22' + cache: 'npm' + cache-dependency-path: frontend/package-lock.json + + - name: Install frontend dependencies + working-directory: frontend + run: npm ci + + - name: Cache Playwright browsers + uses: actions/cache@v4 + id: playwright-cache + with: + path: ~/.cache/ms-playwright + key: playwright-${{ runner.os }}-${{ hashFiles('frontend/package-lock.json') }} + + - name: Install Playwright system dependencies + working-directory: frontend + run: npx playwright install-deps chromium + + - name: Install Playwright browsers + if: steps.playwright-cache.outputs.cache-hit != 'true' + working-directory: frontend + run: npx playwright install chromium + + - name: Cache and load Docker images + uses: ./.github/actions/docker-cache + with: + images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.ZOOKEEPER_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }} + + - name: Download built images + uses: actions/download-artifact@v7 + with: + name: docker-images + path: /tmp + + - name: Load built images + run: zstd -d -c /tmp/all-images.tar.zst | docker load + + - name: Setup k3s + uses: ./.github/actions/k3s-setup + + - name: Use test environment config + run: cp backend/.env.test backend/.env + + - name: Start stack + run: ./deploy.sh dev --wait + + - name: Seed test users + run: docker compose exec -T backend uv run python scripts/seed_users.py + + - name: Run Playwright tests + timeout-minutes: 10 + working-directory: frontend + run: CI=true npx playwright test + + - name: Upload Playwright report + uses: actions/upload-artifact@v6 + if: always() + with: + name: playwright-report + path: frontend/playwright-report/ + + - name: Collect logs on failure + if: failure() + run: | + mkdir -p logs + docker compose logs > logs/docker-compose.log 2>&1 + docker compose logs backend > logs/backend.log 2>&1 + docker compose logs frontend > logs/frontend.log 2>&1 + + - name: Upload logs + if: failure() + uses: actions/upload-artifact@v6 + with: + name: frontend-e2e-logs + path: logs/ diff --git a/README.md b/README.md index a7327140..cb6d99ea 100644 --- a/README.md +++ b/README.md @@ -15,11 +15,14 @@ Docker Scan Status 
-    [Backend Tests badge]
+    [Backend Tests badge]
+    [Frontend Tests badge]
-    [Frontend Tests badge]
+    [Frontend Lint badge]

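Note: `stack-tests.yml` below consumes the new composite action bare (`uses: ./.github/actions/k3s-setup`), relying on both input defaults. A minimal sketch of a consumer that overrides the inputs and reads the `kubeconfig` output — the job name, step names, and the `bitnami/kubectl` image are illustrative, not part of this PR:

```yaml
# Hypothetical consumer of .github/actions/k3s-setup.
# Both inputs are optional; the values shown are the action's defaults.
jobs:
  e2e:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Setup k3s
        id: k3s
        uses: ./.github/actions/k3s-setup
        with:
          namespace: integr8scode
          kubeconfig-path: backend/kubeconfig.yaml

      # The `kubeconfig` output echoes kubeconfig-path: a copy of the k3s
      # kubeconfig with the server URL rewritten to host.docker.internal:6443,
      # which resolves inside containers (here via the host gateway), not on
      # the runner host itself.
      - name: Smoke-test API access from a container
        run: |
          docker run --rm --add-host host.docker.internal:host-gateway \
            -v "$PWD/${{ steps.k3s.outputs.kubeconfig }}:/kubeconfig:ro" \
            bitnami/kubectl:latest --kubeconfig /kubeconfig get namespaces
```

This works because the action starts k3s with `--bind-address 0.0.0.0` (reachable from the Docker bridge) and `--tls-san host.docker.internal` (so TLS verification succeeds against the rewritten server URL).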
diff --git a/backend/.env b/backend/.env index aa213436..01b22742 100644 --- a/backend/.env +++ b/backend/.env @@ -13,8 +13,8 @@ K8S_POD_MEMORY_LIMIT=128Mi K8S_POD_CPU_REQUEST=200m K8S_POD_MEMORY_REQUEST=128Mi K8S_POD_EXECUTION_TIMEOUT=5 +K8S_NAMESPACE=integr8scode RATE_LIMITS=100/minute -RATE_LIMIT_ENABLED=false # Event-Driven Design Configuration KAFKA_BOOTSTRAP_SERVERS=kafka:29092 @@ -81,3 +81,6 @@ SERVER_HOST=127.0.0.1 # Security BCRYPT_ROUNDS=12 + +# Redis Configuration +REDIS_MAX_CONNECTIONS=200 diff --git a/backend/.env.test b/backend/.env.test index 7d175192..68b4d5d8 100644 --- a/backend/.env.test +++ b/backend/.env.test @@ -1,51 +1,84 @@ -# Test environment configuration PROJECT_NAME=integr8scode -DATABASE_NAME=integr8scode_test -API_V1_STR=/api/v1 -SECRET_KEY=test-secret-key-for-testing-only-32chars!! -TESTING=true - -# MongoDB - use localhost for tests -MONGODB_URL=mongodb://root:rootpassword@localhost:27017/?authSource=admin -MONGO_ROOT_USER=root -MONGO_ROOT_PASSWORD=rootpassword - -# Redis - use localhost for tests -REDIS_HOST=localhost -REDIS_PORT=6379 -REDIS_DB=0 -REDIS_PASSWORD= -REDIS_SSL=false -REDIS_MAX_CONNECTIONS=50 -REDIS_DECODE_RESPONSES=true - -# Kafka - use localhost for tests -KAFKA_BOOTSTRAP_SERVERS=localhost:9092 -KAFKA_TOPIC_PREFIX=test. -SCHEMA_SUBJECT_PREFIX=test. -SCHEMA_REGISTRY_URL=http://localhost:8081 - -# Reduce consumer pool and timeouts for faster test startup/teardown -# https://github.com/aio-libs/aiokafka/issues/773 -SSE_CONSUMER_POOL_SIZE=1 -KAFKA_SESSION_TIMEOUT_MS=6000 -KAFKA_HEARTBEAT_INTERVAL_MS=2000 -KAFKA_REQUEST_TIMEOUT_MS=5000 +DATABASE_NAME=integr8scode_db +SECRET_KEY=${SECRET_KEY:-uS5xBF-OKXHV-1vqU4ASLwyPcKpSdUTLqGHPYs3y-Yc} +ALGORITHM=HS256 +ACCESS_TOKEN_EXPIRE_MINUTES=1440 +MONGO_ROOT_USER="${MONGO_ROOT_USER:-root}" +MONGO_ROOT_PASSWORD="${MONGO_ROOT_PASSWORD:-rootpassword}" +MONGODB_URL="mongodb://${MONGO_ROOT_USER}:${MONGO_ROOT_PASSWORD}@mongo:27017/integr8scode?authSource=admin" +KUBERNETES_CONFIG_PATH=/app/kubeconfig.yaml +KUBERNETES_CA_CERTIFICATE_PATH=/app/certs/k8s-ca.pem +K8S_POD_CPU_LIMIT=1000m +K8S_POD_MEMORY_LIMIT=128Mi +K8S_POD_CPU_REQUEST=200m +K8S_POD_MEMORY_REQUEST=128Mi +K8S_POD_EXECUTION_TIMEOUT=5 +K8S_NAMESPACE=integr8scode +RATE_LIMITS=99999/second +RATE_LIMIT_ENABLED=false -# Security -SECURE_COOKIES=true -BCRYPT_ROUNDS=4 +# Event-Driven Design Configuration +KAFKA_BOOTSTRAP_SERVERS=kafka:29092 +SCHEMA_REGISTRY_URL=http://schema-registry:8081 +ENABLE_EVENT_STREAMING=true +EVENT_RETENTION_DAYS=30 +KAFKA_CONSUMER_GROUP_ID=integr8scode-backend +KAFKA_AUTO_OFFSET_RESET=earliest +KAFKA_ENABLE_AUTO_COMMIT=true +KAFKA_SESSION_TIMEOUT_MS=10000 +KAFKA_HEARTBEAT_INTERVAL_MS=3000 +KAFKA_REQUEST_TIMEOUT_MS=15000 +KAFKA_MAX_POLL_RECORDS=500 + +# WebSocket Configuration +WEBSOCKET_PING_INTERVAL=30 +WEBSOCKET_PING_TIMEOUT=10 + +# Logging Configuration +LOG_LEVEL=WARNING +WEBSOCKET_MAX_CONNECTIONS_PER_USER=5 +WEBSOCKET_STALE_CONNECTION_TIMEOUT=300 + +# Distributed Tracing +ENABLE_TRACING=true +JAEGER_AGENT_HOST=jaeger +JAEGER_AGENT_PORT=6831 +TRACING_SERVICE_NAME=integr8scode-backend +TRACING_SERVICE_VERSION=1.0.0 +TRACING_SAMPLING_RATE=1.0 + +# Dead Letter Queue Configuration +DLQ_RETRY_MAX_ATTEMPTS=5 +DLQ_RETRY_BASE_DELAY_SECONDS=60.0 +DLQ_RETRY_MAX_DELAY_SECONDS=3600.0 +DLQ_RETENTION_DAYS=7 +DLQ_WARNING_THRESHOLD=100 +DLQ_CRITICAL_THRESHOLD=1000 -# Features -RATE_LIMIT_ENABLED=true -ENABLE_TRACING=false +# App URL for notification links +APP_URL=https://localhost -# OpenTelemetry - disabled for tests -# Empty endpoint prevents 
OTLP exporter creation in setup_metrics() -# OTEL_SDK_DISABLED=true (set via pytest-env) provides additional safety -OTEL_EXPORTER_OTLP_ENDPOINT= +# Service Configuration +SERVICE_NAME=integr8scode-backend +SERVICE_VERSION=1.0.0 -# Development -DEVELOPMENT_MODE=false -LOG_LEVEL=INFO -ENVIRONMENT=test +# OpenTelemetry Configuration +OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4317 +OTEL_SERVICE_NAME=integr8scode-backend +OTEL_SERVICE_VERSION=1.0.0 +OTEL_RESOURCE_ATTRIBUTES=environment=test,team=backend + +# Web server concurrency settings (Gunicorn + Uvicorn workers) +# Tune these for your machine. Defaults are safe for dev. +WEB_CONCURRENCY=1 +WEB_THREADS=4 +WEB_TIMEOUT=60 +WEB_BACKLOG=2048 + +# Local development server bind address +# When running uvicorn locally (outside Docker), bind to IPv4 loopback to avoid +# IPv6-only localhost resolution on some Linux distros. +SERVER_HOST=127.0.0.1 + +# Security +BCRYPT_ROUNDS=4 diff --git a/backend/app/core/container.py b/backend/app/core/container.py index 97e0c48f..b67f133a 100644 --- a/backend/app/core/container.py +++ b/backend/app/core/container.py @@ -8,7 +8,6 @@ CoordinatorProvider, CoreServicesProvider, DatabaseProvider, - DLQProcessorProvider, EventProvider, EventReplayProvider, K8sWorkerProvider, @@ -119,6 +118,7 @@ def create_pod_monitor_container(settings: Settings) -> AsyncContainer: SettingsProvider(), LoggingProvider(), DatabaseProvider(), + RedisProvider(), CoreServicesProvider(), MetricsProvider(), RepositoryProvider(), @@ -154,6 +154,7 @@ def create_event_replay_container(settings: Settings) -> AsyncContainer: SettingsProvider(), LoggingProvider(), DatabaseProvider(), + RedisProvider(), CoreServicesProvider(), MetricsProvider(), RepositoryProvider(), @@ -170,10 +171,11 @@ def create_dlq_processor_container(settings: Settings) -> AsyncContainer: SettingsProvider(), LoggingProvider(), DatabaseProvider(), + RedisProvider(), CoreServicesProvider(), MetricsProvider(), RepositoryProvider(), + MessagingProvider(), EventProvider(), - DLQProcessorProvider(), context={Settings: settings}, ) diff --git a/backend/app/core/dishka_lifespan.py b/backend/app/core/dishka_lifespan.py index d419bf54..3a91ee1d 100644 --- a/backend/app/core/dishka_lifespan.py +++ b/backend/app/core/dishka_lifespan.py @@ -1,3 +1,4 @@ +import asyncio import logging from contextlib import AsyncExitStack, asynccontextmanager from typing import AsyncGenerator @@ -8,7 +9,8 @@ from fastapi import FastAPI from app.core.database_context import Database -from app.core.startup import initialize_metrics_context, initialize_rate_limits +from app.core.metrics import RateLimitMetrics +from app.core.startup import initialize_rate_limits from app.core.tracing import init_tracing from app.db.docs import ALL_DOCUMENTS from app.events.event_store_consumer import EventStoreConsumer @@ -71,35 +73,38 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: extra={"testing": settings.TESTING, "enable_tracing": settings.ENABLE_TRACING}, ) - # Initialize schema registry once at startup - schema_registry = await container.get(SchemaRegistryManager) - await initialize_event_schemas(schema_registry) - - # Initialize Beanie ODM with database from DI container - database = await container.get(Database) - await init_beanie(database=database, document_models=ALL_DOCUMENTS) - logger.info(f"Beanie ODM initialized with {len(ALL_DOCUMENTS)} document models") - - # Initialize metrics context with instances from DI container - # This must happen early so services can access metrics via 
contextvars - await initialize_metrics_context(container, logger) - logger.info("Metrics context initialized with contextvars") - - # Initialize default rate limits in Redis - redis_client = await container.get(redis.Redis) - await initialize_rate_limits(redis_client, settings, logger) - logger.info("Rate limits initialized in Redis") - - # Rate limit middleware added during app creation; service resolved lazily at runtime + # Phase 1: Resolve all DI dependencies in parallel + ( + schema_registry, + database, + redis_client, + rate_limit_metrics, + sse_bridge, + event_store_consumer, + ) = await asyncio.gather( + container.get(SchemaRegistryManager), + container.get(Database), + container.get(redis.Redis), + container.get(RateLimitMetrics), + container.get(SSEKafkaRedisBridge), + container.get(EventStoreConsumer), + ) - # Acquire long-lived services and manage lifecycle via AsyncExitStack - sse_bridge = await container.get(SSEKafkaRedisBridge) - event_store_consumer = await container.get(EventStoreConsumer) + # Phase 2: Initialize infrastructure in parallel (independent subsystems) + await asyncio.gather( + initialize_event_schemas(schema_registry), + init_beanie(database=database, document_models=ALL_DOCUMENTS), + initialize_rate_limits(redis_client, settings, logger, rate_limit_metrics), + ) + logger.info("Infrastructure initialized (schemas, beanie, rate limits)") + # Phase 3: Start Kafka consumers in parallel async with AsyncExitStack() as stack: - await stack.enter_async_context(sse_bridge) - logger.info("SSE Kafka→Redis bridge started with consumer pool") - await stack.enter_async_context(event_store_consumer) - logger.info("EventStoreConsumer started - events will be persisted to MongoDB") - logger.info("All services initialized by DI and managed by AsyncExitStack") + stack.push_async_callback(sse_bridge.aclose) + stack.push_async_callback(event_store_consumer.aclose) + await asyncio.gather( + sse_bridge.__aenter__(), + event_store_consumer.__aenter__(), + ) + logger.info("SSE bridge and EventStoreConsumer started") yield diff --git a/backend/app/core/metrics/context.py b/backend/app/core/metrics/context.py deleted file mode 100644 index dd87c3b2..00000000 --- a/backend/app/core/metrics/context.py +++ /dev/null @@ -1,285 +0,0 @@ -import contextvars -import logging -from typing import Any, Generic, Optional, Type, TypeVar - -from app.core.metrics import ( - ConnectionMetrics, - CoordinatorMetrics, - DatabaseMetrics, - DLQMetrics, - EventMetrics, - ExecutionMetrics, - HealthMetrics, - KubernetesMetrics, - NotificationMetrics, - RateLimitMetrics, - ReplayMetrics, - SecurityMetrics, -) - -# Type variable for generic metrics -T = TypeVar("T") - - -class MetricsContextVar(Generic[T]): - """ - A wrapper around contextvars.ContextVar for type-safe metrics access. - - This class ensures that each metric type has its own context variable - and provides a clean interface for getting and setting metrics. - """ - - def __init__(self, name: str, metric_class: Type[T], logger: logging.Logger) -> None: - """ - Initialize a metrics context variable. - - Args: - name: Name for the context variable (for debugging) - metric_class: The class of the metric this context holds - logger: Logger instance for logging - """ - self._context_var: contextvars.ContextVar[Optional[T]] = contextvars.ContextVar(f"metrics_{name}", default=None) - self._metric_class = metric_class - self._name = name - self.logger = logger - - def get(self) -> T: - """ - Get the metric from context. 
- - Returns: - The metric instance for the current context - - Raises: - RuntimeError: If metrics not initialized via DI - """ - metric = self._context_var.get() - if metric is None: - raise RuntimeError( - f"{self._name} metrics not initialized. " - "Ensure MetricsContext.initialize_all() is called during app startup." - ) - return metric - - def set(self, metric: T) -> contextvars.Token[Optional[T]]: - """ - Set the metric in the current context. - - Args: - metric: The metric instance to set - - Returns: - A token that can be used to reset the context - """ - return self._context_var.set(metric) - - def reset(self) -> None: - """Reset the metric to None in the current context.""" - self._context_var.set(None) - - def is_set(self) -> bool: - """Check if a metric is set in the current context.""" - return self._context_var.get() is not None - - -# Module-level logger for lazy initialization -_module_logger: Optional[logging.Logger] = None - - -def _get_module_logger() -> logging.Logger: - """Get or create module logger for lazy initialization.""" - global _module_logger - if _module_logger is None: - _module_logger = logging.getLogger(__name__) - return _module_logger - - -# Create module-level context variables for each metric type -# These are singletons that live for the lifetime of the application -_connection_ctx = MetricsContextVar("connection", ConnectionMetrics, _get_module_logger()) -_coordinator_ctx = MetricsContextVar("coordinator", CoordinatorMetrics, _get_module_logger()) -_database_ctx = MetricsContextVar("database", DatabaseMetrics, _get_module_logger()) -_dlq_ctx = MetricsContextVar("dlq", DLQMetrics, _get_module_logger()) -_event_ctx = MetricsContextVar("event", EventMetrics, _get_module_logger()) -_execution_ctx = MetricsContextVar("execution", ExecutionMetrics, _get_module_logger()) -_health_ctx = MetricsContextVar("health", HealthMetrics, _get_module_logger()) -_kubernetes_ctx = MetricsContextVar("kubernetes", KubernetesMetrics, _get_module_logger()) -_notification_ctx = MetricsContextVar("notification", NotificationMetrics, _get_module_logger()) -_rate_limit_ctx = MetricsContextVar("rate_limit", RateLimitMetrics, _get_module_logger()) -_replay_ctx = MetricsContextVar("replay", ReplayMetrics, _get_module_logger()) -_security_ctx = MetricsContextVar("security", SecurityMetrics, _get_module_logger()) - - -class MetricsContext: - """ - Central manager for all metrics contexts. - - This class provides a unified interface for managing all metric types - in the application. It handles initialization at startup and provides - access methods for each metric type. - """ - - @classmethod - def initialize_all(cls, logger: logging.Logger, **metrics: Any) -> None: - """ - Initialize all metrics contexts at application startup. - - This should be called once during application initialization, - typically in the startup sequence after dependency injection - has created the metric instances. 
- - Args: - **metrics: Keyword arguments mapping metric names to instances - e.g., event=EventMetrics(), connection=ConnectionMetrics() - """ - for name, metric_instance in metrics.items(): - if name == "connection": - _connection_ctx.set(metric_instance) - elif name == "coordinator": - _coordinator_ctx.set(metric_instance) - elif name == "database": - _database_ctx.set(metric_instance) - elif name == "dlq": - _dlq_ctx.set(metric_instance) - elif name == "event": - _event_ctx.set(metric_instance) - elif name == "execution": - _execution_ctx.set(metric_instance) - elif name == "health": - _health_ctx.set(metric_instance) - elif name == "kubernetes": - _kubernetes_ctx.set(metric_instance) - elif name == "notification": - _notification_ctx.set(metric_instance) - elif name == "rate_limit": - _rate_limit_ctx.set(metric_instance) - elif name == "replay": - _replay_ctx.set(metric_instance) - elif name == "security": - _security_ctx.set(metric_instance) - else: - logger.warning(f"Unknown metric type: {name}") - continue - logger.info(f"Initialized {name} metrics in context") - - @classmethod - def reset_all(cls, logger: logging.Logger) -> None: - """ - Reset all metrics contexts. - - This is primarily useful for testing to ensure a clean state - between test cases. - """ - _connection_ctx.reset() - _coordinator_ctx.reset() - _database_ctx.reset() - _dlq_ctx.reset() - _event_ctx.reset() - _execution_ctx.reset() - _health_ctx.reset() - _kubernetes_ctx.reset() - _notification_ctx.reset() - _rate_limit_ctx.reset() - _replay_ctx.reset() - _security_ctx.reset() - logger.debug("Reset all metrics contexts") - - @classmethod - def get_connection_metrics(cls) -> ConnectionMetrics: - return _connection_ctx.get() - - @classmethod - def get_coordinator_metrics(cls) -> CoordinatorMetrics: - return _coordinator_ctx.get() - - @classmethod - def get_database_metrics(cls) -> DatabaseMetrics: - return _database_ctx.get() - - @classmethod - def get_dlq_metrics(cls) -> DLQMetrics: - return _dlq_ctx.get() - - @classmethod - def get_event_metrics(cls) -> EventMetrics: - return _event_ctx.get() - - @classmethod - def get_execution_metrics(cls) -> ExecutionMetrics: - return _execution_ctx.get() - - @classmethod - def get_health_metrics(cls) -> HealthMetrics: - return _health_ctx.get() - - @classmethod - def get_kubernetes_metrics(cls) -> KubernetesMetrics: - return _kubernetes_ctx.get() - - @classmethod - def get_notification_metrics(cls) -> NotificationMetrics: - return _notification_ctx.get() - - @classmethod - def get_rate_limit_metrics(cls) -> RateLimitMetrics: - return _rate_limit_ctx.get() - - @classmethod - def get_replay_metrics(cls) -> ReplayMetrics: - return _replay_ctx.get() - - @classmethod - def get_security_metrics(cls) -> SecurityMetrics: - return _security_ctx.get() - - -# Convenience functions for direct access with proper type annotations -# Import types with forward references to avoid circular imports - - -def get_connection_metrics() -> ConnectionMetrics: - return MetricsContext.get_connection_metrics() - - -def get_coordinator_metrics() -> CoordinatorMetrics: - return MetricsContext.get_coordinator_metrics() - - -def get_database_metrics() -> DatabaseMetrics: - return MetricsContext.get_database_metrics() - - -def get_dlq_metrics() -> DLQMetrics: - return MetricsContext.get_dlq_metrics() - - -def get_event_metrics() -> EventMetrics: - return MetricsContext.get_event_metrics() - - -def get_execution_metrics() -> ExecutionMetrics: - return MetricsContext.get_execution_metrics() - - -def 
get_health_metrics() -> HealthMetrics: - return MetricsContext.get_health_metrics() - - -def get_kubernetes_metrics() -> KubernetesMetrics: - return MetricsContext.get_kubernetes_metrics() - - -def get_notification_metrics() -> NotificationMetrics: - return MetricsContext.get_notification_metrics() - - -def get_rate_limit_metrics() -> RateLimitMetrics: - return MetricsContext.get_rate_limit_metrics() - - -def get_replay_metrics() -> ReplayMetrics: - return MetricsContext.get_replay_metrics() - - -def get_security_metrics() -> SecurityMetrics: - return MetricsContext.get_security_metrics() diff --git a/backend/app/core/metrics/events.py b/backend/app/core/metrics/events.py index f74e94b6..bd417078 100644 --- a/backend/app/core/metrics/events.py +++ b/backend/app/core/metrics/events.py @@ -5,17 +5,16 @@ class EventMetrics(BaseMetrics): """Metrics for event processing and Kafka. This class tracks metrics related to event processing, event buffers, - and Kafka message production/consumption. It's now accessed through - the contextvars-based MetricsContext system rather than a singleton. + and Kafka message production/consumption. Metrics are provided via + dependency injection (DI) through the MetricsProvider. - Usage: - from app.core.metrics.context import get_event_metrics + Usage (via DI): + class MyService: + def __init__(self, event_metrics: EventMetrics): + self.metrics = event_metrics - metrics = get_event_metrics() - metrics.record_event_published("execution.requested") - - The metrics instance is managed by the MetricsContext and is available - throughout the application without needing to pass it through layers. + def my_method(self): + self.metrics.record_event_published("execution.requested") """ def _create_instruments(self) -> None: diff --git a/backend/app/core/middlewares/rate_limit.py b/backend/app/core/middlewares/rate_limit.py index a08a708e..56b2da62 100644 --- a/backend/app/core/middlewares/rate_limit.py +++ b/backend/app/core/middlewares/rate_limit.py @@ -46,8 +46,6 @@ def __init__( self.app = app self.rate_limit_service = rate_limit_service self.settings = settings - # Default to enabled unless settings says otherwise - self.enabled = bool(settings.RATE_LIMIT_ENABLED) if settings else True async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: if scope["type"] != "http": @@ -56,7 +54,12 @@ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: path = scope["path"] - if not self.enabled or path in self.EXCLUDED_PATHS: + if path in self.EXCLUDED_PATHS: + await self.app(scope, receive, send) + return + + # Check if rate limiting is globally disabled via settings + if self.settings is not None and not self.settings.RATE_LIMIT_ENABLED: await self.app(scope, receive, send) return diff --git a/backend/app/core/providers.py b/backend/app/core/providers.py index 3af1e28e..6ce30a01 100644 --- a/backend/app/core/providers.py +++ b/backend/app/core/providers.py @@ -9,19 +9,19 @@ from app.core.k8s_clients import K8sClients, close_k8s_clients, create_k8s_clients from app.core.logging import setup_logger from app.core.metrics import ( + ConnectionMetrics, CoordinatorMetrics, DatabaseMetrics, DLQMetrics, + EventMetrics, ExecutionMetrics, HealthMetrics, KubernetesMetrics, NotificationMetrics, + RateLimitMetrics, ReplayMetrics, SecurityMetrics, ) -from app.core.metrics.connections import ConnectionMetrics -from app.core.metrics.events import EventMetrics -from app.core.metrics.rate_limit import RateLimitMetrics from app.core.security import 
SecurityService from app.core.tracing import TracerManager from app.db.repositories import ( @@ -41,7 +41,6 @@ from app.db.repositories.resource_allocation_repository import ResourceAllocationRepository from app.db.repositories.user_settings_repository import UserSettingsRepository from app.dlq.manager import DLQManager, create_dlq_manager -from app.domain.enums.kafka import KafkaTopic from app.domain.saga.models import SagaConfig from app.events.core import UnifiedProducer from app.events.event_store import EventStore, create_event_store @@ -120,7 +119,7 @@ async def get_redis_client(self, settings: Settings, logger: logging.Logger) -> @provide def get_rate_limit_service( - self, redis_client: redis.Redis, settings: Settings, rate_limit_metrics: RateLimitMetrics + self, redis_client: redis.Redis, settings: Settings, rate_limit_metrics: RateLimitMetrics ) -> RateLimitService: return RateLimitService(redis_client, settings, rate_limit_metrics) @@ -158,16 +157,21 @@ class MessagingProvider(Provider): @provide async def get_kafka_producer( - self, settings: Settings, schema_registry: SchemaRegistryManager, logger: logging.Logger + self, settings: Settings, schema_registry: SchemaRegistryManager, logger: logging.Logger, + event_metrics: EventMetrics ) -> AsyncIterator[UnifiedProducer]: - async with UnifiedProducer(schema_registry, logger, settings) as producer: + async with UnifiedProducer(schema_registry, logger, settings, event_metrics) as producer: yield producer @provide async def get_dlq_manager( - self, settings: Settings, schema_registry: SchemaRegistryManager, logger: logging.Logger + self, + settings: Settings, + schema_registry: SchemaRegistryManager, + logger: logging.Logger, + dlq_metrics: DLQMetrics, ) -> AsyncIterator[DLQManager]: - async with create_dlq_manager(settings, schema_registry, logger) as manager: + async with create_dlq_manager(settings, schema_registry, logger, dlq_metrics) as manager: yield manager @provide @@ -176,9 +180,11 @@ def get_idempotency_repository(self, redis_client: redis.Redis) -> RedisIdempote @provide async def get_idempotency_manager( - self, repo: RedisIdempotencyRepository, logger: logging.Logger + self, repo: RedisIdempotencyRepository, logger: logging.Logger, database_metrics: DatabaseMetrics ) -> AsyncIterator[IdempotencyManager]: - manager = create_idempotency_manager(repository=repo, config=IdempotencyConfig(), logger=logger) + manager = create_idempotency_manager( + repository=repo, config=IdempotencyConfig(), logger=logger, database_metrics=database_metrics + ) await manager.initialize() try: yield manager @@ -194,33 +200,40 @@ def get_schema_registry(self, settings: Settings, logger: logging.Logger) -> Sch return SchemaRegistryManager(settings, logger) @provide - async def get_event_store(self, schema_registry: SchemaRegistryManager, logger: logging.Logger) -> EventStore: - store = create_event_store(schema_registry=schema_registry, logger=logger, ttl_days=90) - return store + async def get_event_store( + self, schema_registry: SchemaRegistryManager, logger: logging.Logger, event_metrics: EventMetrics + ) -> EventStore: + return create_event_store( + schema_registry=schema_registry, logger=logger, event_metrics=event_metrics, ttl_days=90 + ) @provide async def get_event_store_consumer( - self, - event_store: EventStore, - schema_registry: SchemaRegistryManager, - settings: Settings, - kafka_producer: UnifiedProducer, - logger: logging.Logger, + self, + event_store: EventStore, + schema_registry: SchemaRegistryManager, + settings: Settings, 
+ kafka_producer: UnifiedProducer, + logger: logging.Logger, + event_metrics: EventMetrics, ) -> AsyncIterator[EventStoreConsumer]: topics = get_all_topics() async with create_event_store_consumer( - event_store=event_store, - topics=list(topics), - schema_registry_manager=schema_registry, - settings=settings, - producer=kafka_producer, - logger=logger, + event_store=event_store, + topics=list(topics), + schema_registry_manager=schema_registry, + settings=settings, + producer=kafka_producer, + logger=logger, + event_metrics=event_metrics, ) as consumer: yield consumer @provide - async def get_event_bus_manager(self, settings: Settings, logger: logging.Logger) -> AsyncIterator[EventBusManager]: - manager = EventBusManager(settings, logger) + async def get_event_bus_manager( + self, settings: Settings, logger: logging.Logger, connection_metrics: ConnectionMetrics + ) -> AsyncIterator[EventBusManager]: + manager = EventBusManager(settings, logger, connection_metrics) try: yield manager finally: @@ -232,7 +245,7 @@ class KubernetesProvider(Provider): @provide async def get_k8s_clients(self, settings: Settings, logger: logging.Logger) -> AsyncIterator[K8sClients]: - clients = create_k8s_clients(logger) + clients = create_k8s_clients(logger, kubeconfig_path=settings.KUBERNETES_CONFIG_PATH) try: yield clients finally: @@ -240,7 +253,7 @@ async def get_k8s_clients(self, settings: Settings, logger: logging.Logger) -> A class MetricsProvider(Provider): - """Provides all metrics instances.""" + """Provides all metrics instances via DI (no contextvars needed).""" scope = Scope.APP @@ -367,35 +380,38 @@ async def get_sse_redis_bus(self, redis_client: redis.Redis, logger: logging.Log @provide async def get_sse_kafka_redis_bridge( - self, - schema_registry: SchemaRegistryManager, - settings: Settings, - event_metrics: EventMetrics, - sse_redis_bus: SSERedisBus, - logger: logging.Logger, + self, + schema_registry: SchemaRegistryManager, + settings: Settings, + event_metrics: EventMetrics, + sse_redis_bus: SSERedisBus, + logger: logging.Logger, ) -> AsyncIterator[SSEKafkaRedisBridge]: async with create_sse_kafka_redis_bridge( - schema_registry=schema_registry, - settings=settings, - event_metrics=event_metrics, - sse_bus=sse_redis_bus, - logger=logger, + schema_registry=schema_registry, + settings=settings, + event_metrics=event_metrics, + sse_bus=sse_redis_bus, + logger=logger, ) as bridge: yield bridge @provide(scope=Scope.REQUEST) - def get_sse_shutdown_manager(self, logger: logging.Logger) -> SSEShutdownManager: - return create_sse_shutdown_manager(logger=logger) + def get_sse_shutdown_manager( + self, logger: logging.Logger, connection_metrics: ConnectionMetrics + ) -> SSEShutdownManager: + return create_sse_shutdown_manager(logger=logger, connection_metrics=connection_metrics) @provide(scope=Scope.REQUEST) def get_sse_service( - self, - sse_repository: SSERepository, - router: SSEKafkaRedisBridge, - sse_redis_bus: SSERedisBus, - shutdown_manager: SSEShutdownManager, - settings: Settings, - logger: logging.Logger, + self, + sse_repository: SSERepository, + router: SSEKafkaRedisBridge, + sse_redis_bus: SSERedisBus, + shutdown_manager: SSEShutdownManager, + settings: Settings, + logger: logging.Logger, + connection_metrics: ConnectionMetrics, ) -> SSEService: shutdown_manager.set_router(router) return SSEService( @@ -405,6 +421,7 @@ def get_sse_service( shutdown_manager=shutdown_manager, settings=settings, logger=logger, + connection_metrics=connection_metrics, ) @@ -413,7 +430,7 @@ class 
AuthProvider(Provider): @provide def get_auth_service( - self, user_repository: UserRepository, security_service: SecurityService, logger: logging.Logger + self, user_repository: UserRepository, security_service: SecurityService, logger: logging.Logger ) -> AuthService: return AuthService(user_repository, security_service, logger) @@ -429,17 +446,19 @@ def get_event_service(self, event_repository: EventRepository) -> EventService: @provide def get_kafka_event_service( - self, - event_repository: EventRepository, - kafka_producer: UnifiedProducer, - settings: Settings, - logger: logging.Logger, + self, + event_repository: EventRepository, + kafka_producer: UnifiedProducer, + settings: Settings, + logger: logging.Logger, + event_metrics: EventMetrics, ) -> KafkaEventService: return KafkaEventService( event_repository=event_repository, kafka_producer=kafka_producer, settings=settings, logger=logger, + event_metrics=event_metrics, ) @@ -448,11 +467,11 @@ class UserServicesProvider(Provider): @provide async def get_user_settings_service( - self, - repository: UserSettingsRepository, - kafka_event_service: KafkaEventService, - event_bus_manager: EventBusManager, - logger: logging.Logger, + self, + repository: UserSettingsRepository, + kafka_event_service: KafkaEventService, + event_bus_manager: EventBusManager, + logger: logging.Logger, ) -> UserSettingsService: service = UserSettingsService(repository, kafka_event_service, logger) await service.initialize(event_bus_manager) @@ -464,31 +483,33 @@ class AdminServicesProvider(Provider): @provide(scope=Scope.REQUEST) def get_admin_events_service( - self, - admin_events_repository: AdminEventsRepository, - replay_service: ReplayService, - logger: logging.Logger, + self, + admin_events_repository: AdminEventsRepository, + replay_service: ReplayService, + logger: logging.Logger, ) -> AdminEventsService: return AdminEventsService(admin_events_repository, replay_service, logger) @provide def get_admin_settings_service( - self, - admin_settings_repository: AdminSettingsRepository, - logger: logging.Logger, + self, + admin_settings_repository: AdminSettingsRepository, + logger: logging.Logger, ) -> AdminSettingsService: return AdminSettingsService(admin_settings_repository, logger) @provide def get_notification_service( - self, - notification_repository: NotificationRepository, - kafka_event_service: KafkaEventService, - event_bus_manager: EventBusManager, - schema_registry: SchemaRegistryManager, - sse_redis_bus: SSERedisBus, - settings: Settings, - logger: logging.Logger, + self, + notification_repository: NotificationRepository, + kafka_event_service: KafkaEventService, + event_bus_manager: EventBusManager, + schema_registry: SchemaRegistryManager, + sse_redis_bus: SSERedisBus, + settings: Settings, + logger: logging.Logger, + notification_metrics: NotificationMetrics, + event_metrics: EventMetrics, ) -> NotificationService: service = NotificationService( notification_repository=notification_repository, @@ -498,15 +519,17 @@ def get_notification_service( sse_bus=sse_redis_bus, settings=settings, logger=logger, + notification_metrics=notification_metrics, + event_metrics=event_metrics, ) service.initialize() return service @provide def get_grafana_alert_processor( - self, - notification_service: NotificationService, - logger: logging.Logger, + self, + notification_service: NotificationService, + logger: logging.Logger, ) -> GrafanaAlertProcessor: return GrafanaAlertProcessor(notification_service, logger) @@ -526,48 +549,54 @@ def 
_create_default_saga_config() -> SagaConfig: # Standalone factory functions for lifecycle-managed services (eliminates duplication) async def _provide_saga_orchestrator( - saga_repository: SagaRepository, - kafka_producer: UnifiedProducer, - schema_registry: SchemaRegistryManager, - settings: Settings, - event_store: EventStore, - idempotency_manager: IdempotencyManager, - resource_allocation_repository: ResourceAllocationRepository, - logger: logging.Logger, + saga_repository: SagaRepository, + kafka_producer: UnifiedProducer, + schema_registry: SchemaRegistryManager, + settings: Settings, + event_store: EventStore, + idempotency_manager: IdempotencyManager, + resource_allocation_repository: ResourceAllocationRepository, + logger: logging.Logger, + event_metrics: EventMetrics, ) -> AsyncIterator[SagaOrchestrator]: """Shared factory for SagaOrchestrator with lifecycle management.""" async with create_saga_orchestrator( - saga_repository=saga_repository, - producer=kafka_producer, - schema_registry_manager=schema_registry, - settings=settings, - event_store=event_store, - idempotency_manager=idempotency_manager, - resource_allocation_repository=resource_allocation_repository, - config=_create_default_saga_config(), - logger=logger, + saga_repository=saga_repository, + producer=kafka_producer, + schema_registry_manager=schema_registry, + settings=settings, + event_store=event_store, + idempotency_manager=idempotency_manager, + resource_allocation_repository=resource_allocation_repository, + config=_create_default_saga_config(), + logger=logger, + event_metrics=event_metrics, ) as orchestrator: yield orchestrator async def _provide_execution_coordinator( - kafka_producer: UnifiedProducer, - schema_registry: SchemaRegistryManager, - settings: Settings, - event_store: EventStore, - execution_repository: ExecutionRepository, - idempotency_manager: IdempotencyManager, - logger: logging.Logger, + kafka_producer: UnifiedProducer, + schema_registry: SchemaRegistryManager, + settings: Settings, + event_store: EventStore, + execution_repository: ExecutionRepository, + idempotency_manager: IdempotencyManager, + logger: logging.Logger, + coordinator_metrics: CoordinatorMetrics, + event_metrics: EventMetrics, ) -> AsyncIterator[ExecutionCoordinator]: """Shared factory for ExecutionCoordinator with lifecycle management.""" async with ExecutionCoordinator( - producer=kafka_producer, - schema_registry_manager=schema_registry, - settings=settings, - event_store=event_store, - execution_repository=execution_repository, - idempotency_manager=idempotency_manager, - logger=logger, + producer=kafka_producer, + schema_registry_manager=schema_registry, + settings=settings, + event_store=event_store, + execution_repository=execution_repository, + idempotency_manager=idempotency_manager, + logger=logger, + coordinator_metrics=coordinator_metrics, + event_metrics=event_metrics, ) as coordinator: yield coordinator @@ -582,11 +611,11 @@ def __init__(self) -> None: @provide def get_saga_service( - self, - saga_repository: SagaRepository, - execution_repository: ExecutionRepository, - saga_orchestrator: SagaOrchestrator, - logger: logging.Logger, + self, + saga_repository: SagaRepository, + execution_repository: ExecutionRepository, + saga_orchestrator: SagaOrchestrator, + logger: logging.Logger, ) -> SagaService: return SagaService( saga_repo=saga_repository, @@ -597,12 +626,13 @@ def get_saga_service( @provide def get_execution_service( - self, - execution_repository: ExecutionRepository, - kafka_producer: 
@@ -582,11 +611,11 @@ def __init__(self) -> None:

     @provide
     def get_saga_service(
-            self,
-            saga_repository: SagaRepository,
-            execution_repository: ExecutionRepository,
-            saga_orchestrator: SagaOrchestrator,
-            logger: logging.Logger,
+        self,
+        saga_repository: SagaRepository,
+        execution_repository: ExecutionRepository,
+        saga_orchestrator: SagaOrchestrator,
+        logger: logging.Logger,
     ) -> SagaService:
         return SagaService(
             saga_repo=saga_repository,
@@ -597,12 +626,13 @@ def get_saga_service(

     @provide
     def get_execution_service(
-            self,
-            execution_repository: ExecutionRepository,
-            kafka_producer: UnifiedProducer,
-            event_store: EventStore,
-            settings: Settings,
-            logger: logging.Logger,
+        self,
+        execution_repository: ExecutionRepository,
+        kafka_producer: UnifiedProducer,
+        event_store: EventStore,
+        settings: Settings,
+        logger: logging.Logger,
+        execution_metrics: ExecutionMetrics,
     ) -> ExecutionService:
         return ExecutionService(
             execution_repo=execution_repository,
@@ -610,22 +640,23 @@ def get_execution_service(
             event_store=event_store,
             settings=settings,
             logger=logger,
+            execution_metrics=execution_metrics,
         )

     @provide
     def get_saved_script_service(
-            self, saved_script_repository: SavedScriptRepository, logger: logging.Logger
+        self, saved_script_repository: SavedScriptRepository, logger: logging.Logger
     ) -> SavedScriptService:
         return SavedScriptService(saved_script_repository, logger)

     @provide
     async def get_replay_service(
-            self,
-            replay_repository: ReplayRepository,
-            kafka_producer: UnifiedProducer,
-            event_store: EventStore,
-            settings: Settings,
-            logger: logging.Logger,
+        self,
+        replay_repository: ReplayRepository,
+        kafka_producer: UnifiedProducer,
+        event_store: EventStore,
+        settings: Settings,
+        logger: logging.Logger,
     ) -> ReplayService:
         event_replay_service = EventReplayService(
             repository=replay_repository,
@@ -638,13 +669,13 @@ async def get_replay_service(

     @provide
     def get_admin_user_service(
-            self,
-            admin_user_repository: AdminUserRepository,
-            event_service: EventService,
-            execution_service: ExecutionService,
-            rate_limit_service: RateLimitService,
-            security_service: SecurityService,
-            logger: logging.Logger,
+        self,
+        admin_user_repository: AdminUserRepository,
+        event_service: EventService,
+        execution_service: ExecutionService,
+        rate_limit_service: RateLimitService,
+        security_service: SecurityService,
+        logger: logging.Logger,
     ) -> AdminUserService:
         return AdminUserService(
             user_repository=admin_user_repository,
@@ -669,23 +700,25 @@ class K8sWorkerProvider(Provider):

     @provide
     async def get_kubernetes_worker(
-            self,
-            kafka_producer: UnifiedProducer,
-            schema_registry: SchemaRegistryManager,
-            settings: Settings,
-            event_store: EventStore,
-            idempotency_manager: IdempotencyManager,
-            logger: logging.Logger,
+        self,
+        kafka_producer: UnifiedProducer,
+        schema_registry: SchemaRegistryManager,
+        settings: Settings,
+        event_store: EventStore,
+        idempotency_manager: IdempotencyManager,
+        logger: logging.Logger,
+        event_metrics: EventMetrics,
     ) -> AsyncIterator[KubernetesWorker]:
         config = K8sWorkerConfig()
         async with KubernetesWorker(
-                config=config,
-                producer=kafka_producer,
-                schema_registry_manager=schema_registry,
-                settings=settings,
-                event_store=event_store,
-                idempotency_manager=idempotency_manager,
-                logger=logger,
+            config=config,
+            producer=kafka_producer,
+            schema_registry_manager=schema_registry,
+            settings=settings,
+            event_store=event_store,
+            idempotency_manager=idempotency_manager,
+            logger=logger,
+            event_metrics=event_metrics,
         ) as worker:
             yield worker
@@ -695,27 +728,29 @@ class PodMonitorProvider(Provider):

     @provide
     def get_event_mapper(
-            self,
-            logger: logging.Logger,
-            k8s_clients: K8sClients,
+        self,
+        logger: logging.Logger,
+        k8s_clients: K8sClients,
     ) -> PodEventMapper:
         return PodEventMapper(logger=logger, k8s_api=k8s_clients.v1)

     @provide
     async def get_pod_monitor(
-            self,
-            kafka_event_service: KafkaEventService,
-            k8s_clients: K8sClients,
-            logger: logging.Logger,
-            event_mapper: PodEventMapper,
+        self,
+        kafka_event_service: KafkaEventService,
+        k8s_clients: K8sClients,
+        logger: logging.Logger,
+        event_mapper: PodEventMapper,
+        kubernetes_metrics: KubernetesMetrics,
     ) -> AsyncIterator[PodMonitor]:
         config = PodMonitorConfig()
         async with PodMonitor(
-                config=config,
-                kafka_event_service=kafka_event_service,
-                logger=logger,
-                k8s_clients=k8s_clients,
-                event_mapper=event_mapper,
+            config=config,
+            kafka_event_service=kafka_event_service,
+            logger=logger,
+            k8s_clients=k8s_clients,
+            event_mapper=event_mapper,
+            kubernetes_metrics=kubernetes_metrics,
         ) as monitor:
             yield monitor
@@ -733,12 +768,12 @@ class EventReplayProvider(Provider):

     @provide
     def get_event_replay_service(
-            self,
-            replay_repository: ReplayRepository,
-            kafka_producer: UnifiedProducer,
-            event_store: EventStore,
-            settings: Settings,
-            logger: logging.Logger,
+        self,
+        replay_repository: ReplayRepository,
+        kafka_producer: UnifiedProducer,
+        event_store: EventStore,
+        settings: Settings,
+        logger: logging.Logger,
     ) -> EventReplayService:
         return EventReplayService(
             repository=replay_repository,
@@ -747,23 +782,3 @@ def get_event_replay_service(
             settings=settings,
             logger=logger,
         )
-
-
-class DLQProcessorProvider(Provider):
-    scope = Scope.APP
-
-    @provide
-    async def get_dlq_manager(
-        self,
-        settings: Settings,
-        schema_registry: SchemaRegistryManager,
-        logger: logging.Logger,
-    ) -> AsyncIterator[DLQManager]:
-        async with create_dlq_manager(
-            settings=settings,
-            schema_registry=schema_registry,
-            logger=logger,
-            dlq_topic=KafkaTopic.DEAD_LETTER_QUEUE,
-            retry_topic_suffix="-retry",
-        ) as manager:
-            yield manager
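The old DLQProcessorProvider is deleted outright, and `create_dlq_manager` now requires a `dlq_metrics` argument (see the manager diff below). Wherever the manager is provided next, the provider has to request `DLQMetrics` from the container. A hypothetical replacement, assuming the same `Provider`/`@provide` conventions used above:

```python
# Hypothetical sketch - the diff removes the old provider without showing
# its successor; this only illustrates how dlq_metrics would be threaded in.
class DLQProcessorProvider(Provider):
    scope = Scope.APP

    @provide
    async def get_dlq_manager(
        self,
        settings: Settings,
        schema_registry: SchemaRegistryManager,
        logger: logging.Logger,
        dlq_metrics: DLQMetrics,  # the new required dependency
    ) -> AsyncIterator[DLQManager]:
        async with create_dlq_manager(
            settings=settings,
            schema_registry=schema_registry,
            logger=logger,
            dlq_metrics=dlq_metrics,
            dlq_topic=KafkaTopic.DEAD_LETTER_QUEUE,
            retry_topic_suffix="-retry",
        ) as manager:
            yield manager
```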
diff --git a/backend/app/core/startup.py b/backend/app/core/startup.py
index afabada3..549c3cb8 100644
--- a/backend/app/core/startup.py
+++ b/backend/app/core/startup.py
@@ -1,69 +1,25 @@
 import logging

 import redis.asyncio as redis
-from dishka import AsyncContainer

-from app.core.metrics import (
-    ConnectionMetrics,
-    CoordinatorMetrics,
-    DatabaseMetrics,
-    DLQMetrics,
-    EventMetrics,
-    ExecutionMetrics,
-    HealthMetrics,
-    KubernetesMetrics,
-    NotificationMetrics,
-    RateLimitMetrics,
-    ReplayMetrics,
-    SecurityMetrics,
-)
-from app.core.metrics.context import MetricsContext, get_rate_limit_metrics
+from app.core.metrics import RateLimitMetrics
 from app.domain.rate_limit import RateLimitConfig
 from app.services.rate_limit_service import RateLimitService
 from app.settings import Settings


-async def initialize_metrics_context(container: AsyncContainer, logger: logging.Logger) -> None:
-    try:
-        # Get all metrics from the container
-        # These are created as APP-scoped singletons by providers
-        metrics_mapping = {}
-
-        # Only add metrics that are actually provided by the container
-        # Some metrics might not be needed for certain deployments
-        metrics_mapping["event"] = await container.get(EventMetrics)
-        metrics_mapping["connection"] = await container.get(ConnectionMetrics)
-        metrics_mapping["rate_limit"] = await container.get(RateLimitMetrics)
-        metrics_mapping["execution"] = await container.get(ExecutionMetrics)
-        metrics_mapping["database"] = await container.get(DatabaseMetrics)
-        metrics_mapping["health"] = await container.get(HealthMetrics)
-        metrics_mapping["kubernetes"] = await container.get(KubernetesMetrics)
-        metrics_mapping["coordinator"] = await container.get(CoordinatorMetrics)
-        metrics_mapping["dlq"] = await container.get(DLQMetrics)
-        metrics_mapping["notification"] = await container.get(NotificationMetrics)
-        metrics_mapping["replay"] = await container.get(ReplayMetrics)
-        metrics_mapping["security"] = await container.get(SecurityMetrics)
-
-        # Initialize the context with available metrics
-        MetricsContext.initialize_all(logger=logger, **metrics_mapping)
-
-        logger.info(f"Initialized metrics context with {len(metrics_mapping)} metric types")
-
-    except Exception as e:
-        logger.error(f"Failed to initialize metrics context: {e}")
-        # Don't fail startup if metrics init fails
-        # The context will lazy-initialize metrics as needed
-
-
-async def initialize_rate_limits(redis_client: redis.Redis, settings: Settings, logger: logging.Logger) -> None:
+async def initialize_rate_limits(
+    redis_client: redis.Redis,
+    settings: Settings,
+    logger: logging.Logger,
+    rate_limit_metrics: RateLimitMetrics,
+) -> None:
     """
     Initialize default rate limits in Redis on application startup.
     This ensures default limits are always available.
     """
     try:
-        # Create metrics instance
-        metrics = get_rate_limit_metrics()
-        service = RateLimitService(redis_client, settings, metrics)
+        service = RateLimitService(redis_client, settings, rate_limit_metrics)

         # Check if config already exists
         config_key = f"{settings.RATE_LIMIT_REDIS_PREFIX}config"
diff --git a/backend/app/dlq/manager.py b/backend/app/dlq/manager.py
index 27aacfdf..1d450a03 100644
--- a/backend/app/dlq/manager.py
+++ b/backend/app/dlq/manager.py
@@ -8,7 +8,7 @@ from opentelemetry.trace import SpanKind

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_dlq_metrics
+from app.core.metrics import DLQMetrics
 from app.core.tracing import EventAttributes
 from app.core.tracing.utils import extract_trace_context, get_tracer, inject_trace_context
 from app.db.docs import DLQMessageDocument
@@ -40,13 +40,14 @@ def __init__(
         producer: AIOKafkaProducer,
         schema_registry: SchemaRegistryManager,
         logger: logging.Logger,
+        dlq_metrics: DLQMetrics,
         dlq_topic: KafkaTopic = KafkaTopic.DEAD_LETTER_QUEUE,
         retry_topic_suffix: str = "-retry",
         default_retry_policy: RetryPolicy | None = None,
     ):
         super().__init__()
         self.settings = settings
-        self.metrics = get_dlq_metrics()
+        self.metrics = dlq_metrics
         self.schema_registry = schema_registry
         self.logger = logger
         self.dlq_topic = dlq_topic
@@ -77,9 +78,8 @@ def _kafka_msg_to_message(self, msg: Any) -> DLQMessage:

     async def _on_start(self) -> None:
         """Start DLQ manager."""
-        # Start producer and consumer
-        await self.producer.start()
-        await self.consumer.start()
+        # Start producer and consumer in parallel for faster startup
+        await asyncio.gather(self.producer.start(), self.consumer.start())

         # Start processing tasks
         self._process_task = asyncio.create_task(self._process_messages())
@@ -444,6 +444,7 @@ def create_dlq_manager(
     settings: Settings,
     schema_registry: SchemaRegistryManager,
     logger: logging.Logger,
+    dlq_metrics: DLQMetrics,
     dlq_topic: KafkaTopic = KafkaTopic.DEAD_LETTER_QUEUE,
     retry_topic_suffix: str = "-retry",
     default_retry_policy: RetryPolicy | None = None,
@@ -478,6 +479,7 @@ def create_dlq_manager(
         producer=producer,
         schema_registry=schema_registry,
         logger=logger,
+        dlq_metrics=dlq_metrics,
         dlq_topic=dlq_topic,
         retry_topic_suffix=retry_topic_suffix,
         default_retry_policy=default_retry_policy,
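Starting the producer and consumer with `asyncio.gather` overlaps two independent network handshakes instead of serializing them. A self-contained sketch of the timing difference:

```python
import asyncio
import time


async def start(name: str, delay: float) -> None:
    await asyncio.sleep(delay)  # stands in for a Kafka connect/group join
    print(f"{name} started")


async def main() -> None:
    t0 = time.perf_counter()
    await start("producer", 0.5)
    await start("consumer", 0.5)
    print(f"sequential: {time.perf_counter() - t0:.1f}s")  # ~1.0s

    t0 = time.perf_counter()
    await asyncio.gather(start("producer", 0.5), start("consumer", 0.5))
    print(f"parallel:   {time.perf_counter() - t0:.1f}s")  # ~0.5s


asyncio.run(main())
```

One caveat: if one `start()` raises, `gather` by default does not cancel the other, so the sibling may finish starting and must be cleaned up in the caller's error path.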
diff --git a/backend/app/events/core/consumer.py b/backend/app/events/core/consumer.py
index 01556751..d0532f37 100644
--- a/backend/app/events/core/consumer.py
+++ b/backend/app/events/core/consumer.py
@@ -8,7 +8,7 @@ from aiokafka.errors import KafkaError
 from opentelemetry.trace import SpanKind

-from app.core.metrics.context import get_event_metrics
+from app.core.metrics import EventMetrics
 from app.core.tracing import EventAttributes
 from app.core.tracing.utils import extract_trace_context, get_tracer
 from app.domain.enums.kafka import KafkaTopic
@@ -28,6 +28,7 @@ def __init__(
         schema_registry: SchemaRegistryManager,
         settings: Settings,
         logger: logging.Logger,
+        event_metrics: EventMetrics,
     ):
         self._config = config
         self.logger = logger
@@ -37,7 +38,7 @@ def __init__(
         self._state = ConsumerState.STOPPED
         self._running = False
         self._metrics = ConsumerMetrics()
-        self._event_metrics = get_event_metrics()  # Singleton for Kafka metrics
+        self._event_metrics = event_metrics
         self._error_callback: "Callable[[Exception, DomainEvent], Awaitable[None]] | None" = None
         self._consume_task: asyncio.Task[None] | None = None
         self._topic_prefix = settings.KAFKA_TOPIC_PREFIX
diff --git a/backend/app/events/core/producer.py b/backend/app/events/core/producer.py
index c5848aec..a41188c7 100644
--- a/backend/app/events/core/producer.py
+++ b/backend/app/events/core/producer.py
@@ -9,7 +9,7 @@ from aiokafka.errors import KafkaError

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_event_metrics
+from app.core.metrics import EventMetrics
 from app.dlq.models import DLQMessage, DLQMessageStatus
 from app.domain.enums.kafka import KafkaTopic
 from app.domain.events.typed import DomainEvent
@@ -28,6 +28,7 @@ def __init__(
         schema_registry_manager: SchemaRegistryManager,
         logger: logging.Logger,
         settings: Settings,
+        event_metrics: EventMetrics,
     ):
         super().__init__()
         self._settings = settings
@@ -36,7 +37,7 @@ def __init__(
         self._producer: AIOKafkaProducer | None = None
         self._state = ProducerState.STOPPED
         self._metrics = ProducerMetrics()
-        self._event_metrics = get_event_metrics()
+        self._event_metrics = event_metrics
         self._topic_prefix = settings.KAFKA_TOPIC_PREFIX

     @property
diff --git a/backend/app/events/event_store.py b/backend/app/events/event_store.py
index 0c475cc3..026ae84a 100644
--- a/backend/app/events/event_store.py
+++ b/backend/app/events/event_store.py
@@ -7,7 +7,7 @@ from beanie.odm.enums import SortDirection
 from pymongo.errors import BulkWriteError, DuplicateKeyError

-from app.core.metrics.context import get_event_metrics
+from app.core.metrics import EventMetrics
 from app.core.tracing import EventAttributes
 from app.core.tracing.utils import add_span_attributes
 from app.db.docs import EventDocument
@@ -21,10 +21,11 @@ def __init__(
         self,
         schema_registry: SchemaRegistryManager,
         logger: logging.Logger,
+        event_metrics: EventMetrics,
         ttl_days: int = 90,
         batch_size: int = 100,
     ):
-        self.metrics = get_event_metrics()
+        self.metrics = event_metrics
         self.schema_registry = schema_registry
         self.logger = logger
         self.ttl_days = ttl_days
@@ -317,12 +318,14 @@ async def health_check(self) -> dict[str, Any]:

 def create_event_store(
     schema_registry: SchemaRegistryManager,
     logger: logging.Logger,
+    event_metrics: EventMetrics,
     ttl_days: int = 90,
     batch_size: int = 100,
 ) -> EventStore:
     return EventStore(
         schema_registry=schema_registry,
         logger=logger,
+        event_metrics=event_metrics,
         ttl_days=ttl_days,
         batch_size=batch_size,
     )
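Replacing the `get_event_metrics()` singleton with a constructor argument pays off mainly in tests: consumers, producers, and the event store can all be exercised against a recording fake. A minimal sketch, assuming only that the code under test duck-types against `EventMetrics`:

```python
from typing import Any


class FakeEventMetrics:
    """Records metric calls instead of exporting them (test double)."""

    def __init__(self) -> None:
        self.calls: list[tuple[str, Any]] = []

    def __getattr__(self, name: str):
        def record(*args: Any, **kwargs: Any) -> None:
            self.calls.append((name, args or kwargs))
        return record


def test_component_records_metrics() -> None:
    metrics = FakeEventMetrics()
    # store = EventStore(schema_registry=..., logger=..., event_metrics=metrics)
    metrics.record_stored("execution.requested")  # what the component would do
    assert metrics.calls == [("record_stored", ("execution.requested",))]
```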
diff --git a/backend/app/events/event_store_consumer.py b/backend/app/events/event_store_consumer.py
index 4f2ba47d..41135a95 100644
--- a/backend/app/events/event_store_consumer.py
+++ b/backend/app/events/event_store_consumer.py
@@ -4,6 +4,7 @@
 from opentelemetry.trace import SpanKind

 from app.core.lifecycle import LifecycleEnabled
+from app.core.metrics import EventMetrics
 from app.core.tracing.utils import trace_span
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import GroupId, KafkaTopic
@@ -24,6 +25,7 @@ def __init__(
         schema_registry_manager: SchemaRegistryManager,
         settings: Settings,
         logger: logging.Logger,
+        event_metrics: EventMetrics,
         producer: UnifiedProducer | None = None,
         group_id: GroupId = GroupId.EVENT_STORE_CONSUMER,
         batch_size: int = 100,
@@ -37,6 +39,7 @@ def __init__(
         self.batch_size = batch_size
         self.batch_timeout = batch_timeout_seconds
         self.logger = logger
+        self.event_metrics = event_metrics
         self.consumer: UnifiedConsumer | None = None
         self.schema_registry_manager = schema_registry_manager
         self.dispatcher = EventDispatcher(logger)
@@ -66,6 +69,7 @@ async def _on_start(self) -> None:
             schema_registry=self.schema_registry_manager,
             settings=self.settings,
             logger=self.logger,
+            event_metrics=self.event_metrics,
         )

         # Register handler for all event types - store everything
@@ -166,6 +170,7 @@ def create_event_store_consumer(
     schema_registry_manager: SchemaRegistryManager,
     settings: Settings,
     logger: logging.Logger,
+    event_metrics: EventMetrics,
     producer: UnifiedProducer | None = None,
     group_id: GroupId = GroupId.EVENT_STORE_CONSUMER,
     batch_size: int = 100,
@@ -180,5 +185,6 @@ def create_event_store_consumer(
         schema_registry_manager=schema_registry_manager,
         settings=settings,
         logger=logger,
+        event_metrics=event_metrics,
         producer=producer,
     )
diff --git a/backend/app/main.py b/backend/app/main.py
index 52af39bb..bf776a41 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -31,21 +31,6 @@
 from app.core.dishka_lifespan import lifespan
 from app.core.exceptions import configure_exception_handlers
 from app.core.logging import setup_logger
-from app.core.metrics import (
-    ConnectionMetrics,
-    CoordinatorMetrics,
-    DatabaseMetrics,
-    DLQMetrics,
-    EventMetrics,
-    ExecutionMetrics,
-    HealthMetrics,
-    KubernetesMetrics,
-    NotificationMetrics,
-    RateLimitMetrics,
-    ReplayMetrics,
-    SecurityMetrics,
-)
-from app.core.metrics.context import MetricsContext
 from app.core.middlewares import (
     CacheControlMiddleware,
     CSRFMiddleware,
@@ -68,22 +53,8 @@ def create_app(settings: Settings | None = None) -> FastAPI:
     settings = settings or Settings()
     logger = setup_logger(settings.LOG_LEVEL)

-    # Initialize metrics context for all services
-    MetricsContext.initialize_all(
-        logger,
-        connection=ConnectionMetrics(settings),
-        coordinator=CoordinatorMetrics(settings),
-        database=DatabaseMetrics(settings),
-        dlq=DLQMetrics(settings),
-        event=EventMetrics(settings),
-        execution=ExecutionMetrics(settings),
-        health=HealthMetrics(settings),
-        kubernetes=KubernetesMetrics(settings),
-        notification=NotificationMetrics(settings),
-        rate_limit=RateLimitMetrics(settings),
-        replay=ReplayMetrics(settings),
-        security=SecurityMetrics(settings),
-    )
+    # Note: Metrics are now provided via DI (MetricsProvider) and injected into services.
+    # No manual MetricsContext initialization is needed.
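The note above references a `MetricsProvider` that is not itself shown in this diff. For the container to satisfy all of the new constructor parameters, it would have to expose each metrics class as an APP-scoped singleton, roughly like this hypothetical sketch (the `Metrics(settings)` construction mirrors the removed `create_app` code):

```python
# Hypothetical - the actual MetricsProvider is not part of this diff.
class MetricsProvider(Provider):
    scope = Scope.APP

    @provide
    def get_event_metrics(self, settings: Settings) -> EventMetrics:
        return EventMetrics(settings)

    @provide
    def get_rate_limit_metrics(self, settings: Settings) -> RateLimitMetrics:
        return RateLimitMetrics(settings)

    # ... one factory per metrics family (Connection, Coordinator, DLQ, ...)
```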
     # Disable OpenAPI/Docs in production for security; health endpoints provide readiness
     app = FastAPI(
@@ -99,9 +70,7 @@ def create_app(settings: Settings | None = None) -> FastAPI:
     setup_metrics(app, settings, logger)
     app.add_middleware(MetricsMiddleware)

-    if settings.RATE_LIMIT_ENABLED:
-        app.add_middleware(RateLimitMiddleware)
-
+    app.add_middleware(RateLimitMiddleware, settings=settings)
     app.add_middleware(CSRFMiddleware, container=container)
     app.add_middleware(CorrelationMiddleware)
     app.add_middleware(RequestSizeLimitMiddleware)
diff --git a/backend/app/services/coordinator/coordinator.py b/backend/app/services/coordinator/coordinator.py
index b2610e59..5f93ceb6 100644
--- a/backend/app/services/coordinator/coordinator.py
+++ b/backend/app/services/coordinator/coordinator.py
@@ -6,7 +6,7 @@ from uuid import uuid4

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_coordinator_metrics
+from app.core.metrics import CoordinatorMetrics, EventMetrics
 from app.db.repositories.execution_repository import ExecutionRepository
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import CONSUMER_GROUP_SUBSCRIPTIONS, GroupId
@@ -56,13 +56,16 @@ def __init__(
         execution_repository: ExecutionRepository,
         idempotency_manager: IdempotencyManager,
         logger: logging.Logger,
+        coordinator_metrics: CoordinatorMetrics,
+        event_metrics: EventMetrics,
         consumer_group: str = GroupId.EXECUTION_COORDINATOR,
         max_concurrent_scheduling: int = 10,
         scheduling_interval_seconds: float = 0.5,
     ):
         super().__init__()
         self.logger = logger
-        self.metrics = get_coordinator_metrics()
+        self.metrics = coordinator_metrics
+        self._event_metrics = event_metrics
         self._settings = settings

         # Kafka configuration
@@ -71,11 +74,19 @@ def __init__(

         # Components
         self.queue_manager = QueueManager(
-            logger=self.logger, max_queue_size=10000, max_executions_per_user=100, stale_timeout_seconds=3600
+            logger=self.logger,
+            coordinator_metrics=coordinator_metrics,
+            max_queue_size=10000,
+            max_executions_per_user=100,
+            stale_timeout_seconds=3600,
         )
         self.resource_manager = ResourceManager(
-            logger=self.logger, total_cpu_cores=32.0, total_memory_mb=65536, total_gpu_count=0
+            logger=self.logger,
+            coordinator_metrics=coordinator_metrics,
+            total_cpu_cores=32.0,
+            total_memory_mb=65536,
+            total_gpu_count=0,
         )

         # Kafka components
@@ -127,6 +138,7 @@ async def _on_start(self) -> None:
             schema_registry=self._schema_registry_manager,
             settings=self._settings,
             logger=self.logger,
+            event_metrics=self._event_metrics,
         )

         # Register handlers with EventDispatcher BEFORE wrapping with idempotency
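A detail worth noticing in the coordinator change: the single `coordinator_metrics` instance is handed to both `QueueManager` and `ResourceManager`, so queue and resource gauges report through one shared object rather than two independently constructed ones. Reduced to a sketch:

```python
import logging


class CoordinatorMetrics:
    """Stand-in for the real class: one shared sink for coordinator gauges."""


class QueueManager:
    def __init__(self, logger: logging.Logger, coordinator_metrics: CoordinatorMetrics) -> None:
        self.metrics = coordinator_metrics


class ResourceManager:
    def __init__(self, logger: logging.Logger, coordinator_metrics: CoordinatorMetrics) -> None:
        self.metrics = coordinator_metrics


logger = logging.getLogger("coordinator")
shared = CoordinatorMetrics()
queue, resources = QueueManager(logger, shared), ResourceManager(logger, shared)
assert queue.metrics is resources.metrics  # one instance, one set of series
```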
         self.max_executions_per_user = max_executions_per_user
         self.stale_timeout_seconds = stale_timeout_seconds
diff --git a/backend/app/services/coordinator/resource_manager.py b/backend/app/services/coordinator/resource_manager.py
index 8910852f..de2cbba6 100644
--- a/backend/app/services/coordinator/resource_manager.py
+++ b/backend/app/services/coordinator/resource_manager.py
@@ -3,7 +3,7 @@ from dataclasses import dataclass
 from typing import Dict, List

-from app.core.metrics.context import get_coordinator_metrics
+from app.core.metrics import CoordinatorMetrics


 @dataclass
@@ -86,13 +86,14 @@ class ResourceManager:
     def __init__(
         self,
         logger: logging.Logger,
+        coordinator_metrics: CoordinatorMetrics,
         total_cpu_cores: float = 32.0,
         total_memory_mb: int = 65536,  # 64GB
         total_gpu_count: int = 0,
         overcommit_factor: float = 1.2,  # Allow 20% overcommit
     ):
         self.logger = logger
-        self.metrics = get_coordinator_metrics()
+        self.metrics = coordinator_metrics
         self.pool = ResourcePool(
             total_cpu_cores=total_cpu_cores * overcommit_factor,
             total_memory_mb=int(total_memory_mb * overcommit_factor),
diff --git a/backend/app/services/event_bus.py b/backend/app/services/event_bus.py
index 455085d4..bd0080ee 100644
--- a/backend/app/services/event_bus.py
+++ b/backend/app/services/event_bus.py
@@ -13,7 +13,7 @@ from pydantic import BaseModel, ConfigDict

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_connection_metrics
+from app.core.metrics import ConnectionMetrics
 from app.domain.enums.kafka import KafkaTopic
 from app.settings import Settings
@@ -53,11 +53,11 @@ class EventBus(LifecycleEnabled):
     - *.completed - matches all completed events
     """

-    def __init__(self, settings: Settings, logger: logging.Logger) -> None:
+    def __init__(self, settings: Settings, logger: logging.Logger, connection_metrics: ConnectionMetrics) -> None:
         super().__init__()
         self.logger = logger
         self.settings = settings
-        self.metrics = get_connection_metrics()
+        self.metrics = connection_metrics
         self.producer: Optional[AIOKafkaProducer] = None
         self.consumer: Optional[AIOKafkaConsumer] = None
         self._subscriptions: dict[str, Subscription] = {}  # id -> Subscription
@@ -83,7 +83,6 @@ async def _initialize_kafka(self) -> None:
             max_batch_size=16384,
             enable_idempotence=True,
         )
-        await self.producer.start()

         # Consumer setup
         self.consumer = AIOKafkaConsumer(
@@ -98,7 +97,9 @@ async def _initialize_kafka(self) -> None:
             max_poll_interval_ms=self.settings.KAFKA_MAX_POLL_INTERVAL_MS,
             request_timeout_ms=self.settings.KAFKA_REQUEST_TIMEOUT_MS,
         )
-        await self.consumer.start()
+
+        # Start both in parallel for faster startup
+        await asyncio.gather(self.producer.start(), self.consumer.start())

     async def _on_stop(self) -> None:
         """Stop the event bus and clean up resources."""
@@ -318,9 +319,10 @@ async def get_statistics(self) -> dict[str, Any]:
 class EventBusManager:
     """Manages EventBus lifecycle as a singleton."""

-    def __init__(self, settings: Settings, logger: logging.Logger) -> None:
+    def __init__(self, settings: Settings, logger: logging.Logger, connection_metrics: ConnectionMetrics) -> None:
         self.settings = settings
         self.logger = logger
+        self._connection_metrics = connection_metrics
         self._event_bus: Optional[EventBus] = None
         self._lock = asyncio.Lock()

@@ -328,7 +330,7 @@ async def get_event_bus(self) -> EventBus:
         """Get or create the event bus instance."""
         async with self._lock:
             if self._event_bus is None:
-                self._event_bus = EventBus(self.settings, self.logger)
+                self._event_bus = EventBus(self.settings, self.logger, self._connection_metrics)
                 await self._event_bus.__aenter__()
             return self._event_bus
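`EventBusManager.get_event_bus` is a lock-guarded lazy singleton: the lock serializes first callers, and the `None` check inside the lock ensures only one bus is ever created and entered. The idiom, distilled:

```python
import asyncio
from typing import Optional


class Bus:
    async def __aenter__(self) -> "Bus":
        await asyncio.sleep(0)  # stands in for Kafka producer/consumer startup
        return self

    async def __aexit__(self, *exc: object) -> None: ...


class Manager:
    def __init__(self) -> None:
        self._bus: Optional[Bus] = None
        self._lock = asyncio.Lock()

    async def get(self) -> Bus:
        async with self._lock:       # serialize first-time creation
            if self._bus is None:    # check under the lock, not before it
                self._bus = await Bus().__aenter__()
            return self._bus


async def main() -> None:
    m = Manager()
    a, b = await asyncio.gather(m.get(), m.get())
    assert a is b  # concurrent callers share one instance


asyncio.run(main())
```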
diff --git a/backend/app/services/execution_service.py b/backend/app/services/execution_service.py
index e4455c36..fb394342 100644
--- a/backend/app/services/execution_service.py
+++ b/backend/app/services/execution_service.py
@@ -5,7 +5,7 @@
 from typing import Any, Generator, TypeAlias

 from app.core.correlation import CorrelationContext
-from app.core.metrics.context import get_execution_metrics
+from app.core.metrics import ExecutionMetrics
 from app.db.repositories.execution_repository import ExecutionRepository
 from app.domain.enums.events import EventType
 from app.domain.enums.execution import ExecutionStatus
@@ -52,6 +52,7 @@ def __init__(
         event_store: EventStore,
         settings: Settings,
         logger: logging.Logger,
+        execution_metrics: ExecutionMetrics,
     ) -> None:
         """
         Initialize execution service.
@@ -62,13 +63,14 @@ def __init__(
             event_store: Event store for event persistence.
             settings: Application settings.
             logger: Logger instance.
+            execution_metrics: Metrics for tracking execution operations.
         """
         self.execution_repo = execution_repo
         self.producer = producer
         self.event_store = event_store
         self.settings = settings
         self.logger = logger
-        self.metrics = get_execution_metrics()
+        self.metrics = execution_metrics

     @contextmanager
     def _track_active_execution(self) -> Generator[None, None, None]:  # noqa: D401
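`_track_active_execution` is a `@contextmanager` that brackets an in-flight gauge around each execution. With the metrics object injected, the pattern reduces to (a minimal sketch; the stand-in class assumes a simple counter where the real class wraps an exporter):

```python
from contextlib import contextmanager
from typing import Generator


class ExecutionMetrics:
    """Stand-in with a plain in-flight counter."""

    def __init__(self) -> None:
        self.active = 0


@contextmanager
def track_active(metrics: ExecutionMetrics) -> Generator[None, None, None]:
    metrics.active += 1          # increment on entry
    try:
        yield
    finally:
        metrics.active -= 1      # always decrement, even if the body raised


metrics = ExecutionMetrics()
with track_active(metrics):
    assert metrics.active == 1
assert metrics.active == 0
```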
diff --git a/backend/app/services/idempotency/idempotency_manager.py b/backend/app/services/idempotency/idempotency_manager.py
index 90757740..e30b6efe 100644
--- a/backend/app/services/idempotency/idempotency_manager.py
+++ b/backend/app/services/idempotency/idempotency_manager.py
@@ -8,7 +8,7 @@ from pydantic import BaseModel
 from pymongo.errors import DuplicateKeyError

-from app.core.metrics.context import get_database_metrics
+from app.core.metrics import DatabaseMetrics
 from app.domain.events.typed import BaseEvent
 from app.domain.idempotency import IdempotencyRecord, IdempotencyStats, IdempotencyStatus
@@ -67,9 +67,15 @@ async def health_check(self) -> None: ...


 class IdempotencyManager:
-    def __init__(self, config: IdempotencyConfig, repository: IdempotencyRepoProtocol, logger: logging.Logger) -> None:
+    def __init__(
+        self,
+        config: IdempotencyConfig,
+        repository: IdempotencyRepoProtocol,
+        logger: logging.Logger,
+        database_metrics: DatabaseMetrics,
+    ) -> None:
         self.config = config
-        self.metrics = get_database_metrics()
+        self.metrics = database_metrics
         self._repo: IdempotencyRepoProtocol = repository
         self._stats_update_task: asyncio.Task[None] | None = None
         self.logger = logger
@@ -320,5 +326,6 @@ def create_idempotency_manager(
     repository: IdempotencyRepoProtocol,
     config: IdempotencyConfig | None = None,
     logger: logging.Logger,
+    database_metrics: DatabaseMetrics,
 ) -> IdempotencyManager:
-    return IdempotencyManager(config or IdempotencyConfig(), repository, logger)
+    return IdempotencyManager(config or IdempotencyConfig(), repository, logger, database_metrics)
diff --git a/backend/app/services/k8s_worker/worker.py b/backend/app/services/k8s_worker/worker.py
index 5a1c0ccc..cd9af936 100644
--- a/backend/app/services/k8s_worker/worker.py
+++ b/backend/app/services/k8s_worker/worker.py
@@ -10,7 +10,7 @@ from kubernetes.client.rest import ApiException

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics import ExecutionMetrics, KubernetesMetrics
+from app.core.metrics import EventMetrics, ExecutionMetrics, KubernetesMetrics
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import CONSUMER_GROUP_SUBSCRIPTIONS, GroupId
 from app.domain.enums.storage import ExecutionErrorType
@@ -56,8 +56,10 @@ def __init__(
         event_store: EventStore,
         idempotency_manager: IdempotencyManager,
         logger: logging.Logger,
+        event_metrics: EventMetrics,
     ):
         super().__init__()
+        self._event_metrics = event_metrics
         self.logger = logger
         self.metrics = KubernetesMetrics(settings)
         self.execution_metrics = ExecutionMetrics(settings)
@@ -126,6 +128,7 @@ async def _on_start(self) -> None:
             schema_registry=self._schema_registry_manager,
             settings=self._settings,
             logger=self.logger,
+            event_metrics=self._event_metrics,
         )

         # Wrap consumer with idempotency - use content hash for pod commands
diff --git a/backend/app/services/kafka_event_service.py b/backend/app/services/kafka_event_service.py
index 1b25a34f..b0a3bcb7 100644
--- a/backend/app/services/kafka_event_service.py
+++ b/backend/app/services/kafka_event_service.py
@@ -7,7 +7,7 @@ from opentelemetry import trace

 from app.core.correlation import CorrelationContext
-from app.core.metrics.context import get_event_metrics
+from app.core.metrics import EventMetrics
 from app.core.tracing.utils import inject_trace_context
 from app.db.repositories.event_repository import EventRepository
 from app.domain.enums.events import EventType
@@ -26,11 +26,12 @@ def __init__(
         kafka_producer: UnifiedProducer,
         settings: Settings,
         logger: logging.Logger,
+        event_metrics: EventMetrics,
     ):
         self.event_repository = event_repository
         self.kafka_producer = kafka_producer
         self.logger = logger
-        self.metrics = get_event_metrics()
+        self.metrics = event_metrics
         self.settings = settings

     async def publish_event(
diff --git a/backend/app/services/notification_service.py b/backend/app/services/notification_service.py
index eb6f79ad..780f1279 100644
--- a/backend/app/services/notification_service.py
+++ b/backend/app/services/notification_service.py
@@ -7,7 +7,7 @@

 import httpx

-from app.core.metrics.context import get_notification_metrics
+from app.core.metrics import EventMetrics, NotificationMetrics
 from app.core.tracing.utils import add_span_attributes
 from app.core.utils import StringEnum
 from app.db.repositories.notification_repository import NotificationRepository
@@ -122,11 +122,14 @@ def __init__(
         sse_bus: SSERedisBus,
         settings: Settings,
         logger: logging.Logger,
+        notification_metrics: NotificationMetrics,
+        event_metrics: EventMetrics,
     ) -> None:
         self.repository = notification_repository
         self.event_service = event_service
         self.event_bus_manager = event_bus_manager
-        self.metrics = get_notification_metrics()
+        self.metrics = notification_metrics
+        self._event_metrics = event_metrics
         self.settings = settings
         self.schema_registry_manager = schema_registry_manager
         self.sse_bus = sse_bus
@@ -247,6 +250,7 @@ async def _subscribe_to_events(self) -> None:
             schema_registry=self.schema_registry_manager,
             settings=self.settings,
             logger=self.logger,
+            event_metrics=self._event_metrics,
         )

         # Start consumer
diff --git a/backend/app/services/pod_monitor/monitor.py b/backend/app/services/pod_monitor/monitor.py
index f6325ab3..ecbb4556 100644
--- a/backend/app/services/pod_monitor/monitor.py
+++ b/backend/app/services/pod_monitor/monitor.py
@@ -12,7 +12,7 @@
 from app.core.k8s_clients import K8sClients, close_k8s_clients, create_k8s_clients
 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_kubernetes_metrics
+from app.core.metrics import KubernetesMetrics
 from app.core.utils import StringEnum
 from app.domain.events.typed import DomainEvent
 from app.services.kafka_event_service import KafkaEventService
@@ -104,6 +104,7 @@ def __init__(
         logger: logging.Logger,
         k8s_clients: K8sClients,
         event_mapper: PodEventMapper,
+        kubernetes_metrics: KubernetesMetrics,
     ) -> None:
         """Initialize the pod monitor with all required dependencies.
@@ -134,7 +135,7 @@ def __init__(
         self._reconcile_task: asyncio.Task[None] | None = None

         # Metrics
-        self._metrics = get_kubernetes_metrics()
+        self._metrics = kubernetes_metrics

     @property
     def state(self) -> MonitorState:
@@ -462,6 +463,7 @@ async def create_pod_monitor(
     config: PodMonitorConfig,
     kafka_event_service: KafkaEventService,
     logger: logging.Logger,
+    kubernetes_metrics: KubernetesMetrics,
     k8s_clients: K8sClients | None = None,
     event_mapper: PodEventMapper | None = None,
 ) -> AsyncIterator[PodMonitor]:
@@ -491,6 +493,7 @@ async def create_pod_monitor(
         logger=logger,
         k8s_clients=k8s_clients,
         event_mapper=event_mapper,
+        kubernetes_metrics=kubernetes_metrics,
     )

     try:
diff --git a/backend/app/services/rate_limit_service.py b/backend/app/services/rate_limit_service.py
index d28204de..1b5393a0 100644
--- a/backend/app/services/rate_limit_service.py
+++ b/backend/app/services/rate_limit_service.py
@@ -9,7 +9,7 @@

 import redis.asyncio as redis

-from app.core.metrics.rate_limit import RateLimitMetrics
+from app.core.metrics import RateLimitMetrics
 from app.core.tracing.utils import add_span_attributes
 from app.domain.rate_limit import (
     EndpointGroup,
@@ -200,18 +200,6 @@ async def check_rate_limit(
         )

         try:
-            if not self.settings.RATE_LIMIT_ENABLED:
-                # Track request when rate limiting is disabled
-                self.metrics.requests_total.add(
-                    1,
-                    {
-                        "authenticated": str(ctx.authenticated).lower(),
-                        "endpoint": ctx.normalized_endpoint,
-                        "algorithm": "disabled",
-                    },
-                )
-                return self._unlimited()
-
             if config is None:
                 with self._timer(self.metrics.redis_duration, {"operation": "get_config"}):
                     config = await self._get_config()
diff --git a/backend/app/services/result_processor/processor.py b/backend/app/services/result_processor/processor.py
index 530dbd15..3f9864db 100644
--- a/backend/app/services/result_processor/processor.py
+++ b/backend/app/services/result_processor/processor.py
@@ -5,7 +5,7 @@
 from pydantic import BaseModel, ConfigDict, Field

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_execution_metrics
+from app.core.metrics import EventMetrics, ExecutionMetrics
 from app.core.utils import StringEnum
 from app.db.repositories.execution_repository import ExecutionRepository
 from app.domain.enums.events import EventType
@@ -62,6 +62,8 @@ def __init__(
         settings: Settings,
         idempotency_manager: IdempotencyManager,
         logger: logging.Logger,
+        execution_metrics: ExecutionMetrics,
+        event_metrics: EventMetrics,
     ) -> None:
         """Initialize the result processor."""
         super().__init__()
@@ -70,7 +72,8 @@ def __init__(
         self._producer = producer
         self._schema_registry = schema_registry
         self._settings = settings
-        self._metrics = get_execution_metrics()
+        self._metrics = execution_metrics
+        self._event_metrics = event_metrics
         self._idempotency_manager: IdempotencyManager = idempotency_manager
         self._state = ProcessingState.IDLE
         self._consumer: IdempotentConsumerWrapper | None = None
@@ -137,6 +140,7 @@ async def _create_consumer(self) -> IdempotentConsumerWrapper:
             schema_registry=self._schema_registry,
             settings=self._settings,
             logger=self.logger,
+            event_metrics=self._event_metrics,
         )
         wrapper = IdempotentConsumerWrapper(
             consumer=base_consumer,
diff --git a/backend/app/services/saga/saga_orchestrator.py b/backend/app/services/saga/saga_orchestrator.py
index 4fef4167..194d6ac3 100644
--- a/backend/app/services/saga/saga_orchestrator.py
+++ b/backend/app/services/saga/saga_orchestrator.py
@@ -6,6 +6,7 @@
 from opentelemetry.trace import SpanKind

 from app.core.lifecycle import LifecycleEnabled
+from app.core.metrics import EventMetrics
 from app.core.tracing import EventAttributes
 from app.core.tracing.utils import get_tracer
 from app.db.repositories.resource_allocation_repository import ResourceAllocationRepository
@@ -40,6 +41,7 @@ def __init__(
         idempotency_manager: IdempotencyManager,
         resource_allocation_repository: ResourceAllocationRepository,
         logger: logging.Logger,
+        event_metrics: EventMetrics,
     ):
         super().__init__()
         self.config = config
@@ -55,6 +57,7 @@ def __init__(
         self._alloc_repo: ResourceAllocationRepository = resource_allocation_repository
         self._tasks: list[asyncio.Task[None]] = []
         self.logger = logger
+        self._event_metrics = event_metrics

     def register_saga(self, saga_class: type[BaseSaga]) -> None:
         self._sagas[saga_class.get_name()] = saga_class
@@ -136,6 +139,7 @@ async def _start_consumer(self) -> None:
             schema_registry=self._schema_registry_manager,
             settings=self._settings,
             logger=self.logger,
+            event_metrics=self._event_metrics,
         )
         self._consumer = IdempotentConsumerWrapper(
             consumer=base_consumer,
@@ -542,6 +546,7 @@ def create_saga_orchestrator(
     resource_allocation_repository: ResourceAllocationRepository,
     config: SagaConfig,
     logger: logging.Logger,
+    event_metrics: EventMetrics,
 ) -> SagaOrchestrator:
     """Factory function to create a saga orchestrator.

@@ -555,6 +560,7 @@ def create_saga_orchestrator(
         resource_allocation_repository: Repository for resource allocations
         config: Saga configuration
         logger: Logger instance
+        event_metrics: Event metrics for tracking Kafka consumption

     Returns:
         A new saga orchestrator instance
@@ -569,4 +575,5 @@ def create_saga_orchestrator(
         idempotency_manager=idempotency_manager,
         resource_allocation_repository=resource_allocation_repository,
         logger=logger,
+        event_metrics=event_metrics,
     )
diff --git a/backend/app/services/sse/kafka_redis_bridge.py b/backend/app/services/sse/kafka_redis_bridge.py
index 950837ca..07e03c44 100644
--- a/backend/app/services/sse/kafka_redis_bridge.py
+++ b/backend/app/services/sse/kafka_redis_bridge.py
@@ -1,9 +1,10 @@
 from __future__ import annotations

+import asyncio
 import logging

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.events import EventMetrics
+from app.core.metrics import EventMetrics
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import CONSUMER_GROUP_SUBSCRIPTIONS, GroupId
 from app.domain.events.typed import DomainEvent
@@ -44,31 +45,29 @@ async def _on_start(self) -> None:
         """Start the SSE Kafka→Redis bridge."""
         self.logger.info(f"Starting SSE Kafka→Redis bridge with {self.num_consumers} consumers")

-        for i in range(self.num_consumers):
-            consumer = await self._create_consumer(i)
-            self.consumers.append(consumer)
+        # Phase 1: Build all consumers and track them immediately (no I/O)
+        self.consumers = [self._build_consumer(i) for i in range(self.num_consumers)]
+
+        # Phase 2: Start all in parallel - already tracked in self.consumers for cleanup
+        topics = list(CONSUMER_GROUP_SUBSCRIPTIONS[GroupId.WEBSOCKET_GATEWAY])
+        await asyncio.gather(*[c.start(topics) for c in self.consumers])

         self.logger.info("SSE Kafka→Redis bridge started successfully")

     async def _on_stop(self) -> None:
         """Stop the SSE Kafka→Redis bridge."""
         self.logger.info("Stopping SSE Kafka→Redis bridge")
-
-        for consumer in self.consumers:
-            await consumer.stop()
-
+        await asyncio.gather(*[c.stop() for c in self.consumers], return_exceptions=True)
         self.consumers.clear()

         self.logger.info("SSE Kafka→Redis bridge stopped")

-    async def _create_consumer(self, consumer_index: int) -> UnifiedConsumer:
+    def _build_consumer(self, consumer_index: int) -> UnifiedConsumer:
+        """Build a consumer instance without starting it."""
         suffix = self.settings.KAFKA_GROUP_SUFFIX
-        group_id = f"sse-bridge-pool.{suffix}"
-        client_id = f"sse-bridge-{consumer_index}.{suffix}"
-
         config = ConsumerConfig(
             bootstrap_servers=self.settings.KAFKA_BOOTSTRAP_SERVERS,
-            group_id=group_id,
-            client_id=client_id,
+            group_id=f"sse-bridge-pool.{suffix}",
+            client_id=f"sse-bridge-{consumer_index}.{suffix}",
             enable_auto_commit=True,
             auto_offset_reset="latest",
             max_poll_interval_ms=self.settings.KAFKA_MAX_POLL_INTERVAL_MS,
@@ -80,21 +79,15 @@ async def _create_consumer(self, consumer_index: int) -> UnifiedConsumer:
         dispatcher = EventDispatcher(logger=self.logger)
         self._register_routing_handlers(dispatcher)

-        consumer = UnifiedConsumer(
+        return UnifiedConsumer(
             config=config,
             event_dispatcher=dispatcher,
             schema_registry=self.schema_registry,
             settings=self.settings,
             logger=self.logger,
+            event_metrics=self.event_metrics,
         )

-        # Use WEBSOCKET_GATEWAY subscriptions - SSE bridge serves same purpose (real-time client delivery)
-        topics = list(CONSUMER_GROUP_SUBSCRIPTIONS[GroupId.WEBSOCKET_GATEWAY])
-        await consumer.start(topics)
-
-        self.logger.info(f"Bridge consumer {consumer_index} started")
-        return consumer
-
     def _register_routing_handlers(self, dispatcher: EventDispatcher) -> None:
         """Publish relevant events to Redis channels keyed by execution_id."""
         relevant_events = [
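The bridge change separates construction (pure, no I/O) from startup (network). Because every consumer lands in `self.consumers` before any `start()` runs, a failure mid-`gather` still leaves all instances reachable for cleanup. In miniature:

```python
import asyncio


class Consumer:
    def __init__(self, index: int) -> None:
        self.index = index
        self.started = False

    async def start(self) -> None:
        if self.index == 2:
            raise RuntimeError("broker unreachable")  # simulate one failure
        self.started = True

    async def stop(self) -> None:
        self.started = False


async def main() -> None:
    consumers = [Consumer(i) for i in range(3)]  # phase 1: build and track
    try:
        await asyncio.gather(*[c.start() for c in consumers])  # phase 2: start
    except RuntimeError:
        # every consumer is already tracked, so cleanup reaches them all,
        # including the ones whose start() succeeded before the failure
        await asyncio.gather(*[c.stop() for c in consumers], return_exceptions=True)


asyncio.run(main())
```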
diff --git a/backend/app/services/sse/sse_service.py b/backend/app/services/sse/sse_service.py
index 3feed4c4..e474fc41 100644
--- a/backend/app/services/sse/sse_service.py
+++ b/backend/app/services/sse/sse_service.py
@@ -4,7 +4,7 @@
 from datetime import datetime, timezone
 from typing import Any, Dict

-from app.core.metrics.context import get_connection_metrics
+from app.core.metrics import ConnectionMetrics
 from app.db.repositories.sse_repository import SSERepository
 from app.domain.enums.events import EventType
 from app.domain.enums.sse import SSEControlEvent, SSENotificationEvent
@@ -39,6 +39,7 @@ def __init__(
         shutdown_manager: SSEShutdownManager,
         settings: Settings,
         logger: logging.Logger,
+        connection_metrics: ConnectionMetrics,
     ) -> None:
         self.repository = repository
         self.router = router
@@ -46,7 +47,7 @@ def __init__(
         self.shutdown_manager = shutdown_manager
         self.settings = settings
         self.logger = logger
-        self.metrics = get_connection_metrics()
+        self.metrics = connection_metrics
         self.heartbeat_interval = getattr(settings, "SSE_HEARTBEAT_INTERVAL", 30)

     async def create_execution_stream(self, execution_id: str, user_id: str) -> AsyncGenerator[Dict[str, Any], None]:
@@ -114,8 +115,8 @@ async def create_execution_stream(self, execution_id: str, user_id: str) -> Asyn
         finally:
             if subscription is not None:
-                await subscription.close()
-            await self.shutdown_manager.unregister_connection(execution_id, connection_id)
+                await asyncio.shield(subscription.close())
+            await asyncio.shield(self.shutdown_manager.unregister_connection(execution_id, connection_id))
             self.logger.info("SSE connection closed", extra={"execution_id": execution_id})

     async def _stream_events_redis(
@@ -254,11 +255,8 @@ async def create_notification_stream(self, user_id: str) -> AsyncGenerator[Dict[
             )
         finally:
-            try:
-                if subscription is not None:
-                    await subscription.close()
-            except Exception:
-                pass
+            if subscription is not None:
+                await asyncio.shield(subscription.close())

     async def get_health_status(self) -> SSEHealthDomain:
         router_stats = self.router.get_stats()
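The `finally` blocks now wrap cleanup in `asyncio.shield`. An SSE generator is typically torn down by cancelling its task when the client disconnects; an un-shielded `await` inside `finally` can itself be cancelled, abandoning the close half-way. `shield` lets the inner awaitable run to completion even while the outer task is being cancelled:

```python
import asyncio


async def close_subscription() -> None:
    await asyncio.sleep(0.1)   # e.g. a Redis UNSUBSCRIBE round-trip
    print("subscription closed")


async def stream() -> None:
    try:
        await asyncio.sleep(10)  # the long-lived SSE loop
    finally:
        # Without shield, a cancellation arriving during this await would
        # abort the close; with shield, the close finishes in the background.
        await asyncio.shield(close_subscription())


async def main() -> None:
    task = asyncio.create_task(stream())
    await asyncio.sleep(0.05)
    task.cancel()              # client disconnect...
    await asyncio.sleep(0.01)
    task.cancel()              # ...and a second cancel during cleanup
    await asyncio.gather(task, return_exceptions=True)
    await asyncio.sleep(0.2)   # the shielded close still completes


asyncio.run(main())
```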
diff --git a/backend/app/services/sse/sse_shutdown_manager.py b/backend/app/services/sse/sse_shutdown_manager.py
index 86314b27..4551e812 100644
--- a/backend/app/services/sse/sse_shutdown_manager.py
+++ b/backend/app/services/sse/sse_shutdown_manager.py
@@ -5,7 +5,7 @@ from typing import Dict, Set

 from app.core.lifecycle import LifecycleEnabled
-from app.core.metrics.context import get_connection_metrics
+from app.core.metrics import ConnectionMetrics
 from app.domain.sse import ShutdownStatus
@@ -36,6 +36,7 @@ class SSEShutdownManager:
     def __init__(
         self,
         logger: logging.Logger,
+        connection_metrics: ConnectionMetrics,
         drain_timeout: float = 30.0,
         notification_timeout: float = 5.0,
         force_close_timeout: float = 10.0,
@@ -44,7 +45,7 @@ def __init__(
         self.drain_timeout = drain_timeout
         self.notification_timeout = notification_timeout
         self.force_close_timeout = force_close_timeout
-        self.metrics = get_connection_metrics()
+        self.metrics = connection_metrics
         self._phase = ShutdownPhase.READY
         self._shutdown_initiated = False
@@ -309,6 +310,7 @@ async def _wait_for_complete(self) -> None:

 def create_sse_shutdown_manager(
     logger: logging.Logger,
+    connection_metrics: ConnectionMetrics,
     drain_timeout: float = 30.0,
     notification_timeout: float = 5.0,
     force_close_timeout: float = 10.0,
@@ -317,6 +319,7 @@ def create_sse_shutdown_manager(

     Args:
         logger: Logger instance
+        connection_metrics: Connection metrics for tracking SSE connections
         drain_timeout: Time to wait for connections to close gracefully
         notification_timeout: Time to wait for shutdown notifications to be sent
         force_close_timeout: Time before force closing connections
@@ -326,6 +329,7 @@ def create_sse_shutdown_manager(
     """
     return SSEShutdownManager(
         logger=logger,
+        connection_metrics=connection_metrics,
         drain_timeout=drain_timeout,
         notification_timeout=notification_timeout,
         force_close_timeout=force_close_timeout,
diff --git a/backend/app/settings.py b/backend/app/settings.py
index 44f8e2a3..fd510051 100644
--- a/backend/app/settings.py
+++ b/backend/app/settings.py
@@ -21,6 +21,7 @@ class Settings(BaseSettings):
     KUBERNETES_CONFIG_PATH: str = "~/.kube/config"
     KUBERNETES_CA_CERTIFICATE_PATH: str | None = None
     RATE_LIMITS: str = "100/minute"
+    RATE_LIMIT_ENABLED: bool = True  # Set to False to disable rate limiting entirely

     SSL_KEYFILE: str = "/app/certs/server.key"
     SSL_CERTFILE: str = "/app/certs/server.crt"
@@ -28,6 +29,9 @@ class Settings(BaseSettings):
     SERVER_HOST: str = "localhost"
     SERVER_PORT: int = 443

+    # Kubernetes namespace for execution pods
+    K8S_NAMESPACE: str = "integr8scode"
+
     # Settings for Kubernetes resource limits and requests
     K8S_POD_CPU_LIMIT: str = "1000m"
     K8S_POD_MEMORY_LIMIT: str = "128Mi"
@@ -119,11 +123,10 @@ class Settings(BaseSettings):
     REDIS_DB: int = 0
     REDIS_PASSWORD: str | None = None
     REDIS_SSL: bool = False
-    REDIS_MAX_CONNECTIONS: int = 50
+    REDIS_MAX_CONNECTIONS: int = 200
     REDIS_DECODE_RESPONSES: bool = True

     # Rate Limiting Configuration
-    RATE_LIMIT_ENABLED: bool = True
     RATE_LIMIT_DEFAULT_REQUESTS: int = 100
     RATE_LIMIT_DEFAULT_WINDOW: int = 60  # seconds
     RATE_LIMIT_BURST_MULTIPLIER: float = 1.5
diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py
index c9eef28d..aeadecd4 100644
--- a/backend/tests/conftest.py
+++ b/backend/tests/conftest.py
@@ -17,10 +17,17 @@
 from scripts.create_topics import create_topics

 # ===== Worker-specific isolation for pytest-xdist =====
-# Redis has 16 DBs (0-15); each xdist worker gets one, limiting parallel workers to 16.
+# Supports both xdist workers AND multiple independent pytest processes.
+#
+# TEST_RUN_ID: Unique identifier for this pytest process (set by CI or auto-generated).
+#   Allows running backend-integration, backend-e2e, frontend-e2e in parallel.
+# PYTEST_XDIST_WORKER: Worker ID within a single pytest-xdist run (gw0, gw1, etc.)
+#
+# Combined, these give full isolation: each test worker in each pytest process is unique.
+_RUN_ID = os.environ.get("TEST_RUN_ID") or uuid.uuid4().hex[:8]
 _WORKER_ID = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
 _WORKER_NUM = int(_WORKER_ID.removeprefix("gw") or "0")
-assert _WORKER_NUM < 16, f"xdist worker {_WORKER_NUM} >= 16 exceeds Redis DB limit; use -n 16 or fewer"
+_ISOLATION_KEY = f"{_RUN_ID}_{_WORKER_ID}"


 # ===== Pytest hooks =====
@@ -46,21 +53,26 @@ def test_settings() -> Settings:

     What gets isolated per worker (to prevent interference):
     - DATABASE_NAME: Each worker gets its own MongoDB database
-    - REDIS_DB: Each worker gets its own Redis database (0-15)
+    - REDIS_DB: Each worker gets a Redis database (0-15), derived from worker number and run ID
     - KAFKA_GROUP_SUFFIX: Each worker gets unique consumer groups

     What's SHARED (from env, no per-worker suffix):
     - KAFKA_TOPIC_PREFIX: Topics created once by CI/scripts
     - SCHEMA_SUBJECT_PREFIX: Schemas shared across workers
+
+    Isolation works across:
+    - xdist workers within a single pytest process (gw0, gw1, ...)
+    - Multiple independent pytest processes (via TEST_RUN_ID or auto-UUID)
     """
     base = Settings(_env_file=".env.test")
-    session_id = uuid.uuid4().hex[:8]
+    # Deterministic Redis DB: worker number + ASCII sum of RUN_ID (no hash randomization)
+    redis_db = (_WORKER_NUM + sum(ord(c) for c in _RUN_ID)) % 16
     return base.model_copy(
         update={
-            # Per-worker isolation for xdist - must be dynamic, can't be in .env.test
-            "DATABASE_NAME": f"integr8scode_test_{session_id}_{_WORKER_ID}",
-            "REDIS_DB": _WORKER_NUM,
-            "KAFKA_GROUP_SUFFIX": f"{session_id}.{_WORKER_ID}",
+            # Per-worker isolation - uses _ISOLATION_KEY which includes RUN_ID + WORKER_ID
+            "DATABASE_NAME": f"integr8scode_test_{_ISOLATION_KEY}",
+            "REDIS_DB": redis_db,
+            "KAFKA_GROUP_SUFFIX": _ISOLATION_KEY,
         }
     )
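The Redis DB derivation is plain arithmetic, so collisions across processes are possible (only 16 slots) but the mapping is deterministic for a given run. A worked example with hypothetical IDs:

```python
# Hypothetical values: TEST_RUN_ID "3f9ac1d2", xdist worker "gw3".
_RUN_ID = "3f9ac1d2"
_WORKER_NUM = 3

# Same formula as conftest.py: an ASCII sum stays stable across processes,
# unlike hash(), which PYTHONHASHSEED randomizes per interpreter.
redis_db = (_WORKER_NUM + sum(ord(c) for c in _RUN_ID)) % 16

print(sum(ord(c) for c in _RUN_ID))  # 51+102+57+97+99+49+100+50 = 605
print(redis_db)                      # (3 + 605) % 16 = 608 % 16 = 0
```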
diff --git a/backend/tests/e2e/test_k8s_worker_create_pod.py b/backend/tests/e2e/test_k8s_worker_create_pod.py
index 30c23eb2..c43bb2e5 100644
--- a/backend/tests/e2e/test_k8s_worker_create_pod.py
+++ b/backend/tests/e2e/test_k8s_worker_create_pod.py
@@ -1,8 +1,8 @@
 import logging
-import os
 import uuid

 import pytest
+from app.core.metrics import EventMetrics
 from app.domain.events.typed import CreatePodCommandEvent, EventMetadata
 from app.events.core import UnifiedProducer
 from app.events.event_store import EventStore
@@ -21,18 +21,15 @@

 @pytest.mark.asyncio
 async def test_worker_creates_configmap_and_pod(
-    scope: AsyncContainer, monkeypatch: pytest.MonkeyPatch, test_settings: Settings
+    scope: AsyncContainer, test_settings: Settings
 ) -> None:
-    # Ensure non-default namespace for worker validation
-    ns = os.environ.get("K8S_NAMESPACE", "integr8scode")
-    if ns == "default":
-        ns = "integr8scode"
-    monkeypatch.setenv("K8S_NAMESPACE", ns)
+    ns = test_settings.K8S_NAMESPACE

     schema: SchemaRegistryManager = await scope.get(SchemaRegistryManager)
     store: EventStore = await scope.get(EventStore)
     producer: UnifiedProducer = await scope.get(UnifiedProducer)
     idem: IdempotencyManager = await scope.get(IdempotencyManager)
+    event_metrics: EventMetrics = await scope.get(EventMetrics)

     cfg = K8sWorkerConfig(namespace=ns, max_concurrent_pods=1)
     worker = KubernetesWorker(
@@ -43,6 +40,7 @@ async def test_worker_creates_configmap_and_pod(
         event_store=store,
         idempotency_manager=idem,
         logger=_test_logger,
+        event_metrics=event_metrics,
     )

     # Initialize k8s clients using worker's own method
diff --git a/backend/tests/e2e/test_resource_cleaner_k8s.py b/backend/tests/e2e/test_resource_cleaner_k8s.py
index 805aa785..e4a79fa8 100644
--- a/backend/tests/e2e/test_resource_cleaner_k8s.py
+++ b/backend/tests/e2e/test_resource_cleaner_k8s.py
@@ -1,9 +1,9 @@
 import asyncio
 import logging
-import os

 import pytest
 from app.services.result_processor.resource_cleaner import ResourceCleaner
+from app.settings import Settings

 pytestmark = [pytest.mark.e2e, pytest.mark.k8s]
@@ -11,19 +11,19 @@


 @pytest.mark.asyncio
-async def test_initialize_and_get_usage() -> None:
+async def test_initialize_and_get_usage(test_settings: Settings) -> None:
     rc = ResourceCleaner(logger=_test_logger)
     await rc.initialize()
-    usage = await rc.get_resource_usage(namespace=os.environ.get("K8S_NAMESPACE", "default"))
+    usage = await rc.get_resource_usage(namespace=test_settings.K8S_NAMESPACE)
     assert set(usage.keys()) >= {"pods", "configmaps", "network_policies"}


 @pytest.mark.asyncio
-async def test_cleanup_orphaned_resources_dry_run() -> None:
+async def test_cleanup_orphaned_resources_dry_run(test_settings: Settings) -> None:
     rc = ResourceCleaner(logger=_test_logger)
     await rc.initialize()
     cleaned = await rc.cleanup_orphaned_resources(
-        namespace=os.environ.get("K8S_NAMESPACE", "default"),
+        namespace=test_settings.K8S_NAMESPACE,
         max_age_hours=0,
         dry_run=True,
     )
@@ -31,12 +31,12 @@


 @pytest.mark.asyncio
-async def test_cleanup_nonexistent_pod() -> None:
+async def test_cleanup_nonexistent_pod(test_settings: Settings) -> None:
     rc = ResourceCleaner(logger=_test_logger)
     await rc.initialize()

     # Attempt to delete a pod that doesn't exist - should complete without errors
-    namespace = os.environ.get("K8S_NAMESPACE", "default")
+    namespace = test_settings.K8S_NAMESPACE
     nonexistent_pod = "integr8s-test-nonexistent-pod"

     # Should complete within timeout and not raise any exceptions
diff --git a/backend/tests/e2e/test_resource_cleaner_orphan.py b/backend/tests/e2e/test_resource_cleaner_orphan.py
index cf879ed1..334b7b29 100644
--- a/backend/tests/e2e/test_resource_cleaner_orphan.py
+++ b/backend/tests/e2e/test_resource_cleaner_orphan.py
@@ -3,6 +3,7 @@
 import pytest

 from app.services.result_processor.resource_cleaner import ResourceCleaner
+from app.settings import Settings
 from kubernetes import client as k8s_client
 from kubernetes import config as k8s_config
@@ -19,10 +20,10 @@ def _ensure_kubeconfig() -> None:


 @pytest.mark.asyncio
-async def test_cleanup_orphaned_configmaps_dry_run() -> None:
+async def test_cleanup_orphaned_configmaps_dry_run(test_settings: Settings) -> None:
     _ensure_kubeconfig()
     v1 = k8s_client.CoreV1Api()
-    ns = "default"
+    ns = test_settings.K8S_NAMESPACE
     name = f"int-test-cm-{int(datetime.now().timestamp())}"

     # Create a configmap labeled like the app uses
diff --git a/backend/tests/integration/app/test_main_app.py b/backend/tests/integration/app/test_main_app.py
index c178fe14..d92a5359 100644
--- a/backend/tests/integration/app/test_main_app.py
+++ b/backend/tests/integration/app/test_main_app.py
@@ -8,7 +8,7 @@
 pytestmark = pytest.mark.integration


-def test_create_app_real_instance(app: FastAPI, test_settings: Settings) -> None:
+def test_create_app_real_instance(app: FastAPI) -> None:
     assert isinstance(app, FastAPI)

     # Verify API routes are configured
@@ -24,8 +24,7 @@ def test_create_app_real_instance(app: FastAPI, test_settings: Settings) -> None
     assert "RequestSizeLimitMiddleware" in middleware_class_names, "Request size limit middleware not configured"
     assert "CacheControlMiddleware" in middleware_class_names, "Cache control middleware not configured"
     assert "MetricsMiddleware" in middleware_class_names, "Metrics middleware not configured"
-    if test_settings.RATE_LIMIT_ENABLED:
-        assert "RateLimitMiddleware" in middleware_class_names, "Rate limit middleware not configured"
+    assert "RateLimitMiddleware" in middleware_class_names, "Rate limit middleware not configured"


 def test_create_app_function_constructs(test_settings: Settings) -> None:
diff --git a/backend/tests/integration/dlq/test_dlq_manager.py b/backend/tests/integration/dlq/test_dlq_manager.py
index b1f84426..6af47303 100644
--- a/backend/tests/integration/dlq/test_dlq_manager.py
+++ b/backend/tests/integration/dlq/test_dlq_manager.py
@@ -6,12 +6,14 @@
 import pytest
 from aiokafka import AIOKafkaConsumer, AIOKafkaProducer
+from app.core.metrics import DLQMetrics
 from app.dlq.manager import create_dlq_manager
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import KafkaTopic
 from app.domain.events.typed import DLQMessageReceivedEvent
 from app.events.schema.schema_registry import SchemaRegistryManager
 from app.settings import Settings
+from dishka import AsyncContainer
 from tests.helpers import make_execution_requested_event
@@ -24,10 +26,11 @@


 @pytest.mark.asyncio
-async def test_dlq_manager_persists_and_emits_event(test_settings: Settings) -> None:
+async def test_dlq_manager_persists_and_emits_event(scope: AsyncContainer, test_settings: Settings) -> None:
     """Test that DLQ manager persists messages and emits DLQMessageReceivedEvent."""
     schema_registry = SchemaRegistryManager(test_settings, _test_logger)
-    manager = create_dlq_manager(settings=test_settings, schema_registry=schema_registry, logger=_test_logger)
+    dlq_metrics: DLQMetrics = await scope.get(DLQMetrics)
+    manager = create_dlq_manager(settings=test_settings, schema_registry=schema_registry, logger=_test_logger, dlq_metrics=dlq_metrics)

     prefix = test_settings.KAFKA_TOPIC_PREFIX
     ev = make_execution_requested_event(execution_id=f"exec-dlq-persist-{uuid.uuid4().hex[:8]}")
diff --git a/backend/tests/integration/events/test_consume_roundtrip.py b/backend/tests/integration/events/test_consume_roundtrip.py
index 9812b14f..94193247 100644
--- a/backend/tests/integration/events/test_consume_roundtrip.py
+++ b/backend/tests/integration/events/test_consume_roundtrip.py
@@ -3,6 +3,7 @@
 import uuid

 import pytest
+from app.core.metrics import EventMetrics
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import KafkaTopic
 from app.domain.events.typed import DomainEvent
@@ -27,6 +28,7 @@ async def test_produce_consume_roundtrip(scope: AsyncContainer) -> None:
     # Ensure schemas are registered
     registry: SchemaRegistryManager = await scope.get(SchemaRegistryManager)
     settings: Settings = await scope.get(Settings)
+    event_metrics: EventMetrics = await scope.get(EventMetrics)
     await initialize_event_schemas(registry)

     # Real producer from DI
@@ -54,6 +56,7 @@ async def _handle(_event: DomainEvent) -> None:
         schema_registry=registry,
         settings=settings,
         logger=_test_logger,
+        event_metrics=event_metrics,
     )
     await consumer.start([KafkaTopic.EXECUTION_EVENTS])
diff --git a/backend/tests/integration/events/test_consumer_lifecycle.py b/backend/tests/integration/events/test_consumer_lifecycle.py
index 01833c19..5374e152 100644
--- a/backend/tests/integration/events/test_consumer_lifecycle.py
+++ b/backend/tests/integration/events/test_consumer_lifecycle.py
@@ -2,6 +2,7 @@
 from uuid import uuid4

 import pytest
+from app.core.metrics import EventMetrics
 from app.domain.enums.kafka import KafkaTopic
 from app.events.core import ConsumerConfig, EventDispatcher, UnifiedConsumer
 from app.events.schema.schema_registry import SchemaRegistryManager
@@ -19,6 +20,7 @@ async def test_consumer_start_status_seek_and_stop(scope: AsyncContainer) -> None:
     registry: SchemaRegistryManager = await scope.get(SchemaRegistryManager)
     settings: Settings = await scope.get(Settings)
+    event_metrics: EventMetrics = await scope.get(EventMetrics)
     cfg = ConsumerConfig(
         bootstrap_servers=settings.KAFKA_BOOTSTRAP_SERVERS,
         group_id=f"test-consumer-{uuid4().hex[:6]}",
@@ -30,6 +32,7 @@ async def test_consumer_start_status_seek_and_stop(scope: AsyncContainer) -> Non
         schema_registry=registry,
         settings=settings,
         logger=_test_logger,
+        event_metrics=event_metrics,
     )
     await c.start([KafkaTopic.EXECUTION_EVENTS])
     try:
diff --git a/backend/tests/integration/events/test_event_dispatcher.py b/backend/tests/integration/events/test_event_dispatcher.py
index d5f118a3..3d166cec 100644
--- a/backend/tests/integration/events/test_event_dispatcher.py
+++ b/backend/tests/integration/events/test_event_dispatcher.py
@@ -3,6 +3,7 @@
 import uuid

 import pytest
+from app.core.metrics import EventMetrics
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import KafkaTopic
 from app.domain.events.typed import DomainEvent
@@ -27,6 +28,7 @@ async def test_dispatcher_with_multiple_handlers(scope: AsyncContainer) -> None:
     # Ensure schema registry is ready
     registry: SchemaRegistryManager = await scope.get(SchemaRegistryManager)
     settings: Settings = await scope.get(Settings)
+    event_metrics: EventMetrics = await scope.get(EventMetrics)
     await initialize_event_schemas(registry)

     # Build dispatcher with two handlers for the same event
@@ -55,6 +57,7 @@ async def h2(_e: DomainEvent) -> None:
         schema_registry=registry,
         settings=settings,
         logger=_test_logger,
+        event_metrics=event_metrics,
     )
     await consumer.start([KafkaTopic.EXECUTION_EVENTS])
diff --git a/backend/tests/integration/events/test_producer_roundtrip.py b/backend/tests/integration/events/test_producer_roundtrip.py
index 18493a51..cb91df15 100644
--- a/backend/tests/integration/events/test_producer_roundtrip.py
+++ b/backend/tests/integration/events/test_producer_roundtrip.py
@@ -2,6 +2,7 @@
 from uuid import uuid4

 import pytest
+from app.core.metrics import EventMetrics
 from app.events.core import UnifiedProducer
 from app.events.schema.schema_registry import SchemaRegistryManager
 from app.infrastructure.kafka.mappings import get_topic_for_event
@@ -20,10 +21,12 @@ async def test_unified_producer_start_produce_send_to_dlq_stop(
     scope: AsyncContainer, test_settings: Settings
 ) -> None:
     schema: SchemaRegistryManager = await scope.get(SchemaRegistryManager)
+    event_metrics: EventMetrics = await scope.get(EventMetrics)
     prod = UnifiedProducer(
         schema,
         logger=_test_logger,
         settings=test_settings,
+        event_metrics=event_metrics,
     )

     async with prod:
b/backend/tests/integration/idempotency/test_consumer_idempotent.py @@ -3,6 +3,7 @@ import uuid import pytest +from app.core.metrics import EventMetrics from app.domain.enums.events import EventType from app.domain.enums.kafka import KafkaTopic from app.domain.events.typed import DomainEvent @@ -34,6 +35,7 @@ async def test_consumer_idempotent_wrapper_blocks_duplicates(scope: AsyncContain idm: IdempotencyManager = await scope.get(IdempotencyManager) registry: SchemaRegistryManager = await scope.get(SchemaRegistryManager) settings: Settings = await scope.get(Settings) + event_metrics: EventMetrics = await scope.get(EventMetrics) # Future resolves when handler processes an event - no polling needed handled_future: asyncio.Future[None] = asyncio.get_running_loop().create_future() @@ -67,6 +69,7 @@ async def handle(_ev: DomainEvent) -> None: schema_registry=registry, settings=settings, logger=_test_logger, + event_metrics=event_metrics, ) wrapper = IdempotentConsumerWrapper( consumer=base, diff --git a/backend/tests/integration/idempotency/test_idempotency.py b/backend/tests/integration/idempotency/test_idempotency.py index cc5017e4..032a7f46 100644 --- a/backend/tests/integration/idempotency/test_idempotency.py +++ b/backend/tests/integration/idempotency/test_idempotency.py @@ -8,11 +8,13 @@ import pytest import redis.asyncio as redis +from app.core.metrics import DatabaseMetrics from app.domain.events.typed import DomainEvent from app.domain.idempotency import IdempotencyRecord, IdempotencyStatus from app.services.idempotency.idempotency_manager import IdempotencyConfig, IdempotencyManager from app.services.idempotency.middleware import IdempotentEventHandler, idempotent_handler from app.services.idempotency.redis_repository import RedisIdempotencyRepository +from app.settings import Settings from tests.helpers import make_execution_requested_event @@ -26,7 +28,7 @@ class TestIdempotencyManager: """IdempotencyManager backed by real Redis repository (DI-provided client).""" @pytest.fixture - async def manager(self, redis_client: redis.Redis) -> AsyncGenerator[IdempotencyManager, None]: + async def manager(self, redis_client: redis.Redis, test_settings: Settings) -> AsyncGenerator[IdempotencyManager, None]: prefix = f"idemp_ut:{uuid.uuid4().hex[:6]}" cfg = IdempotencyConfig( key_prefix=prefix, @@ -37,7 +39,8 @@ async def manager(self, redis_client: redis.Redis) -> AsyncGenerator[Idempotency enable_metrics=False, ) repo = RedisIdempotencyRepository(redis_client, key_prefix=prefix) - m = IdempotencyManager(cfg, repo, _test_logger) + database_metrics = DatabaseMetrics(test_settings) + m = IdempotencyManager(cfg, repo, _test_logger, database_metrics=database_metrics) await m.initialize() try: yield m @@ -254,11 +257,12 @@ class TestIdempotentEventHandlerIntegration: """Test IdempotentEventHandler with real components""" @pytest.fixture - async def manager(self, redis_client: redis.Redis) -> AsyncGenerator[IdempotencyManager, None]: + async def manager(self, redis_client: redis.Redis, test_settings: Settings) -> AsyncGenerator[IdempotencyManager, None]: prefix = f"handler_test:{uuid.uuid4().hex[:6]}" config = IdempotencyConfig(key_prefix=prefix, enable_metrics=False) repo = RedisIdempotencyRepository(redis_client, key_prefix=prefix) - m = IdempotencyManager(config, repo, _test_logger) + database_metrics = DatabaseMetrics(test_settings) + m = IdempotencyManager(config, repo, _test_logger, database_metrics=database_metrics) await m.initialize() try: yield m @@ -509,11 +513,12 @@ async def 
test_cleanup_expired_keys(self, manager: IdempotencyManager) -> None: assert record is not None # Still exists until explicit cleanup @pytest.mark.asyncio - async def test_metrics_enabled(self, redis_client: redis.Redis) -> None: + async def test_metrics_enabled(self, redis_client: redis.Redis, test_settings: Settings) -> None: """Test manager with metrics enabled""" config = IdempotencyConfig(key_prefix=f"metrics:{uuid.uuid4().hex[:6]}", enable_metrics=True) repository = RedisIdempotencyRepository(redis_client, key_prefix=config.key_prefix) - manager = IdempotencyManager(config, repository, _test_logger) + database_metrics = DatabaseMetrics(test_settings) + manager = IdempotencyManager(config, repository, _test_logger, database_metrics=database_metrics) # Initialize with metrics await manager.initialize() diff --git a/backend/tests/integration/result_processor/test_result_processor.py b/backend/tests/integration/result_processor/test_result_processor.py index 08a44a37..de2546d6 100644 --- a/backend/tests/integration/result_processor/test_result_processor.py +++ b/backend/tests/integration/result_processor/test_result_processor.py @@ -4,6 +4,7 @@ import pytest from app.core.database_context import Database +from app.core.metrics import EventMetrics, ExecutionMetrics from app.db.repositories.execution_repository import ExecutionRepository from app.domain.enums.events import EventType from app.domain.enums.execution import ExecutionStatus @@ -37,6 +38,8 @@ async def test_result_processor_persists_and_emits(scope: AsyncContainer) -> Non # Ensure schemas registry: SchemaRegistryManager = await scope.get(SchemaRegistryManager) settings: Settings = await scope.get(Settings) + event_metrics: EventMetrics = await scope.get(EventMetrics) + execution_metrics: ExecutionMetrics = await scope.get(ExecutionMetrics) await initialize_event_schemas(registry) # Dependencies @@ -63,6 +66,8 @@ async def test_result_processor_persists_and_emits(scope: AsyncContainer) -> Non settings=settings, idempotency_manager=idem, logger=_test_logger, + execution_metrics=execution_metrics, + event_metrics=event_metrics, ) # Setup a small consumer to capture ResultStoredEvent @@ -87,6 +92,7 @@ async def _stored(event: ResultStoredEvent) -> None: schema_registry=registry, settings=settings, logger=_test_logger, + event_metrics=event_metrics, ) # Produce the event BEFORE starting consumers (auto_offset_reset="earliest" will read it) diff --git a/backend/tests/integration/services/rate_limit/test_rate_limit_service.py b/backend/tests/integration/services/rate_limit/test_rate_limit_service.py index 942b2a37..0476f048 100644 --- a/backend/tests/integration/services/rate_limit/test_rate_limit_service.py +++ b/backend/tests/integration/services/rate_limit/test_rate_limit_service.py @@ -19,21 +19,15 @@ @pytest.mark.asyncio -async def test_normalize_and_disabled_and_bypass_and_no_rule(scope: AsyncContainer) -> None: +async def test_normalize_and_bypass_and_no_rule(scope: AsyncContainer) -> None: svc: RateLimitService = await scope.get(RateLimitService) svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:" - # ensure disabled for first path - await svc.update_config(RateLimitConfig(default_rules=[])) - svc.settings.RATE_LIMIT_ENABLED = False + # normalization masks uuids and ids n = svc._normalize_endpoint("/api/12345678901234567890/abcdef-1234-5678-9abc-def012345678") assert "*" in n - # disabled path allowed - res = await svc.check_rate_limit("u1", "/api/x") - assert res.allowed is True - # enabled, bypass - 
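ResultProcessor follows the same constructor-injection pattern but takes two metric families. A sketch of the wiring as these hunks apply it; repo, producer, and idem stand in for the repository, producer, and idempotency manager the test assembles earlier, and the inline comments state the split of responsibilities inferred from the class names:

    event_metrics: EventMetrics = await scope.get(EventMetrics)
    execution_metrics: ExecutionMetrics = await scope.get(ExecutionMetrics)
    processor = ResultProcessor(
        execution_repo=repo,
        producer=producer,
        schema_registry=registry,
        settings=settings,
        idempotency_manager=idem,
        logger=_test_logger,
        execution_metrics=execution_metrics,  # execution status/duration instrumentation
        event_metrics=event_metrics,          # produce/consume instrumentation
    )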
diff --git a/backend/tests/integration/services/rate_limit/test_rate_limit_service.py b/backend/tests/integration/services/rate_limit/test_rate_limit_service.py
index 942b2a37..0476f048 100644
--- a/backend/tests/integration/services/rate_limit/test_rate_limit_service.py
+++ b/backend/tests/integration/services/rate_limit/test_rate_limit_service.py
@@ -19,21 +19,15 @@
 @pytest.mark.asyncio
-async def test_normalize_and_disabled_and_bypass_and_no_rule(scope: AsyncContainer) -> None:
+async def test_normalize_and_bypass_and_no_rule(scope: AsyncContainer) -> None:
     svc: RateLimitService = await scope.get(RateLimitService)
     svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:"
-    # ensure disabled for first path
-    await svc.update_config(RateLimitConfig(default_rules=[]))
-    svc.settings.RATE_LIMIT_ENABLED = False
+    # normalization masks uuids and ids
     n = svc._normalize_endpoint("/api/12345678901234567890/abcdef-1234-5678-9abc-def012345678")
     assert "*" in n
-    # disabled path allowed
-    res = await svc.check_rate_limit("u1", "/api/x")
-    assert res.allowed is True
-    # enabled, bypass
-    svc.settings.RATE_LIMIT_ENABLED = True
+    # bypass user is always allowed
     cfg = RateLimitConfig(default_rules=[], user_overrides={
         "u1": UserRateLimit(user_id="u1", bypass_rate_limit=True)
     })
@@ -51,7 +45,6 @@ async def test_sliding_window_allowed_and_rejected(scope: AsyncContainer) -> None:
     svc: RateLimitService = await scope.get(RateLimitService)
     svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:"
-    svc.settings.RATE_LIMIT_ENABLED = True  # Enable rate limiting for this test
     # matching rule with window 5, limit 3
     rule = RateLimitRule(endpoint_pattern=r"^/api/v1/x", group=EndpointGroup.API, requests=3, window_seconds=5, algorithm=RateLimitAlgorithm.SLIDING_WINDOW)
@@ -76,7 +69,6 @@ async def test_token_bucket_paths(scope: AsyncContainer) -> None:
     svc: RateLimitService = await scope.get(RateLimitService)
     svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:"
-    svc.settings.RATE_LIMIT_ENABLED = True  # Enable rate limiting for this test
     rule = RateLimitRule(endpoint_pattern=r"^/api/v1/t", group=EndpointGroup.API, requests=2, window_seconds=10, burst_multiplier=1.0, algorithm=RateLimitAlgorithm.TOKEN_BUCKET)
     await svc.update_config(RateLimitConfig(default_rules=[rule]))
@@ -168,7 +160,6 @@ async def test_get_config_roundtrip(scope: AsyncContainer) -> None:
 async def test_sliding_window_edge(scope: AsyncContainer) -> None:
     svc: RateLimitService = await scope.get(RateLimitService)
     svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:"
-    svc.settings.RATE_LIMIT_ENABLED = True  # Enable rate limiting for this test
     # Configure a tight window and ensure behavior is consistent
     cfg = RateLimitConfig(
         default_rules=[
@@ -294,7 +285,6 @@ async def test_get_usage_stats_with_keys(scope: AsyncContainer) -> None:
 @pytest.mark.asyncio
 async def test_check_rate_limit_with_user_override(scope: AsyncContainer) -> None:
     svc: RateLimitService = await scope.get(RateLimitService)
-    svc.settings.RATE_LIMIT_ENABLED = True  # Enable rate limiting for this test
     rule = RateLimitRule(
         endpoint_pattern=r"^/api",
         group=EndpointGroup.API,
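With the RATE_LIMIT_ENABLED toggling removed, these tests now exercise service behavior only; the on/off switch is covered by the unconditional middleware assertion at the top of this patch. The bypass path the first hunk keeps reads as follows (identifiers exactly as in the test):

    cfg = RateLimitConfig(default_rules=[], user_overrides={
        "u1": UserRateLimit(user_id="u1", bypass_rate_limit=True)
    })
    await svc.update_config(cfg)
    res = await svc.check_rate_limit("u1", "/api/x")
    assert res.allowed is True  # bypass users skip every rule, even with no default rules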
diff --git a/backend/tests/integration/services/sse/test_partitioned_event_router.py b/backend/tests/integration/services/sse/test_partitioned_event_router.py
index 15b0ec63..7e1c4ac6 100644
--- a/backend/tests/integration/services/sse/test_partitioned_event_router.py
+++ b/backend/tests/integration/services/sse/test_partitioned_event_router.py
@@ -4,7 +4,7 @@
 import pytest
 import redis.asyncio as redis
-from app.core.metrics.events import EventMetrics
+from app.core.metrics import EventMetrics
 from app.events.core import EventDispatcher
 from app.events.schema.schema_registry import SchemaRegistryManager
 from app.schemas_pydantic.sse import RedisSSEMessage
diff --git a/backend/tests/unit/conftest.py b/backend/tests/unit/conftest.py
index ea7bab9f..65b28839 100644
--- a/backend/tests/unit/conftest.py
+++ b/backend/tests/unit/conftest.py
@@ -1,46 +1,82 @@
-import logging
-from collections.abc import Generator
 from typing import NoReturn

 import pytest
-from app.core.metrics.connections import ConnectionMetrics
-from app.core.metrics.context import MetricsContext
-from app.core.metrics.coordinator import CoordinatorMetrics
-from app.core.metrics.database import DatabaseMetrics
-from app.core.metrics.dlq import DLQMetrics
-from app.core.metrics.events import EventMetrics
-from app.core.metrics.execution import ExecutionMetrics
-from app.core.metrics.health import HealthMetrics
-from app.core.metrics.kubernetes import KubernetesMetrics
-from app.core.metrics.notifications import NotificationMetrics
-from app.core.metrics.rate_limit import RateLimitMetrics
-from app.core.metrics.replay import ReplayMetrics
-from app.core.metrics.security import SecurityMetrics
+from app.core.metrics import (
+    ConnectionMetrics,
+    CoordinatorMetrics,
+    DatabaseMetrics,
+    DLQMetrics,
+    EventMetrics,
+    ExecutionMetrics,
+    HealthMetrics,
+    KubernetesMetrics,
+    NotificationMetrics,
+    RateLimitMetrics,
+    ReplayMetrics,
+    SecurityMetrics,
+)
 from app.settings import Settings

-_unit_test_logger = logging.getLogger("test.unit")
-
-
-@pytest.fixture(scope="session", autouse=True)
-def init_metrics_for_unit_tests(test_settings: Settings) -> Generator[None, None, None]:
-    """Initialize all metrics context for unit tests."""
-    MetricsContext.initialize_all(
-        _unit_test_logger,
-        connection=ConnectionMetrics(test_settings),
-        coordinator=CoordinatorMetrics(test_settings),
-        database=DatabaseMetrics(test_settings),
-        dlq=DLQMetrics(test_settings),
-        event=EventMetrics(test_settings),
-        execution=ExecutionMetrics(test_settings),
-        health=HealthMetrics(test_settings),
-        kubernetes=KubernetesMetrics(test_settings),
-        notification=NotificationMetrics(test_settings),
-        rate_limit=RateLimitMetrics(test_settings),
-        replay=ReplayMetrics(test_settings),
-        security=SecurityMetrics(test_settings),
-    )
-    yield
-    MetricsContext.reset_all(_unit_test_logger)
+
+# Metrics fixtures - provided via DI, not global context
+@pytest.fixture
+def connection_metrics(test_settings: Settings) -> ConnectionMetrics:
+    return ConnectionMetrics(test_settings)
+
+
+@pytest.fixture
+def coordinator_metrics(test_settings: Settings) -> CoordinatorMetrics:
+    return CoordinatorMetrics(test_settings)
+
+
+@pytest.fixture
+def database_metrics(test_settings: Settings) -> DatabaseMetrics:
+    return DatabaseMetrics(test_settings)
+
+
+@pytest.fixture
+def dlq_metrics(test_settings: Settings) -> DLQMetrics:
+    return DLQMetrics(test_settings)
+
+
+@pytest.fixture
+def event_metrics(test_settings: Settings) -> EventMetrics:
+    return EventMetrics(test_settings)
+
+
+@pytest.fixture
+def execution_metrics(test_settings: Settings) -> ExecutionMetrics:
+    return ExecutionMetrics(test_settings)
+
+
+@pytest.fixture
+def health_metrics(test_settings: Settings) -> HealthMetrics:
+    return HealthMetrics(test_settings)
+
+
+@pytest.fixture
+def kubernetes_metrics(test_settings: Settings) -> KubernetesMetrics:
+    return KubernetesMetrics(test_settings)
+
+
+@pytest.fixture
+def notification_metrics(test_settings: Settings) -> NotificationMetrics:
+    return NotificationMetrics(test_settings)
+
+
+@pytest.fixture
+def rate_limit_metrics(test_settings: Settings) -> RateLimitMetrics:
+    return RateLimitMetrics(test_settings)
+
+
+@pytest.fixture
+def replay_metrics(test_settings: Settings) -> ReplayMetrics:
+    return ReplayMetrics(test_settings)
+
+
+@pytest.fixture
+def security_metrics(test_settings: Settings) -> SecurityMetrics:
+    return SecurityMetrics(test_settings)


 @pytest.fixture
diff --git a/backend/tests/unit/core/metrics/test_base_metrics.py b/backend/tests/unit/core/metrics/test_base_metrics.py
index ba4cdfde..e64f35fb 100644
--- a/backend/tests/unit/core/metrics/test_base_metrics.py
+++ b/backend/tests/unit/core/metrics/test_base_metrics.py
@@ -1,5 +1,5 @@
 import pytest
-from app.core.metrics.base import BaseMetrics
+from app.core.metrics import BaseMetrics
 from app.settings import Settings

 pytestmark = pytest.mark.unit
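The conftest rewrite replaces one session-scoped, autouse context initializer with twelve opt-in fixtures: a unit test now declares exactly the metrics it needs and gets a fresh instance per test, so there is no shared registry to reset between tests. A representative (hypothetical) consumer of the new fixtures:

    import pytest
    from app.core.metrics import CoordinatorMetrics

    pytestmark = pytest.mark.unit

    def test_uses_fresh_metrics(coordinator_metrics: CoordinatorMetrics) -> None:
        # the fixture builds CoordinatorMetrics(test_settings) for this test only
        assert isinstance(coordinator_metrics, CoordinatorMetrics)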
diff --git a/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py b/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py
index fab6f368..202b7233 100644
--- a/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py
+++ b/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py
@@ -1,6 +1,5 @@
 import pytest
-from app.core.metrics.connections import ConnectionMetrics
-from app.core.metrics.coordinator import CoordinatorMetrics
+from app.core.metrics import ConnectionMetrics, CoordinatorMetrics
 from app.settings import Settings

 pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py b/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py
index 691d05aa..623e20b6 100644
--- a/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py
+++ b/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py
@@ -1,6 +1,5 @@
 import pytest
-from app.core.metrics.database import DatabaseMetrics
-from app.core.metrics.dlq import DLQMetrics
+from app.core.metrics import DatabaseMetrics, DLQMetrics
 from app.settings import Settings

 pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py b/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py
index 2eda95a8..fdd09bdc 100644
--- a/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py
+++ b/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py
@@ -1,6 +1,5 @@
 import pytest
-from app.core.metrics.events import EventMetrics
-from app.core.metrics.execution import ExecutionMetrics
+from app.core.metrics import EventMetrics, ExecutionMetrics
 from app.domain.enums.execution import ExecutionStatus
 from app.settings import Settings
diff --git a/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py b/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py
index e22a3bff..54d06d27 100644
--- a/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py
+++ b/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py
@@ -1,5 +1,5 @@
 import pytest
-from app.core.metrics.health import HealthMetrics
+from app.core.metrics import HealthMetrics
 from app.settings import Settings

 pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py b/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py
index 061eed0e..3a12d8de 100644
--- a/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py
+++ b/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py
@@ -1,6 +1,5 @@
 import pytest
-from app.core.metrics.kubernetes import KubernetesMetrics
-from app.core.metrics.notifications import NotificationMetrics
+from app.core.metrics import KubernetesMetrics, NotificationMetrics
 from app.settings import Settings

 pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_metrics_classes.py b/backend/tests/unit/core/metrics/test_metrics_classes.py
index 542a4a6a..382ed9c5 100644
--- a/backend/tests/unit/core/metrics/test_metrics_classes.py
+++ b/backend/tests/unit/core/metrics/test_metrics_classes.py
@@ -1,16 +1,18 @@
 import pytest
-from app.core.metrics.connections import ConnectionMetrics
-from app.core.metrics.coordinator import CoordinatorMetrics
-from app.core.metrics.database import DatabaseMetrics
-from app.core.metrics.dlq import DLQMetrics
-from app.core.metrics.events import EventMetrics
-from app.core.metrics.execution import ExecutionMetrics
-from app.core.metrics.health import HealthMetrics
-from app.core.metrics.kubernetes import KubernetesMetrics
-from app.core.metrics.notifications import NotificationMetrics
-from app.core.metrics.rate_limit import RateLimitMetrics
-from app.core.metrics.replay import ReplayMetrics
-from app.core.metrics.security import SecurityMetrics
+from app.core.metrics import (
+    ConnectionMetrics,
+    CoordinatorMetrics,
+    DatabaseMetrics,
+    DLQMetrics,
+    EventMetrics,
+    ExecutionMetrics,
+    HealthMetrics,
+    KubernetesMetrics,
+    NotificationMetrics,
+    RateLimitMetrics,
+    ReplayMetrics,
+    SecurityMetrics,
+)
 from app.domain.enums.execution import ExecutionStatus
 from app.settings import Settings
diff --git a/backend/tests/unit/core/metrics/test_metrics_context.py b/backend/tests/unit/core/metrics/test_metrics_context.py
deleted file mode 100644
index 5f24a999..00000000
--- a/backend/tests/unit/core/metrics/test_metrics_context.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import logging
-
-import pytest
-from app.core.metrics.context import (
-    get_connection_metrics,
-    get_coordinator_metrics,
-)
-
-_test_logger = logging.getLogger("test.core.metrics.context")
-
-pytestmark = pytest.mark.unit
-
-
-def test_metrics_context_returns_initialized_metrics() -> None:
-    """Test metrics context returns initialized metrics from session fixture."""
-    # Metrics are initialized by the session-scoped fixture in conftest.py
-    c1 = get_connection_metrics()
-    c2 = get_connection_metrics()
-    assert c1 is c2  # same instance per context
-
-    d1 = get_coordinator_metrics()
-    d2 = get_coordinator_metrics()
-    assert d1 is d2
-
diff --git a/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py b/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py
index 09462600..c7966e94 100644
--- a/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py
+++ b/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py
@@ -1,6 +1,5 @@
 import pytest
-from app.core.metrics.replay import ReplayMetrics
-from app.core.metrics.security import SecurityMetrics
+from app.core.metrics import ReplayMetrics, SecurityMetrics
 from app.settings import Settings

 pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/services/coordinator/test_queue_manager.py b/backend/tests/unit/services/coordinator/test_queue_manager.py
index b3b87dee..b4b39b2d 100644
--- a/backend/tests/unit/services/coordinator/test_queue_manager.py
+++ b/backend/tests/unit/services/coordinator/test_queue_manager.py
@@ -1,6 +1,7 @@
 import logging

 import pytest
+from app.core.metrics import CoordinatorMetrics
 from app.domain.events.typed import ExecutionRequestedEvent
 from app.services.coordinator.queue_manager import QueueManager, QueuePriority
@@ -16,8 +17,8 @@ def ev(execution_id: str, priority: int = QueuePriority.NORMAL.value) -> Executi
 @pytest.mark.asyncio
-async def test_requeue_execution_increments_priority() -> None:
-    qm = QueueManager(max_queue_size=10, logger=_test_logger)
+async def test_requeue_execution_increments_priority(coordinator_metrics: CoordinatorMetrics) -> None:
+    qm = QueueManager(max_queue_size=10, logger=_test_logger, coordinator_metrics=coordinator_metrics)
     await qm.start()
     # Use NORMAL priority which can be incremented to LOW
     e = ev("x", priority=QueuePriority.NORMAL.value)
@@ -29,8 +30,8 @@
 @pytest.mark.asyncio
-async def test_queue_stats_empty_and_after_add() -> None:
-    qm = QueueManager(max_queue_size=5, logger=_test_logger)
+async def test_queue_stats_empty_and_after_add(coordinator_metrics: CoordinatorMetrics) -> None:
+    qm = QueueManager(max_queue_size=5, logger=_test_logger, coordinator_metrics=coordinator_metrics)
     await qm.start()
     stats0 = await qm.get_queue_stats()
     assert stats0["total_size"] == 0
diff --git a/backend/tests/unit/services/coordinator/test_resource_manager.py b/backend/tests/unit/services/coordinator/test_resource_manager.py
index 1cea9f82..3624dae6 100644
--- a/backend/tests/unit/services/coordinator/test_resource_manager.py
+++ b/backend/tests/unit/services/coordinator/test_resource_manager.py
@@ -1,14 +1,15 @@
 import logging

 import pytest
+from app.core.metrics import CoordinatorMetrics
 from app.services.coordinator.resource_manager import ResourceManager

 _test_logger = logging.getLogger("test.services.coordinator.resource_manager")


 @pytest.mark.asyncio
-async def test_request_allocation_defaults_and_limits() -> None:
-    rm = ResourceManager(total_cpu_cores=8.0, total_memory_mb=16384, total_gpu_count=0, logger=_test_logger)
+async def test_request_allocation_defaults_and_limits(coordinator_metrics: CoordinatorMetrics) -> None:
+    rm = ResourceManager(total_cpu_cores=8.0, total_memory_mb=16384, total_gpu_count=0, logger=_test_logger, coordinator_metrics=coordinator_metrics)

     # Default for python
     alloc = await rm.request_allocation("e1", "python")
@@ -25,8 +26,8 @@
 @pytest.mark.asyncio
-async def test_release_and_can_allocate() -> None:
-    rm = ResourceManager(total_cpu_cores=4.0, total_memory_mb=8192, total_gpu_count=0, logger=_test_logger)
+async def test_release_and_can_allocate(coordinator_metrics: CoordinatorMetrics) -> None:
+    rm = ResourceManager(total_cpu_cores=4.0, total_memory_mb=8192, total_gpu_count=0, logger=_test_logger, coordinator_metrics=coordinator_metrics)

     a = await rm.request_allocation("e1", "python", requested_cpu=1.0, requested_memory_mb=512)
     assert a is not None
@@ -45,8 +46,8 @@
 @pytest.mark.asyncio
-async def test_resource_stats() -> None:
-    rm = ResourceManager(total_cpu_cores=2.0, total_memory_mb=4096, total_gpu_count=0, logger=_test_logger)
+async def test_resource_stats(coordinator_metrics: CoordinatorMetrics) -> None:
+    rm = ResourceManager(total_cpu_cores=2.0, total_memory_mb=4096, total_gpu_count=0, logger=_test_logger, coordinator_metrics=coordinator_metrics)
     # Make sure the allocation succeeds
     alloc = await rm.request_allocation("e1", "python", requested_cpu=0.5, requested_memory_mb=256)
     assert alloc is not None, "Allocation should have succeeded"
diff --git a/backend/tests/unit/services/idempotency/test_idempotency_manager.py b/backend/tests/unit/services/idempotency/test_idempotency_manager.py
index 102aa56c..ef4676fb 100644
--- a/backend/tests/unit/services/idempotency/test_idempotency_manager.py
+++ b/backend/tests/unit/services/idempotency/test_idempotency_manager.py
@@ -2,6 +2,7 @@
 from unittest.mock import MagicMock

 import pytest
+from app.core.metrics import DatabaseMetrics
 from app.domain.events.typed import BaseEvent
 from app.services.idempotency.idempotency_manager import (
     IdempotencyConfig,
@@ -85,9 +86,9 @@ def test_custom_config(self) -> None:
         assert config.collection_name == "custom_keys"


-def test_manager_generate_key_variants() -> None:
+def test_manager_generate_key_variants(database_metrics: DatabaseMetrics) -> None:
     repo = MagicMock()
-    mgr = IdempotencyManager(IdempotencyConfig(), repo, _test_logger)
+    mgr = IdempotencyManager(IdempotencyConfig(), repo, _test_logger, database_metrics=database_metrics)
     ev = MagicMock(spec=BaseEvent)
     ev.event_type = "t"
     ev.event_id = "e"
diff --git a/backend/tests/unit/services/pod_monitor/test_monitor.py b/backend/tests/unit/services/pod_monitor/test_monitor.py
index ec60121a..dc93a150 100644
--- a/backend/tests/unit/services/pod_monitor/test_monitor.py
+++ b/backend/tests/unit/services/pod_monitor/test_monitor.py
@@ -7,6 +7,7 @@
 import pytest
 from app.core import k8s_clients as k8s_clients_module
 from app.core.k8s_clients import K8sClients
+from app.core.metrics import EventMetrics, KubernetesMetrics
 from app.db.repositories.event_repository import EventRepository
 from app.domain.events.typed import DomainEvent, EventMetadata, ExecutionCompletedEvent, ExecutionStartedEvent
 from app.domain.execution.models import ResourceUsageDomain
@@ -72,7 +73,7 @@ async def aclose(self) -> None:
         pass


-def create_test_kafka_event_service() -> tuple[KafkaEventService, FakeUnifiedProducer]:
+def create_test_kafka_event_service(event_metrics: EventMetrics) -> tuple[KafkaEventService, FakeUnifiedProducer]:
     """Create real KafkaEventService with fake dependencies for testing."""
     fake_producer = FakeUnifiedProducer()
     fake_repo = FakeEventRepository()
@@ -83,6 +84,7 @@ def create_test_kafka_event_service() -> tuple[KafkaEventService, FakeUnifiedPro
         kafka_producer=fake_producer,
         settings=settings,
         logger=_test_logger,
+        event_metrics=event_metrics,
     )
     return service, fake_producer
@@ -120,6 +122,8 @@ def make_k8s_clients_di(


 def make_pod_monitor(
+    event_metrics: EventMetrics,
+    kubernetes_metrics: KubernetesMetrics,
     config: PodMonitorConfig | None = None,
     kafka_service: KafkaEventService | None = None,
     k8s_clients: K8sClients | None = None,
@@ -129,13 +133,14 @@ def make_pod_monitor(
     cfg = config or PodMonitorConfig()
     clients = k8s_clients or make_k8s_clients_di()
     mapper = event_mapper or PodEventMapper(logger=_test_logger, k8s_api=FakeApi("{}"))
-    service = kafka_service or create_test_kafka_event_service()[0]
+    service = kafka_service or create_test_kafka_event_service(event_metrics)[0]
     return PodMonitor(
         config=cfg,
         kafka_event_service=service,
         logger=_test_logger,
         k8s_clients=clients,
         event_mapper=mapper,
+        kubernetes_metrics=kubernetes_metrics,
     )
@@ -143,12 +148,12 @@
 @pytest.mark.asyncio
-async def test_start_and_stop_lifecycle() -> None:
+async def test_start_and_stop_lifecycle(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = False
     spy = SpyMapper()
-    pm = make_pod_monitor(config=cfg, event_mapper=spy)  # type: ignore[arg-type]
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, event_mapper=spy)  # type: ignore[arg-type]

     # Replace _watch_pods to avoid real watch loop
     async def _quick_watch() -> None:
@@ -166,14 +171,14 @@
 @pytest.mark.asyncio
-async def test_watch_pod_events_flow_and_publish() -> None:
+async def test_watch_pod_events_flow_and_publish(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = False

     pod = make_pod(name="p", phase="Succeeded", labels={"execution-id": "e1"}, term_exit=0, resource_version="rv1")
     k8s_clients = make_k8s_clients_di(events=[{"type": "MODIFIED", "object": pod}], resource_version="rv2")
-    pm = make_pod_monitor(config=cfg, k8s_clients=k8s_clients)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, k8s_clients=k8s_clients)
     pm._state = MonitorState.RUNNING

     await pm._watch_pod_events()
@@ -181,9 +186,9 @@
 @pytest.mark.asyncio
-async def test_process_raw_event_invalid_and_handle_watch_error() -> None:
+async def test_process_raw_event_invalid_and_handle_watch_error(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)

     await pm._process_raw_event({})
@@ -195,13 +200,13 @@
 @pytest.mark.asyncio
-async def test_get_status() -> None:
+async def test_get_status(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.namespace = "test-ns"
     cfg.label_selector = "app=test"
     cfg.enable_state_reconciliation = True

-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._tracked_pods = {"pod1", "pod2"}
     pm._reconnect_attempts = 3
     pm._last_resource_version = "v123"
@@ -217,12 +222,12 @@
 @pytest.mark.asyncio
-async def test_reconciliation_loop_and_state() -> None:
+async def test_reconciliation_loop_and_state(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = True
     cfg.reconcile_interval_seconds = 0  # sleep(0) yields control immediately

-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING

     reconcile_called: list[bool] = []
@@ -251,7 +256,7 @@ async def wrapped_reconcile() -> ReconciliationResult:
 @pytest.mark.asyncio
-async def test_reconcile_state_success() -> None:
+async def test_reconcile_state_success(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.namespace = "test"
     cfg.label_selector = "app=test"
@@ -260,7 +265,7 @@
     pod2 = make_pod(name="pod2", phase="Running", resource_version="v1")
     k8s_clients = make_k8s_clients_di(pods=[pod1, pod2])

-    pm = make_pod_monitor(config=cfg, k8s_clients=k8s_clients)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, k8s_clients=k8s_clients)
     pm._tracked_pods = {"pod2", "pod3"}

     processed: list[str] = []
@@ -280,7 +285,7 @@ async def mock_process(event: PodEvent) -> None:
 @pytest.mark.asyncio
-async def test_reconcile_state_exception() -> None:
+async def test_reconcile_state_exception(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()

     class FailV1(FakeV1Api):
@@ -296,7 +301,7 @@ def list_namespaced_pod(self, namespace: str, label_selector: str) -> Any:
         watch=make_watch([]),
     )

-    pm = make_pod_monitor(config=cfg, k8s_clients=k8s_clients)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, k8s_clients=k8s_clients)

     result = await pm._reconcile_state()
     assert result.success is False
@@ -305,7 +310,7 @@
 @pytest.mark.asyncio
-async def test_process_pod_event_full_flow() -> None:
+async def test_process_pod_event_full_flow(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.ignored_pod_phases = ["Unknown"]
@@ -321,7 +326,7 @@ class Event:
         def clear_cache(self) -> None:
             pass

-    pm = make_pod_monitor(config=cfg, event_mapper=MockMapper())  # type: ignore[arg-type]
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, event_mapper=MockMapper())  # type: ignore[arg-type]

     published: list[Any] = []
@@ -363,7 +368,7 @@ async def mock_publish(event: Any, pod: Any) -> None:  # noqa: ARG001
 @pytest.mark.asyncio
-async def test_process_pod_event_exception_handling() -> None:
+async def test_process_pod_event_exception_handling(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()

     class FailMapper:
@@ -373,7 +378,7 @@ def map_pod_event(self, pod: Any, event_type: WatchEventType) -> list[Any]:
         def clear_cache(self) -> None:
             pass

-    pm = make_pod_monitor(config=cfg, event_mapper=FailMapper())  # type: ignore[arg-type]
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, event_mapper=FailMapper())  # type: ignore[arg-type]

     event = PodEvent(
         event_type=WatchEventType.ADDED,
@@ -386,10 +391,10 @@
 @pytest.mark.asyncio
-async def test_publish_event_full_flow() -> None:
+async def test_publish_event_full_flow(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()

-    service, fake_producer = create_test_kafka_event_service()
-    pm = make_pod_monitor(config=cfg, kafka_service=service)
+    service, fake_producer = create_test_kafka_event_service(event_metrics)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, kafka_service=service)

     event = ExecutionCompletedEvent(
         execution_id="exec1",
@@ -407,7 +412,7 @@
 @pytest.mark.asyncio
-async def test_publish_event_exception_handling() -> None:
+async def test_publish_event_exception_handling(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()

     class FailingProducer(FakeUnifiedProducer):
@@ -424,9 +429,10 @@ async def produce(
         kafka_producer=failing_producer,
         settings=Settings(),
         logger=_test_logger,
+        event_metrics=event_metrics,
     )

-    pm = make_pod_monitor(config=cfg, kafka_service=failing_service)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, kafka_service=failing_service)

     event = ExecutionStartedEvent(
         execution_id="exec1",
@@ -443,11 +449,11 @@ async def produce(
 @pytest.mark.asyncio
-async def test_handle_watch_error_max_attempts() -> None:
+async def test_handle_watch_error_max_attempts(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.max_reconnect_attempts = 2

-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING
     pm._reconnect_attempts = 2
@@ -457,9 +463,9 @@
 @pytest.mark.asyncio
-async def test_watch_pods_main_loop() -> None:
+async def test_watch_pods_main_loop(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING

     watch_count: list[int] = []
@@ -480,9 +486,9 @@ async def mock_handle_error() -> None:
 @pytest.mark.asyncio
-async def test_watch_pods_api_exception() -> None:
+async def test_watch_pods_api_exception(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING

     async def mock_watch() -> None:
@@ -504,9 +510,9 @@ async def mock_handle() -> None:
 @pytest.mark.asyncio
-async def test_watch_pods_generic_exception() -> None:
+async def test_watch_pods_generic_exception(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING

     async def mock_watch() -> None:
@@ -526,7 +532,7 @@ async def mock_handle() -> None:
 @pytest.mark.asyncio
-async def test_create_pod_monitor_context_manager(monkeypatch: pytest.MonkeyPatch) -> None:
+async def test_create_pod_monitor_context_manager(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics, monkeypatch: pytest.MonkeyPatch) -> None:
     """Test create_pod_monitor factory with auto-created dependencies."""
     # Mock create_k8s_clients to avoid real K8s connection
     mock_v1 = FakeV1Api()
@@ -552,10 +558,10 @@ def mock_create_clients(
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = False

-    service, _ = create_test_kafka_event_service()
+    service, _ = create_test_kafka_event_service(event_metrics)

     # Use the actual create_pod_monitor which will use our mocked create_k8s_clients
-    async with create_pod_monitor(cfg, service, _test_logger) as monitor:
+    async with create_pod_monitor(cfg, service, _test_logger, kubernetes_metrics=kubernetes_metrics) as monitor:
         assert monitor.state == MonitorState.RUNNING
         final_state: MonitorState = monitor.state
@@ -563,12 +569,12 @@
 @pytest.mark.asyncio
-async def test_create_pod_monitor_with_injected_k8s_clients() -> None:
+async def test_create_pod_monitor_with_injected_k8s_clients(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     """Test create_pod_monitor with injected K8sClients (DI path)."""
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = False

-    service, _ = create_test_kafka_event_service()
+    service, _ = create_test_kafka_event_service(event_metrics)

     mock_v1 = FakeV1Api()
     mock_watch = make_watch([])
@@ -581,7 +587,7 @@ async def test_create_pod_monitor_with_injected_k8s_clients() -> None:
     )

     async with create_pod_monitor(
-        cfg, service, _test_logger, k8s_clients=mock_k8s_clients
+        cfg, service, _test_logger, k8s_clients=mock_k8s_clients, kubernetes_metrics=kubernetes_metrics
     ) as monitor:
         assert monitor.state == MonitorState.RUNNING
         assert monitor._clients is mock_k8s_clients
@@ -592,10 +598,10 @@
 @pytest.mark.asyncio
-async def test_start_already_running() -> None:
+async def test_start_already_running(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     """Test idempotent start via __aenter__."""
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)

     # Simulate already started state
     pm._lifecycle_started = True
@@ -606,10 +612,10 @@
 @pytest.mark.asyncio
-async def test_stop_already_stopped() -> None:
+async def test_stop_already_stopped(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     """Test idempotent stop via aclose()."""
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.STOPPED

     # Not started, so aclose should be a no-op
@@ -617,10 +623,10 @@
 @pytest.mark.asyncio
-async def test_stop_with_tasks() -> None:
+async def test_stop_with_tasks(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     """Test cleanup of tasks on aclose()."""
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING
     pm._lifecycle_started = True
@@ -637,9 +643,9 @@ async def dummy_task() -> None:
     assert len(pm._tracked_pods) == 0


-def test_update_resource_version() -> None:
+def test_update_resource_version(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)

     class Stream:
         _stop_event = types.SimpleNamespace(resource_version="v123")
@@ -654,9 +660,9 @@ class BadStream:
 @pytest.mark.asyncio
-async def test_process_raw_event_with_metadata() -> None:
+async def test_process_raw_event_with_metadata(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)

     processed: list[PodEvent] = []
@@ -682,9 +688,9 @@ async def mock_process(event: PodEvent) -> None:
 @pytest.mark.asyncio
-async def test_watch_pods_api_exception_other_status() -> None:
+async def test_watch_pods_api_exception_other_status(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING

     async def mock_watch() -> None:
@@ -704,7 +710,7 @@ async def mock_handle() -> None:
 @pytest.mark.asyncio
-async def test_watch_pod_events_with_field_selector() -> None:
+async def test_watch_pod_events_with_field_selector(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.field_selector = "status.phase=Running"
     cfg.enable_state_reconciliation = False
@@ -729,7 +735,7 @@ def stream(self, func: Any, **kwargs: Any) -> FakeWatchStream:
         watch=TrackingWatch([], "rv1"),
     )

-    pm = make_pod_monitor(config=cfg, k8s_clients=k8s_clients)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg, k8s_clients=k8s_clients)
     pm._state = MonitorState.RUNNING

     await pm._watch_pod_events()
@@ -738,12 +744,12 @@
 @pytest.mark.asyncio
-async def test_reconciliation_loop_exception() -> None:
+async def test_reconciliation_loop_exception(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = True
     cfg.reconcile_interval_seconds = 0  # sleep(0) yields control immediately

-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)
     pm._state = MonitorState.RUNNING

     hit = asyncio.Event()
@@ -763,11 +769,11 @@ async def raising() -> ReconciliationResult:
 @pytest.mark.asyncio
-async def test_start_with_reconciliation() -> None:
+async def test_start_with_reconciliation(event_metrics: EventMetrics, kubernetes_metrics: KubernetesMetrics) -> None:
     cfg = PodMonitorConfig()
     cfg.enable_state_reconciliation = True

-    pm = make_pod_monitor(config=cfg)
+    pm = make_pod_monitor(event_metrics, kubernetes_metrics, config=cfg)

     async def mock_watch() -> None:
         return None
diff --git a/backend/tests/unit/services/result_processor/test_processor.py b/backend/tests/unit/services/result_processor/test_processor.py
index f78cc3bc..c13fe0ab 100644
--- a/backend/tests/unit/services/result_processor/test_processor.py
+++ b/backend/tests/unit/services/result_processor/test_processor.py
@@ -2,6 +2,7 @@
 from unittest.mock import MagicMock

 import pytest
+from app.core.metrics import EventMetrics, ExecutionMetrics
 from app.domain.enums.events import EventType
 from app.domain.enums.kafka import CONSUMER_GROUP_SUBSCRIPTIONS, GroupId, KafkaTopic
 from app.services.result_processor.processor import ResultProcessor, ResultProcessorConfig
@@ -28,7 +29,9 @@ def test_custom_values(self) -> None:
         assert config.processing_timeout == 600


-def test_create_dispatcher_registers_handlers() -> None:
+def test_create_dispatcher_registers_handlers(
+    execution_metrics: ExecutionMetrics, event_metrics: EventMetrics
+) -> None:
     rp = ResultProcessor(
         execution_repo=MagicMock(),
         producer=MagicMock(),
@@ -36,6 +39,8 @@
         settings=MagicMock(),
         idempotency_manager=MagicMock(),
         logger=_test_logger,
+        execution_metrics=execution_metrics,
+        event_metrics=event_metrics,
     )
     dispatcher = rp._create_dispatcher()
     assert dispatcher is not None
diff --git a/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py b/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py
index b8e24fb1..b414884a 100644
--- a/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py
+++ b/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py
@@ -2,6 +2,7 @@
 from unittest.mock import MagicMock

 import pytest
+from app.core.metrics import EventMetrics
 from app.db.repositories.resource_allocation_repository import ResourceAllocationRepository
 from app.db.repositories.saga_repository import SagaRepository
 from app.domain.enums.events import EventType
@@ -99,7 +100,7 @@ def get_steps(self) -> list[SagaStep[ExecutionRequestedEvent]]:
         return [_StepOK()]


-def _orch() -> SagaOrchestrator:
+def _orch(event_metrics: EventMetrics) -> SagaOrchestrator:
     return SagaOrchestrator(
         config=SagaConfig(name="t", enable_compensation=True, store_events=True, publish_commands=False),
         saga_repository=_FakeRepo(),
@@ -110,12 +111,13 @@
         idempotency_manager=_FakeIdem(),
         resource_allocation_repository=_FakeAlloc(),
         logger=_test_logger,
+        event_metrics=event_metrics,
     )


 @pytest.mark.asyncio
-async def test_min_success_flow() -> None:
-    orch = _orch()
+async def test_min_success_flow(event_metrics: EventMetrics) -> None:
+    orch = _orch(event_metrics)
     orch.register_saga(_Saga)
     # Set orchestrator running state via lifecycle property
     orch._lifecycle_started = True
@@ -125,7 +127,7 @@
 @pytest.mark.asyncio
-async def test_should_trigger_and_existing_short_circuit() -> None:
+async def test_should_trigger_and_existing_short_circuit(event_metrics: EventMetrics) -> None:
     fake_repo = _FakeRepo()
     orch = SagaOrchestrator(
         config=SagaConfig(name="t", enable_compensation=True, store_events=True, publish_commands=False),
@@ -137,6 +139,7 @@
         idempotency_manager=_FakeIdem(),
         resource_allocation_repository=_FakeAlloc(),
         logger=_test_logger,
+        event_metrics=event_metrics,
     )
     orch.register_saga(_Saga)
     assert orch._should_trigger_saga(_Saga, make_execution_requested_event(execution_id="e")) is True
diff --git a/backend/tests/unit/services/sse/test_kafka_redis_bridge.py b/backend/tests/unit/services/sse/test_kafka_redis_bridge.py
index 6e78449b..6fa5d1ef 100644
--- a/backend/tests/unit/services/sse/test_kafka_redis_bridge.py
+++ b/backend/tests/unit/services/sse/test_kafka_redis_bridge.py
@@ -2,7 +2,7 @@
 from unittest.mock import MagicMock

 import pytest
-from app.core.metrics.events import EventMetrics
+from app.core.metrics import EventMetrics
 from app.domain.enums.events import EventType
 from app.domain.events.typed import DomainEvent, EventMetadata, ExecutionStartedEvent
 from app.events.core import EventDispatcher
diff --git a/backend/tests/unit/services/sse/test_shutdown_manager.py b/backend/tests/unit/services/sse/test_shutdown_manager.py
index 69c9d9f5..05f6e023 100644
--- a/backend/tests/unit/services/sse/test_shutdown_manager.py
+++ b/backend/tests/unit/services/sse/test_shutdown_manager.py
@@ -3,6 +3,7 @@
 import pytest
 from app.core.lifecycle import LifecycleEnabled
+from app.core.metrics import ConnectionMetrics
 from app.services.sse.sse_shutdown_manager import SSEShutdownManager

 _test_logger = logging.getLogger("test.services.sse.shutdown_manager")
@@ -21,8 +22,8 @@ async def _on_stop(self) -> None:
 @pytest.mark.asyncio
-async def test_shutdown_graceful_notify_and_drain() -> None:
-    mgr = SSEShutdownManager(drain_timeout=1.0, notification_timeout=0.01, force_close_timeout=0.1, logger=_test_logger)
+async def test_shutdown_graceful_notify_and_drain(connection_metrics: ConnectionMetrics) -> None:
+    mgr = SSEShutdownManager(drain_timeout=1.0, notification_timeout=0.01, force_close_timeout=0.1, logger=_test_logger, connection_metrics=connection_metrics)

     # Register two connections and arrange that they unregister when notified
     ev1 = await mgr.register_connection("e1", "c1")
@@ -45,9 +46,9 @@ async def on_shutdown(event: asyncio.Event, cid: str) -> None:
 @pytest.mark.asyncio
-async def test_shutdown_force_close_calls_router_stop_and_rejects_new() -> None:
+async def test_shutdown_force_close_calls_router_stop_and_rejects_new(connection_metrics: ConnectionMetrics) -> None:
     mgr = SSEShutdownManager(
-        drain_timeout=0.01, notification_timeout=0.01, force_close_timeout=0.01, logger=_test_logger
+        drain_timeout=0.01, notification_timeout=0.01, force_close_timeout=0.01, logger=_test_logger, connection_metrics=connection_metrics
     )
     router = _FakeRouter()
     mgr.set_router(router)
@@ -69,8 +70,8 @@ async def test_shutdown_force_close_calls_router_stop_and_rejects_new() -> None:
 @pytest.mark.asyncio
-async def test_get_shutdown_status_transitions() -> None:
-    m = SSEShutdownManager(drain_timeout=0.01, notification_timeout=0.0, force_close_timeout=0.0, logger=_test_logger)
+async def test_get_shutdown_status_transitions(connection_metrics: ConnectionMetrics) -> None:
+    m = SSEShutdownManager(drain_timeout=0.01, notification_timeout=0.0, force_close_timeout=0.0, logger=_test_logger, connection_metrics=connection_metrics)
     st0 = m.get_shutdown_status()
     assert st0.phase == "ready"
     await m.initiate_shutdown()
diff --git a/backend/tests/unit/services/sse/test_sse_service.py b/backend/tests/unit/services/sse/test_sse_service.py
index 5aa59e21..48ff1751 100644
--- a/backend/tests/unit/services/sse/test_sse_service.py
+++ b/backend/tests/unit/services/sse/test_sse_service.py
@@ -6,6 +6,7 @@
 from unittest.mock import MagicMock

 import pytest
+from app.core.metrics import ConnectionMetrics
 from app.db.repositories.sse_repository import SSERepository
 from app.domain.enums.events import EventType
 from app.domain.enums.execution import ExecutionStatus
@@ -129,12 +130,12 @@ def _decode(evt: dict[str, Any]) -> dict[str, Any]:
 @pytest.mark.asyncio
-async def test_execution_stream_closes_on_failed_event() -> None:
+async def test_execution_stream_closes_on_failed_event(connection_metrics: ConnectionMetrics) -> None:
     repo = _FakeRepo()
     bus = _FakeBus()
     sm = _FakeShutdown()
     svc = SSEService(repository=repo, router=_FakeRouter(), sse_bus=bus, shutdown_manager=sm,
-                     settings=_make_fake_settings(), logger=_test_logger)
+                     settings=_make_fake_settings(), logger=_test_logger, connection_metrics=connection_metrics)

     agen = svc.create_execution_stream("exec-1", user_id="u1")
     first = await agen.__anext__()
@@ -158,7 +159,7 @@
 @pytest.mark.asyncio
-async def test_execution_stream_result_stored_includes_result_payload() -> None:
+async def test_execution_stream_result_stored_includes_result_payload(connection_metrics: ConnectionMetrics) -> None:
     repo = _FakeRepo()
     # DomainExecution with RU to_dict
     repo.exec_for_result = DomainExecution(
@@ -178,7 +179,7 @@
     bus = _FakeBus()
     sm = _FakeShutdown()
     svc = SSEService(repository=repo, router=_FakeRouter(), sse_bus=bus, shutdown_manager=sm,
-                     settings=_make_fake_settings(), logger=_test_logger)
+                     settings=_make_fake_settings(), logger=_test_logger, connection_metrics=connection_metrics)

     agen = svc.create_execution_stream("exec-2", user_id="u1")
     await agen.__anext__()  # connected
@@ -196,14 +197,14 @@
 @pytest.mark.asyncio
-async def test_notification_stream_connected_and_heartbeat_and_message() -> None:
+async def test_notification_stream_connected_and_heartbeat_and_message(connection_metrics: ConnectionMetrics) -> None:
     repo = _FakeRepo()
     bus = _FakeBus()
     sm = _FakeShutdown()
     settings = _make_fake_settings()
     settings.SSE_HEARTBEAT_INTERVAL = 0  # emit immediately
     svc = SSEService(repository=repo, router=_FakeRouter(), sse_bus=bus, shutdown_manager=sm, settings=settings,
-                     logger=_test_logger)
+                     logger=_test_logger, connection_metrics=connection_metrics)

     agen = svc.create_notification_stream("u1")
     connected = await agen.__anext__()
@@ -241,9 +242,9 @@
 @pytest.mark.asyncio
-async def test_health_status_shape() -> None:
+async def test_health_status_shape(connection_metrics: ConnectionMetrics) -> None:
     svc = SSEService(repository=_FakeRepo(), router=_FakeRouter(), sse_bus=_FakeBus(), shutdown_manager=_FakeShutdown(),
-                     settings=_make_fake_settings(), logger=_test_logger)
+                     settings=_make_fake_settings(), logger=_test_logger, connection_metrics=connection_metrics)
     h = await svc.get_health_status()
     assert isinstance(h, SSEHealthDomain)
     assert h.active_consumers == 3 and h.active_executions == 2
diff --git a/backend/tests/unit/services/sse/test_sse_shutdown_manager.py b/backend/tests/unit/services/sse/test_sse_shutdown_manager.py
index 43d3e61c..fc7ffb3b 100644
--- a/backend/tests/unit/services/sse/test_sse_shutdown_manager.py
+++ b/backend/tests/unit/services/sse/test_sse_shutdown_manager.py
@@ -3,6 +3,7 @@
 import pytest
 from app.core.lifecycle import LifecycleEnabled
+from app.core.metrics import ConnectionMetrics
 from app.services.sse.sse_shutdown_manager import SSEShutdownManager

 pytestmark = pytest.mark.unit
@@ -23,8 +24,8 @@ async def _on_stop(self) -> None:
 @pytest.mark.asyncio
-async def test_register_unregister_and_shutdown_flow() -> None:
-    mgr = SSEShutdownManager(drain_timeout=0.5, notification_timeout=0.1, force_close_timeout=0.1, logger=_test_logger)
+async def test_register_unregister_and_shutdown_flow(connection_metrics: ConnectionMetrics) -> None:
+    mgr = SSEShutdownManager(drain_timeout=0.5, notification_timeout=0.1, force_close_timeout=0.1, logger=_test_logger, connection_metrics=connection_metrics)
     mgr.set_router(_FakeRouter())

     # Register two connections
@@ -50,9 +51,9 @@
 @pytest.mark.asyncio
-async def test_reject_new_connection_during_shutdown() -> None:
+async def test_reject_new_connection_during_shutdown(connection_metrics: ConnectionMetrics) -> None:
     mgr = SSEShutdownManager(drain_timeout=0.5, notification_timeout=0.01, force_close_timeout=0.01,
-                             logger=_test_logger)
+                             logger=_test_logger, connection_metrics=connection_metrics)
     # Pre-register one active connection - shutdown will block waiting for it
     e = await mgr.register_connection("e", "c0")
     assert e is not None
diff --git a/backend/workers/run_result_processor.py b/backend/workers/run_result_processor.py
index 11cb7a72..5431b011 100644
--- a/backend/workers/run_result_processor.py
+++ b/backend/workers/run_result_processor.py
@@ -5,6 +5,7 @@
 from app.core.container import create_result_processor_container
 from app.core.logging import setup_logger
+from app.core.metrics import EventMetrics, ExecutionMetrics
 from app.core.tracing import init_tracing
 from app.db.docs import ALL_DOCUMENTS
 from app.db.repositories.execution_repository import ExecutionRepository
@@ -30,6 +31,8 @@ async def run_result_processor(settings: Settings) -> None:
     schema_registry = await container.get(SchemaRegistryManager)
     idempotency_manager = await container.get(IdempotencyManager)
     execution_repo = await container.get(ExecutionRepository)
+    execution_metrics = await container.get(ExecutionMetrics)
+    event_metrics = await container.get(EventMetrics)
     logger = await container.get(logging.Logger)

     logger.info(f"Beanie ODM initialized with {len(ALL_DOCUMENTS)} document models")
@@ -41,6 +44,8 @@
         settings=settings,
         idempotency_manager=idempotency_manager,
         logger=logger,
+        execution_metrics=execution_metrics,
+        event_metrics=event_metrics,
     )

     # Shutdown event - signal handlers just set this
diff --git a/cert-generator/Dockerfile b/cert-generator/Dockerfile
index 6dc068ab..5cc0fdb5 100644
--- a/cert-generator/Dockerfile
+++ b/cert-generator/Dockerfile
@@ -7,7 +7,7 @@
 ARG KUBECTL_VERSION=v1.33.6
 ARG MKCERT_VERSION=v1.4.4

 # Install required packages and tools for all architectures
-RUN apk add --no-cache wget ca-certificates openssl curl dos2unix netcat-openbsd && \
+RUN apk add --no-cache wget ca-certificates openssl curl dos2unix netcat-openbsd iproute2 && \
     update-ca-certificates && \
     # Detect architecture and install appropriate binaries
     ARCH=$(uname -m); \
diff --git a/cert-generator/setup-k8s.sh b/cert-generator/setup-k8s.sh
index c665a17e..43609841 100644
--- a/cert-generator/setup-k8s.sh
+++ b/cert-generator/setup-k8s.sh
@@ -3,6 +3,50 @@
 set -e

 echo "Setting up Kubernetes resources..."
+# Auto-configure kubectl for k3s if needed
+# k3s stores its kubeconfig at /etc/rancher/k3s/k3s.yaml
+# When running in bridge network, we need to use the routable host IP instead of 127.0.0.1
+configure_kubectl() {
+    # If kubectl already works, nothing to do
+    if kubectl version --request-timeout=2s >/dev/null 2>&1; then
+        return 0
+    fi
+    # Try k3s kubeconfig with routable IP (for bridge network containers)
+    if [ -r /etc/rancher/k3s/k3s.yaml ]; then
+        # Get the k3s node-ip from config (routable from containers)
+        K3S_HOST_IP=""
+        if [ -r /etc/rancher/k3s/config.yaml ]; then
+            K3S_HOST_IP=$(grep -E '^node-ip:' /etc/rancher/k3s/config.yaml 2>/dev/null | sed -E 's/^node-ip:[[:space:]]*"?([^"[:space:]]+)"?.*/\1/' | head -1)
+        fi
+        # If no node-ip found, try to detect host from container (for CI/Docker environments)
+        if [ -z "$K3S_HOST_IP" ] || [ "$K3S_HOST_IP" = "127.0.0.1" ]; then
+            # Prefer host.docker.internal (works with TLS cert SANs, requires extra_hosts in compose)
+            if getent hosts host.docker.internal >/dev/null 2>&1; then
+                K3S_HOST_IP="host.docker.internal"
+            fi
+        fi
+        if [ -z "$K3S_HOST_IP" ] || [ "$K3S_HOST_IP" = "127.0.0.1" ]; then
+            # Fallback: Docker gateway (may need insecure TLS if IP not in cert SANs)
+            K3S_HOST_IP=$(ip route 2>/dev/null | grep default | awk '{print $3}' | head -1)
+        fi
+        if [ -n "$K3S_HOST_IP" ] && [ "$K3S_HOST_IP" != "127.0.0.1" ]; then
+            # Create modified kubeconfig with routable IP/hostname
+            # Handle both 127.0.0.1 and 0.0.0.0 (k3s may use either depending on config)
+            mkdir -p /tmp/kube
+            sed -E "s#https://(127\.0\.0\.1|0\.0\.0\.0):#https://${K3S_HOST_IP}:#g" /etc/rancher/k3s/k3s.yaml > /tmp/kube/config
+            export KUBECONFIG=/tmp/kube/config
+            echo "Using k3s kubeconfig with routable IP: $K3S_HOST_IP"
+        else
+            export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
+            echo "Using k3s kubeconfig: $KUBECONFIG"
+        fi
+        return 0
+    fi
+    return 1
+}
+
+configure_kubectl || true
+
 # In CI mode, skip k8s setup if connection fails
 if [ -n "$CI" ]; then
     echo "Running in CI mode"
@@ -34,11 +78,22 @@ EOF
     fi
 fi

-# Check k8s connection
-if ! kubectl version --request-timeout=5s >/dev/null 2>&1; then
-    echo "ERROR: Cannot connect to Kubernetes cluster!"
-    exit 1
-fi
+# Check k8s connection with retries (k3s may still be initializing)
+echo "Checking Kubernetes connection..."
+MAX_RETRIES=12
+RETRY_DELAY=5
+for i in $(seq 1 $MAX_RETRIES); do
+    if kubectl version --request-timeout=10s >/dev/null 2>&1; then
+        echo "Connected to Kubernetes (attempt $i)"
+        break
+    fi
+    if [ $i -eq $MAX_RETRIES ]; then
+        echo "ERROR: Cannot connect to Kubernetes cluster after $MAX_RETRIES attempts!"
+        exit 1
+    fi
+    echo "Kubernetes not ready, retrying in ${RETRY_DELAY}s... (attempt $i/$MAX_RETRIES)"
+    sleep $RETRY_DELAY
+done

 echo "Connected to Kubernetes"
@@ -167,8 +222,29 @@
 TOKEN_LEN=$(printf %s "$TOKEN" | wc -c | awk '{print $1}')
 TOKEN_HEAD=$(printf %s "$TOKEN" | cut -c1-10)
 echo "ServiceAccount token acquired (len=${TOKEN_LEN}, head=${TOKEN_HEAD}...)"

-# For containers: use host.docker.internal (mapped to host-gateway) but keep TLS host verification via tls-server-name
-CONTAINER_SERVER="https://host.docker.internal:${K8S_PORT}"
+# Determine the host IP that containers can reach
+# Priority: 1) k3s node-ip config, 2) server URL from kubeconfig, 3) fallback to host.docker.internal
+get_container_host_ip() {
+    # Try k3s config node-ip (most reliable for k3s setups)
+    if [ -f /etc/rancher/k3s/config.yaml ]; then
+        K3S_NODE_IP=$(grep -E '^node-ip:' /etc/rancher/k3s/config.yaml 2>/dev/null | sed -E 's/^node-ip:[[:space:]]*"?([^"[:space:]]+)"?.*/\1/' | head -1)
+        if [ -n "$K3S_NODE_IP" ] && [ "$K3S_NODE_IP" != "127.0.0.1" ]; then
+            echo "$K3S_NODE_IP"
+            return
+        fi
+    fi
+    # Try extracting from kubeconfig server URL (if not localhost)
+    if [ -n "$SERVER_HOST" ] && [ "$SERVER_HOST" != "127.0.0.1" ] && [ "$SERVER_HOST" != "localhost" ]; then
+        echo "$SERVER_HOST"
+        return
+    fi
+    # Fallback to host.docker.internal (works on Docker Desktop, may need extra_hosts on Linux)
+    echo "host.docker.internal"
+}
+
+CONTAINER_HOST_IP=$(get_container_host_ip)
+CONTAINER_SERVER="https://${CONTAINER_HOST_IP}:${K8S_PORT}"
+echo "Detected container-accessible host IP: ${CONTAINER_HOST_IP}"

 echo "Writing kubeconfig for containers:"
 echo "  cluster: ${CLUSTER_NAME}"
+97,32 @@ cmd_dev() { print_header "Starting Local Development Environment" local BUILD_FLAG="" - if [[ "$1" == "--build" ]]; then - BUILD_FLAG="--build" - print_info "Rebuilding images..." + local WAIT_FLAG="" + local WAIT_TIMEOUT="300" + + while [[ $# -gt 0 ]]; do + case "$1" in + --build) + BUILD_FLAG="--build" + print_info "Rebuilding images..." + ;; + --wait) + WAIT_FLAG="--wait" + ;; + --timeout) + shift + WAIT_TIMEOUT="$1" + ;; + esac + shift + done + + local WAIT_TIMEOUT_FLAG="" + if [[ -n "$WAIT_FLAG" ]]; then + WAIT_TIMEOUT_FLAG="--wait-timeout $WAIT_TIMEOUT" fi - docker compose up -d $BUILD_FLAG + docker compose --profile observability up -d $BUILD_FLAG $WAIT_FLAG $WAIT_TIMEOUT_FLAG echo "" print_success "Development environment started!" @@ -118,6 +146,38 @@ cmd_down() { print_success "All services stopped" } +cmd_infra() { + print_header "Starting Infrastructure Services Only" + + local WAIT_FLAG="" + local WAIT_TIMEOUT="120" + + while [[ $# -gt 0 ]]; do + case "$1" in + --wait) + WAIT_FLAG="--wait" + ;; + --timeout) + shift + WAIT_TIMEOUT="$1" + ;; + esac + shift + done + + local WAIT_TIMEOUT_FLAG="" + if [[ -n "$WAIT_FLAG" ]]; then + WAIT_TIMEOUT_FLAG="--wait-timeout $WAIT_TIMEOUT" + fi + + # Start only infrastructure services (no app, no workers, no observability) + # zookeeper-certgen is needed for kafka to start + docker compose up -d zookeeper-certgen mongo redis zookeeper kafka schema-registry $WAIT_FLAG $WAIT_TIMEOUT_FLAG + + print_success "Infrastructure services started" + docker compose ps +} + cmd_logs() { local SERVICE="$1" if [[ -n "$SERVICE" ]]; then @@ -181,27 +241,18 @@ cmd_check() { cmd_test() { print_header "Running Test Suite" - print_info "Starting services..." - docker compose up -d --build + print_info "Starting full stack..." + cmd_dev --build --wait - print_info "Waiting for backend to be healthy..." - if ! curl --retry 60 --retry-delay 5 --retry-all-errors -ksfo /dev/null https://localhost:443/api/v1/health/live; then - print_error "Backend failed to become healthy" - docker compose logs - exit 1 - fi - print_success "Backend is healthy" - - print_info "Running tests..." - cd backend - if uv run pytest tests/integration tests/unit -v --cov=app --cov-report=term; then + print_info "Running tests inside Docker..." + if docker compose exec -T backend \ + uv run pytest tests/integration tests/unit -v --cov=app --cov-report=term; then print_success "All tests passed!" TEST_RESULT=0 else print_error "Tests failed" TEST_RESULT=1 fi - cd .. print_info "Cleaning up..." 
docker compose down @@ -383,7 +434,12 @@ cmd_types() { # ============================================================================= case "${1:-help}" in dev) - cmd_dev "$2" + shift + cmd_dev "$@" + ;; + infra) + shift + cmd_infra "$@" ;; down) cmd_down diff --git a/docker-compose.ci.yaml b/docker-compose.ci.yaml deleted file mode 100644 index 3367c677..00000000 --- a/docker-compose.ci.yaml +++ /dev/null @@ -1,243 +0,0 @@ -# CI-optimized Docker Compose configuration -# -# Usage: -# Backend integration tests (infra only, no builds): -# docker compose -f docker-compose.ci.yaml up -d --wait -# -# Frontend E2E tests (full stack with builds): -# docker compose -f docker-compose.ci.yaml --profile full up -d --wait -# -# Key differences from docker-compose.yaml: -# - KRaft Kafka (no Zookeeper) - simpler, faster startup -# - No SASL/TLS for Kafka - not needed for tests -# - Profiles separate infra from app services -# - Minimal services for fast CI - -services: - # ============================================================================= - # INFRASTRUCTURE SERVICES (no profile = always started) - # ============================================================================= - - mongo: - image: mongo:8.0 - container_name: mongo - ports: - - "27017:27017" - environment: - MONGO_INITDB_ROOT_USERNAME: root - MONGO_INITDB_ROOT_PASSWORD: rootpassword - MONGO_INITDB_DATABASE: integr8scode - tmpfs: - - /data/db # Use tmpfs for faster CI - networks: - - ci-network - healthcheck: - test: mongosh --eval 'db.runCommand("ping").ok' --quiet - interval: 2s - timeout: 3s - retries: 15 - start_period: 5s - - redis: - image: redis:7-alpine - container_name: redis - ports: - - "6379:6379" - command: redis-server --maxmemory 128mb --maxmemory-policy allkeys-lru --save "" - networks: - - ci-network - healthcheck: - test: ["CMD", "redis-cli", "ping"] - interval: 2s - timeout: 2s - retries: 10 - start_period: 2s - - # KRaft mode Kafka - official Apache image, no Zookeeper needed - kafka: - image: apache/kafka:3.9.0 - container_name: kafka - ports: - - "9092:9092" - environment: - # KRaft mode configuration - KAFKA_NODE_ID: 1 - KAFKA_PROCESS_ROLES: broker,controller - KAFKA_CONTROLLER_QUORUM_VOTERS: 1@localhost:9093 - # Listeners: CONTROLLER for raft, HOST for external, DOCKER for internal - KAFKA_LISTENERS: CONTROLLER://localhost:9093,HOST://0.0.0.0:9092,DOCKER://0.0.0.0:29092 - KAFKA_ADVERTISED_LISTENERS: HOST://localhost:9092,DOCKER://kafka:29092 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,HOST:PLAINTEXT,DOCKER:PLAINTEXT - KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER - KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER - # CI optimizations - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 - KAFKA_NUM_PARTITIONS: 1 - KAFKA_DEFAULT_REPLICATION_FACTOR: 1 - # Reduce memory usage - KAFKA_HEAP_OPTS: "-Xms256m -Xmx512m" - networks: - - ci-network - healthcheck: - test: /opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server localhost:9092 || exit 1 - interval: 2s - timeout: 5s - retries: 30 - start_period: 10s - - schema-registry: - image: confluentinc/cp-schema-registry:7.5.0 - container_name: schema-registry - ports: - - "8081:8081" - environment: - SCHEMA_REGISTRY_HOST_NAME: schema-registry - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: kafka:29092 - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - SCHEMA_REGISTRY_HEAP_OPTS: "-Xms128m -Xmx256m" - depends_on: - kafka: - 
condition: service_healthy - networks: - - ci-network - healthcheck: - test: curl -f http://localhost:8081/config || exit 1 - interval: 2s - timeout: 3s - retries: 20 - start_period: 3s - - # ============================================================================= - # APPLICATION SERVICES (profile: full - only for E2E tests) - # ============================================================================= - - # Shared base image for backend - base: - build: - context: ./backend - dockerfile: Dockerfile.base - image: integr8scode-base:latest - profiles: ["full"] - - # Certificate generator for TLS - shared-ca: - image: alpine:latest - profiles: ["full"] - volumes: - - shared_ca:/shared_ca - command: sh -c "mkdir -p /shared_ca && chmod 777 /shared_ca && sleep 1" - networks: - - ci-network - - cert-generator: - build: - context: ./cert-generator - dockerfile: Dockerfile - image: integr8scode-cert-generator:latest - profiles: ["full"] - volumes: - - ./backend/certs:/backend-certs - - ./frontend/certs:/frontend-certs - - shared_ca:/shared_ca - - ./backend:/backend - environment: - - SHARED_CA_DIR=/shared_ca - - BACKEND_CERT_DIR=/backend-certs - - FRONTEND_CERT_DIR=/frontend-certs - - CI=true - extra_hosts: - - "host.docker.internal:host-gateway" - restart: "no" - network_mode: host - depends_on: - shared-ca: - condition: service_completed_successfully - - backend: - build: - context: ./backend - dockerfile: Dockerfile - additional_contexts: - base: service:base - image: integr8scode-backend:latest - profiles: ["full"] - container_name: backend - ports: - - "443:443" - environment: - - SERVER_HOST=0.0.0.0 - - TESTING=true - - MONGODB_URL=mongodb://root:rootpassword@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - REDIS_HOST=redis - - REDIS_PORT=6379 - - OTEL_SDK_DISABLED=true - - ENABLE_TRACING=false - - SECRET_KEY=ci-test-secret-key-for-testing-only-32chars!! 
- volumes: - - ./backend/certs:/app/certs:ro - - shared_ca:/shared_ca:ro - - ./backend/kubeconfig.yaml:/app/kubeconfig.yaml:ro - extra_hosts: - - "host.docker.internal:host-gateway" - depends_on: - base: - condition: service_completed_successfully - cert-generator: - condition: service_completed_successfully - mongo: - condition: service_healthy - redis: - condition: service_healthy - kafka: - condition: service_healthy - schema-registry: - condition: service_healthy - networks: - - ci-network - healthcheck: - test: ["CMD-SHELL", "curl -k -f -s https://localhost:443/api/v1/health/live || exit 1"] - interval: 5s - timeout: 5s - retries: 20 - start_period: 30s - - frontend: - build: - context: ./frontend - dockerfile: Dockerfile - image: integr8scode-frontend:latest - profiles: ["full"] - container_name: frontend - ports: - - "5001:5001" - environment: - - VITE_BACKEND_URL=https://backend:443 - - NODE_EXTRA_CA_CERTS=/shared_ca/mkcert-ca.pem - volumes: - - ./frontend/certs:/app/certs:ro - - shared_ca:/shared_ca:ro - depends_on: - cert-generator: - condition: service_completed_successfully - backend: - condition: service_healthy - networks: - - ci-network - healthcheck: - test: ["CMD-SHELL", "curl -k -f -s https://localhost:5001/ || exit 1"] - interval: 5s - timeout: 5s - retries: 20 - start_period: 30s - -volumes: - shared_ca: - -networks: - ci-network: - driver: bridge diff --git a/docker-compose.yaml b/docker-compose.yaml index 9611c4e8..ed5a3379 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -22,14 +22,18 @@ services: - ./backend/certs:/backend-certs - ./frontend/certs:/frontend-certs - ~/.kube:/root/.kube + - /etc/rancher/k3s:/etc/rancher/k3s:ro - shared_ca:/shared_ca - ./backend:/backend environment: - SHARED_CA_DIR=/shared_ca - BACKEND_CERT_DIR=/backend-certs - FRONTEND_CERT_DIR=/frontend-certs + extra_hosts: + - "host.docker.internal:host-gateway" restart: "no" - network_mode: host + networks: + - app-network depends_on: shared-ca: condition: service_completed_successfully @@ -53,10 +57,10 @@ services: hard: 65536 healthcheck: test: echo 'db.runCommand("ping").ok' | mongosh localhost/integr8scode -u ${MONGO_ROOT_USER:-root} -p ${MONGO_ROOT_PASSWORD:-rootpassword} --authenticationDatabase admin --quiet - interval: 10s - timeout: 10s - retries: 5 - start_period: 30s + interval: 5s + timeout: 5s + retries: 10 + start_period: 10s redis: image: redis:7-alpine @@ -100,7 +104,9 @@ services: - ./backend/app:/app/app - ./backend/workers:/app/workers - ./backend/scripts:/app/scripts + - ./backend/tests:/app/tests:ro - ./backend/certs:/app/certs:ro + - ./backend/.env.test:/app/.env.test:ro - shared_ca:/shared_ca:ro - ./backend/kubeconfig.yaml:/app/kubeconfig.yaml:ro ports: @@ -115,9 +121,7 @@ services: - ./backend/.env environment: - SERVER_HOST=0.0.0.0 - - OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4317 - - OTEL_SERVICE_NAME=integr8scode-backend - - OTEL_SERVICE_VERSION=1.0.0 + - KUBECONFIG=/app/kubeconfig.yaml healthcheck: # Simpler, reliable healthcheck: curl fails non-zero for HTTP >=400 with -f test: ["CMD-SHELL", "curl -k -f -s https://localhost:443/api/v1/health/live >/dev/null || exit 1"] @@ -148,12 +152,18 @@ services: environment: - VITE_BACKEND_URL=https://backend:443 - NODE_EXTRA_CA_CERTS=/shared_ca/mkcert-ca.pem - + healthcheck: + test: ["CMD-SHELL", "curl -k -f -s https://localhost:5001 >/dev/null || exit 1"] + interval: 3s + timeout: 3s + retries: 30 + start_period: 10s grafana: container_name: grafana - image: grafana/grafana:latest + image: 
grafana/grafana:12.3.1 + profiles: ["observability"] user: "472" ports: - "3000:3000" @@ -180,7 +190,7 @@ services: restart: "no" zookeeper: - image: confluentinc/cp-zookeeper:7.5.0 + image: confluentinc/cp-zookeeper:7.8.2 container_name: zookeeper depends_on: zookeeper-certgen: @@ -251,13 +261,13 @@ services: hard: 65536 healthcheck: test: ["CMD-SHELL", "echo ruok | nc localhost 2181 | grep imok"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s + interval: 5s + timeout: 5s + retries: 10 + start_period: 10s kafka: - image: confluentinc/cp-kafka:7.5.0 + image: confluentinc/cp-kafka:7.8.2 container_name: kafka depends_on: zookeeper: @@ -312,13 +322,13 @@ services: hard: 65536 healthcheck: test: ["CMD-SHELL", "kafka-broker-api-versions --bootstrap-server localhost:9092"] - interval: 30s + interval: 5s timeout: 10s - retries: 3 - start_period: 60s + retries: 12 + start_period: 15s schema-registry: - image: confluentinc/cp-schema-registry:7.5.0 + image: confluentinc/cp-schema-registry:7.8.2 container_name: schema-registry depends_on: kafka: @@ -333,10 +343,10 @@ services: - app-network healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8081/config"] - interval: 10s + interval: 5s timeout: 5s - retries: 5 - start_period: 30s + retries: 10 + start_period: 10s kafdrop: image: obsidiandynamics/kafdrop:3.31.0 @@ -413,19 +423,14 @@ services: condition: service_completed_successfully mongo: condition: service_started - jaeger: - condition: service_started env_file: - ./backend/.env environment: - - MONGODB_URL=mongodb://${MONGO_ROOT_USER:-root}:${MONGO_ROOT_PASSWORD:-rootpassword}@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 - TRACING_SERVICE_NAME=execution-coordinator - KAFKA_CONSUMER_GROUP_ID=execution-coordinator + volumes: + - ./backend/app:/app/app:ro + - ./backend/workers:/app/workers:ro networks: - app-network restart: unless-stopped @@ -444,24 +449,16 @@ services: condition: service_completed_successfully mongo: condition: service_started - jaeger: - condition: service_started env_file: - ./backend/.env environment: - - MONGODB_URL=mongodb://${MONGO_ROOT_USER:-root}:${MONGO_ROOT_PASSWORD:-rootpassword}@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 - TRACING_SERVICE_NAME=k8s-worker - - K8S_NAMESPACE=integr8scode - - KUBECONFIG=/app/kubeconfig.yaml - KAFKA_CONSUMER_GROUP_ID=k8s-worker + - KUBECONFIG=/app/kubeconfig.yaml volumes: - ./backend/app:/app/app:ro - ./backend/workers:/app/workers:ro + - ./backend/kubeconfig.yaml:/app/kubeconfig.yaml:ro networks: - app-network extra_hosts: @@ -480,23 +477,16 @@ services: condition: service_completed_successfully kafka-init: condition: service_completed_successfully - jaeger: - condition: service_started env_file: - ./backend/.env environment: - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 - TRACING_SERVICE_NAME=pod-monitor - - K8S_NAMESPACE=integr8scode - - KUBECONFIG=/app/kubeconfig.yaml - KAFKA_CONSUMER_GROUP_ID=pod-monitor + - KUBECONFIG=/app/kubeconfig.yaml volumes: - ./backend/app:/app/app:ro - ./backend/workers:/app/workers:ro + - 
./backend/kubeconfig.yaml:/app/kubeconfig.yaml:ro networks: - app-network extra_hosts: @@ -517,23 +507,16 @@ services: condition: service_completed_successfully mongo: condition: service_started - jaeger: - condition: service_started env_file: - ./backend/.env environment: - - MONGODB_URL=mongodb://${MONGO_ROOT_USER:-root}:${MONGO_ROOT_PASSWORD:-rootpassword}@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 - TRACING_SERVICE_NAME=result-processor - KAFKA_CONSUMER_GROUP_ID=result-processor-group - KUBECONFIG=/app/kubeconfig.yaml volumes: - ./backend/app:/app/app:ro - ./backend/workers:/app/workers:ro + - ./backend/kubeconfig.yaml:/app/kubeconfig.yaml:ro networks: - app-network extra_hosts: @@ -554,18 +537,14 @@ services: condition: service_completed_successfully mongo: condition: service_started - jaeger: - condition: service_started env_file: - ./backend/.env environment: - - MONGODB_URL=mongodb://${MONGO_ROOT_USER:-root}:${MONGO_ROOT_PASSWORD:-rootpassword}@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 - TRACING_SERVICE_NAME=saga-orchestrator + - KAFKA_CONSUMER_GROUP_ID=saga-orchestrator + volumes: + - ./backend/app:/app/app:ro + - ./backend/workers:/app/workers:ro networks: - app-network restart: unless-stopped @@ -574,6 +553,7 @@ services: jaeger: image: jaegertracing/all-in-one:1.52 container_name: jaeger + profiles: ["observability"] ports: - "5775:5775/udp" # Zipkin/thrift compact - "6831:6831/udp" # Thrift compact @@ -608,14 +588,11 @@ services: env_file: - ./backend/.env environment: - - MONGODB_URL=mongodb://${MONGO_ROOT_USER:-root}:${MONGO_ROOT_PASSWORD:-rootpassword}@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - SERVICE_NAME=event-replay - TRACING_SERVICE_NAME=event-replay - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 + - KAFKA_CONSUMER_GROUP_ID=event-replay + volumes: + - ./backend/app:/app/app:ro + - ./backend/workers:/app/workers:ro networks: - app-network restart: unless-stopped @@ -639,18 +616,11 @@ services: env_file: - ./backend/.env environment: - - MONGODB_URL=mongodb://${MONGO_ROOT_USER:-root}:${MONGO_ROOT_PASSWORD:-rootpassword}@mongo:27017/integr8scode?authSource=admin - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - SERVICE_NAME=dlq-processor - TRACING_SERVICE_NAME=dlq-processor - - ENABLE_TRACING=true - - JAEGER_AGENT_HOST=jaeger - - JAEGER_AGENT_PORT=6831 - - DLQ_MAX_RETRY_ATTEMPTS=5 - - DLQ_RETRY_DELAY_HOURS=1 - - DLQ_MAX_AGE_DAYS=7 - - DLQ_BATCH_SIZE=100 + - KAFKA_CONSUMER_GROUP_ID=dlq-processor + volumes: + - ./backend/app:/app/app:ro + - ./backend/workers:/app/workers:ro networks: - app-network restart: unless-stopped @@ -660,6 +630,7 @@ services: victoria-metrics: image: victoriametrics/victoria-metrics:v1.96.0 container_name: victoria-metrics + profiles: ["observability"] ports: - "8428:8428" volumes: @@ -671,16 +642,17 @@ services: networks: - app-network healthcheck: - test: ["CMD", "wget", "--spider", "-q", "http://localhost:8428/health"] - interval: 30s - timeout: 10s - retries: 3 + test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8428/-/healthy"] + interval: 
5s + timeout: 5s + retries: 12 start_period: 10s # Kafka Exporter for metrics kafka-exporter: image: danielqsj/kafka-exporter:latest container_name: kafka-exporter + profiles: ["observability"] command: - "--kafka.server=kafka:29092" - "--web.listen-address=:9308" @@ -699,6 +671,7 @@ services: otel-collector: image: otel/opentelemetry-collector-contrib:0.91.0 container_name: otel-collector + profiles: ["observability"] command: ["--config=/etc/otel-collector-config.yaml"] volumes: - ./backend/otel-collector-config.yaml:/etc/otel-collector-config.yaml:ro @@ -712,12 +685,8 @@ services: depends_on: - victoria-metrics - jaeger - healthcheck: - test: ["CMD", "wget", "--spider", "-q", "http://localhost:13133/"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 10s + # Note: otel-collector uses scratch image - no wget/curl available for healthcheck + # The service exposes health endpoint at :13133 but we can't check it from inside container volumes: mongo_data: diff --git a/docs/architecture/event-bus.md b/docs/architecture/event-bus.md index 3041b22f..90654cfe 100644 --- a/docs/architecture/event-bus.md +++ b/docs/architecture/event-bus.md @@ -146,7 +146,7 @@ The `EventBusManager` provides singleton access to the EventBus with proper life async def get_event_bus(self) -> EventBus: async with self._lock: if self._event_bus is None: - self._event_bus = EventBus(self.settings, self.logger) + self._event_bus = EventBus(self.settings, self.logger, self._connection_metrics) await self._event_bus.__aenter__() return self._event_bus ``` diff --git a/docs/architecture/rate-limiting.md b/docs/architecture/rate-limiting.md index dfd8d1a1..706f4e99 100644 --- a/docs/architecture/rate-limiting.md +++ b/docs/architecture/rate-limiting.md @@ -141,7 +141,6 @@ Rate limiting is controlled by environment variables: | Variable | Default | Description | |---------------------------|------------------|------------------------------------------------------| -| `RATE_LIMIT_ENABLED` | `true` | Enable/disable rate limiting globally | | `RATE_LIMIT_REDIS_PREFIX` | `rate_limit:` | Redis key prefix for isolation | | `RATE_LIMIT_ALGORITHM` | `sliding_window` | Algorithm to use (`sliding_window` or `token_bucket`)| | `RATE_LIMIT_DEFAULT_REQUESTS` | `100` | Default request limit | diff --git a/docs/operations/cicd.md b/docs/operations/cicd.md index 07440893..51766210 100644 --- a/docs/operations/cicd.md +++ b/docs/operations/cicd.md @@ -1,7 +1,7 @@ # CI/CD Pipeline The project uses GitHub Actions to automate code quality checks, security scanning, testing, and documentation -deployment. Every push to `main` or `dev` and every pull request triggers the pipeline, with each workflow running in +deployment. Every push to `main` or `dev` and every pull request triggers the pipeline, with workflows running in parallel to provide fast feedback. 
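+
+Every workflow declares the same trigger shape. A minimal sketch of that block (the `paths` filter shown here is
+illustrative; each workflow lists its own relevant files):
+
+```yaml
+on:
+  push:
+    branches: [main, dev]
+    paths: ['backend/**']   # illustrative; varies per workflow
+  pull_request:
+    branches: [main, dev]
+    paths: ['backend/**']
+  workflow_dispatch:
+```
+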
## Pipeline overview @@ -11,6 +11,7 @@ graph LR subgraph "Code Quality" Ruff["Ruff Linting"] MyPy["MyPy Type Check"] + ESLint["ESLint + TypeScript"] end subgraph "Security" @@ -29,8 +30,12 @@ graph LR Frontend --> ScanFE end - subgraph "Testing" - Integration["Integration Tests"] + subgraph "Testing (stack-tests.yml)" + UnitBE["Backend Unit"] + UnitFE["Frontend Unit"] + Stack["Stack Tests"] + UnitBE --> Stack + UnitFE --> Stack end subgraph "Documentation" @@ -40,36 +45,38 @@ graph LR Push["Push / PR"] --> Ruff Push --> MyPy + Push --> ESLint Push --> Bandit Push --> Base - Push --> Integration + Push --> UnitBE + Push --> UnitFE Push --> Docs Docs -->|main only| Pages ``` All workflows trigger on pushes to `main` and `dev` branches, pull requests against those branches, and can be triggered -manually via `workflow_dispatch`. The documentation workflow additionally filters on path changes to avoid unnecessary -rebuilds. +manually via `workflow_dispatch`. Path filters ensure workflows only run when relevant files change. ## Linting and type checking -Two lightweight workflows run first since they catch obvious issues quickly. +Three lightweight workflows run first since they catch obvious issues quickly. -The linting workflow installs dependencies with [uv](https://docs.astral.sh/uv/) and -runs [Ruff](https://docs.astral.sh/ruff/) against the backend codebase. Ruff checks for style violations, import -ordering, and common bugs in a single pass. The configuration lives in `pyproject.toml` under `[tool.ruff]`, selecting -rules from the E, F, B, I, and W categories. +**Backend (Python):** +- [Ruff](https://docs.astral.sh/ruff/) checks for style violations, import ordering, and common bugs +- [mypy](https://mypy.readthedocs.io/) with strict settings catches type mismatches and missing return types -The type checking workflow runs [mypy](https://mypy.readthedocs.io/) with strict settings. It catches type mismatches, -missing return types, and incorrect function signatures before they reach production. Both workflows use uv's dependency -caching to skip reinstallation when the lockfile hasn't changed. +**Frontend (TypeScript):** +- ESLint checks for code quality issues +- TypeScript compiler (`tsc --noEmit`) verifies type correctness + +Both use dependency caching to skip reinstallation when lockfiles haven't changed. ## Security scanning The security workflow uses [Bandit](https://bandit.readthedocs.io/) to perform static analysis on Python source files, flagging issues like hardcoded credentials, SQL injection patterns, and unsafe deserialization. It excludes the test directory and reports only medium-severity and above findings. Container-level vulnerability scanning with Trivy runs -as part of the Docker workflow (see below). +as part of the Docker workflow. ## Docker build and scan @@ -106,93 +113,90 @@ graph TD The base image (`Dockerfile.base`) contains Python, system dependencies, and all pip packages. It uses [uv](https://docs.astral.sh/uv/) to install dependencies from the lockfile with `uv sync --locked --no-dev`, -ensuring reproducible builds without development tools. The base includes gcc, curl, and compression libraries needed -by some Python packages. - -The image sets `PATH="/app/.venv/bin:$PATH"` so services can run Python directly without `uv run` at startup. This -avoids dependency resolution at container start, making services launch in seconds rather than minutes. 
Separating base -from application means dependency changes rebuild the base layer while code changes only rebuild the thin application -layer. See [Docker build strategy](deployment.md#docker-build-strategy) for details on the local development setup. - -### Build contexts - -Backend and frontend builds reference the base image via Docker's `build-contexts` feature. The workflow passes the -appropriate tag (`pr-` for pull requests, `latest` for main branch) so each build uses the correct base. +ensuring reproducible builds without development tools. ### Security scanning After each image builds, [Trivy](https://trivy.dev/) scans it for known vulnerabilities in OS packages and Python -dependencies. The scan fails if it finds any critical or high severity issues with available fixes. Results upload to -GitHub Security for tracking. The backend scan respects `.trivyignore` for acknowledged vulnerabilities. +dependencies. The scan fails if it finds any critical or high severity issues with available fixes. -## Integration tests +## Stack tests (unified testing) -The integration test workflow is the most complex. It spins up the entire stack on a GitHub Actions runner to verify -that services work together correctly. +The `stack-tests.yml` workflow consolidates all testing that requires infrastructure into a single job, avoiding +redundant stack setup across multiple jobs. ```mermaid -sequenceDiagram - participant GHA as GitHub Actions - participant K3s as K3s Cluster - participant Docker as Docker Compose - participant Tests as pytest - - GHA->>K3s: Install k3s - GHA->>Docker: Pre-pull base images - GHA->>Docker: Build services (bake) - GHA->>Docker: Start compose stack - Docker->>Docker: Wait for health checks - GHA->>Tests: Run pytest - Tests->>Docker: HTTP requests - Tests-->>GHA: Coverage report - GHA->>GHA: Upload to Codecov +graph TD + subgraph "Parallel (fast)" + A[Backend Unit Tests] + B[Frontend Unit Tests] + end + + subgraph "Sequential (single stack)" + C[Setup k3s] + D[Build Images] + E[Start Stack] + F[Backend Integration] + G[Backend E2E] + H[Frontend E2E] + C --> D --> E --> F --> G --> H + end + + A --> C + B --> C + + style A fill:#e8f5e9 + style B fill:#e8f5e9 + style C fill:#e1f5fe + style D fill:#e1f5fe + style E fill:#e1f5fe + style F fill:#fff3e0 + style G fill:#fff3e0 + style H fill:#fff3e0 ``` -The workflow starts by installing [k3s](https://k3s.io/), a lightweight Kubernetes distribution, so the backend can -interact with a real cluster during tests. It pre-pulls container images in parallel to avoid cold-start delays during -the build step. +### Why unified? -Before building, the workflow modifies `docker-compose.yaml` using [yq](https://github.com/mikefarah/yq) to create a -CI-specific configuration. These modifications disable SASL authentication on Kafka and Zookeeper (unnecessary for -isolated CI), remove volume mounts that cause permission conflicts, inject test credentials for MongoDB, and disable -OpenTelemetry export to avoid connection errors. The result is a `docker-compose.ci.yaml` that works reliably in the -ephemeral CI environment. +Previously, backend integration, backend E2E, and frontend E2E tests each started their own full stack independently. 
+This caused: +- **3x setup time**: k3s installation, image builds, and docker-compose startup repeated for each job +- **~15 minutes total**: Each job took ~5 minutes, running in parallel but with redundant work -The [docker/bake-action](https://github.com/docker/bake-action) builds all services with GitHub Actions cache support. -It reads cache layers from previous runs and writes new layers back, so unchanged dependencies don't rebuild. The cache -scopes are branch-specific with a fallback to main, meaning feature branches benefit from the main branch cache even on -their first run. +The unified approach: +- **1x setup time**: Stack starts once, all tests run sequentially against it +- **~10 minutes total**: Single setup (~5 min) + all tests (~5 min) +- **Better resource efficiency**: One runner instead of three -Once images are built, `docker compose up -d` starts the stack. The workflow then uses curl's built-in retry mechanism -to wait for the backend health endpoint: +### Test execution order -```bash -curl --retry 60 --retry-delay 5 --retry-all-errors -ksf https://127.0.0.1:443/api/v1/health/live -``` +1. **Unit tests (parallel)**: Backend and frontend unit tests run simultaneously. They require no infrastructure and + complete quickly (~1-2 min each). -This approach is cleaner than shell loops and more reliable than Docker Compose's `--wait` flag (which has issues with -init containers that exit after completion). The backend's `depends_on` configuration ensures MongoDB, Redis, Kafka, -and Schema Registry are healthy before backend starts, so waiting for backend health implicitly waits for all -dependencies. Once the health check passes, the workflow runs pytest against the integration and unit test suites with -coverage reporting. Test isolation uses -per-worker database names and schema registry prefixes to avoid conflicts when pytest-xdist runs tests in parallel. +2. **Stack setup**: After unit tests pass, the stack-tests job: + - Installs k3s for Kubernetes functionality + - Builds all Docker images (with GHA layer caching) + - Starts the full stack via `./deploy.sh dev --ci` + - Seeds test users -Coverage reports go to [Codecov](https://codecov.io/) for tracking over time. The workflow always collects container -logs and Kubernetes events as artifacts, which helps debug failures without reproducing them locally. +3. **Integration & E2E tests (sequential)**: All tests run against the same stack: + - Backend integration tests (pytest) + - Backend E2E tests (pytest with k8s) + - Frontend E2E tests (Playwright) + +### Coverage reporting + +Each test suite reports coverage to [Codecov](https://codecov.io/): +- `backend-unit` flag for unit tests +- `backend-stack` flag for integration + E2E tests (combined) +- `frontend-unit` flag for frontend unit tests ## Documentation The docs workflow builds this documentation site using [MkDocs](https://www.mkdocs.org/) with the [Material theme](https://squidfunk.github.io/mkdocs-material/). It triggers only when files under `docs/`, -`mkdocs.yml`, or the workflow itself change, avoiding rebuilds for unrelated commits. - -Before building, the workflow fetches the current OpenAPI spec from the production API and injects it into the docs -directory. The [swagger-ui-tag](https://github.com/blueswen/mkdocs-swagger-ui-tag) plugin renders this spec as an -interactive API reference. +`mkdocs.yml`, or the workflow itself change. -On pushes to main, the workflow deploys the built site to GitHub Pages. 
Pull requests only build without deploying, so -you can verify the build succeeds before merging. The deployment uses GitHub's native Pages action with artifact -uploads, which handles cache invalidation and atomic deployments automatically. +On pushes to main, the workflow deploys the built site to GitHub Pages. ## Running locally @@ -210,133 +214,88 @@ uv run mypy . # Security scan uv tool run bandit -r . -x tests/ -ll -# Unit tests only (fast) +# Unit tests only (fast, no infrastructure needed) uv run pytest tests/unit -v - -# Full integration tests (requires docker compose up) -uv run pytest tests/integration tests/unit -v ``` -For the full integration test experience, start the stack with `docker compose up -d`, wait for the backend to be -healthy, then run pytest. Alternatively, use `./deploy.sh test` which handles startup, health checks, testing, and -cleanup automatically. The CI workflow's yq modifications aren't necessary locally since your environment -likely has the expected configuration already. - -## Build optimizations - -The CI pipeline employs several caching strategies to minimize build times. Without caching, a full frontend E2E build -takes 3+ minutes; with caching, subsequent runs complete in under 30 seconds. +```bash +cd frontend -### Docker layer caching +# Linting +npm run lint -The frontend E2E workflow uses [docker/build-push-action](https://github.com/docker/build-push-action) with GitHub -Actions cache for each image: +# Type checking +npx tsc --noEmit -```yaml -- name: Build frontend image - uses: docker/build-push-action@v6 - with: - context: ./frontend - file: ./frontend/Dockerfile - load: true - tags: integr8scode-frontend:latest - cache-from: type=gha,scope=frontend - cache-to: type=gha,mode=max,scope=frontend +# Unit tests +npm run test ``` -Each service has its own cache scope (`backend-base`, `backend`, `frontend`, `cert-generator`), preventing cache -pollution between unrelated builds. The `mode=max` setting caches all layers, not just the final image, so even -intermediate layers benefit from caching. +For integration and E2E tests, use the same deployment as CI: -### Local registry for dependent builds +```bash +# Start full stack (requires k8s configured locally) +./deploy.sh dev -The `docker-container` buildx driver runs in isolation and cannot access images in the local Docker daemon. This -creates a problem when the backend image needs to reference the base image via `FROM base`. The workflow solves this -using a local registry: +# Run tests inside the running backend container +docker compose exec -T backend uv run pytest tests/integration -v +docker compose exec -T backend uv run pytest tests/e2e -v -```yaml -services: - registry: - image: registry:2 - ports: - - 5000:5000 - -steps: - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver-opts: network=host - - - name: Build and push base image - uses: docker/build-push-action@v6 - with: - push: true - tags: localhost:5000/integr8scode-base:latest - cache-from: type=gha,scope=backend-base - cache-to: type=gha,mode=max,scope=backend-base - - - name: Build backend image - uses: docker/build-push-action@v6 - with: - build-contexts: | - base=docker-image://localhost:5000/integr8scode-base:latest - cache-from: type=gha,scope=backend - cache-to: type=gha,mode=max,scope=backend +# Run frontend E2E tests +cd frontend && npx playwright test ``` -The `network=host` driver option allows buildx to reach `localhost:5000`. 
After pushing the base image to the local -registry, subsequent builds can reference it with `docker-image://localhost:5000/...`. This preserves full GHA layer -caching for all images while allowing dependent builds to work correctly. +Or use `./deploy.sh test` which handles everything automatically. -### Infrastructure image caching +## Build optimizations -A reusable action at `.github/actions/docker-cache` handles infrastructure images (MongoDB, Redis, Kafka, Schema -Registry). It stores pulled images as zstd-compressed tarballs in the GitHub Actions cache: +The CI pipeline employs several caching strategies to minimize build times. + +### Docker layer caching + +All image builds use [docker/build-push-action](https://github.com/docker/build-push-action) with GitHub Actions cache: ```yaml -- name: Cache and load Docker images - uses: ./.github/actions/docker-cache +- name: Build base image + uses: docker/build-push-action@v6 with: - images: mongo:8.0 redis:7-alpine apache/kafka:3.9.0 confluentinc/cp-schema-registry:7.5.0 + context: ./backend + file: ./backend/Dockerfile.base + load: true + tags: integr8scode-base:latest + cache-from: type=gha,scope=backend-base + cache-to: type=gha,mode=max,scope=backend-base ``` -On cache hit, images load from local tarballs instead of pulling from registries. This saves ~30 seconds per run and -avoids Docker Hub rate limits. - -### Frontend Dockerfile optimizations - -The frontend Dockerfile uses several techniques to minimize build time and image size: - -| Optimization | Before | After | Impact | -|-----------------|-----------------------------|------------------------|-------------------------| -| Base image | `node:22` (1GB) | `node:22-slim` (200MB) | -80% image size | -| Package install | `npm install` | `npm ci` | 3x faster, reproducible | -| Lockfile | Excluded in `.dockerignore` | Included | Enables `npm ci` | +Each service has its own cache scope (`backend-base`, `backend`, `frontend`, `cert-generator`), preventing cache +pollution between unrelated builds. -The `npm ci` command requires `package-lock.json` and installs dependencies exactly as specified, skipping dependency -resolution. This is faster than `npm install` and ensures reproducible builds. +### Infrastructure image caching -### Cache invalidation +A reusable action at `.github/actions/docker-cache` handles infrastructure images (MongoDB, Redis, Kafka, Schema +Registry). It stores pulled images as zstd-compressed tarballs in the GitHub Actions cache, saving ~30 seconds per run +and avoiding Docker Hub rate limits. -Docker layer caching works best when layers change infrequently. The Dockerfiles are structured to maximize cache hits: +### k3s setup action -1. **System dependencies** - Rarely change, cached long-term -2. **Package lockfiles** - Change only when dependencies update -3. **Application code** - Changes frequently, rebuilt on each commit +A reusable composite action at `.github/actions/k3s-setup` handles Kubernetes setup: +- Installs k3s with traefik disabled +- Creates the `integr8scode` namespace +- Generates a kubeconfig accessible from Docker containers (via `host.docker.internal`) -By copying lockfiles before application code, dependency installation layers remain cached even when code changes. +This eliminates copy-paste across workflows and ensures consistent k8s setup. 
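+
+A minimal sketch of invoking the composite action from a workflow job (step placement is illustrative; the action
+lives in-repo, so checkout must run first):
+
+```yaml
+steps:
+  - uses: actions/checkout@v6
+  - name: Setup Kubernetes (k3s)
+    # Installs k3s, creates the namespace, writes a container-accessible kubeconfig
+    uses: ./.github/actions/k3s-setup
+```
+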
## Workflow files -| Workflow | File | Purpose | -|---------------------|-------------------------------------|-------------------------------| -| Ruff Linting | `.github/workflows/ruff.yml` | Code style and import checks | -| MyPy Type Checking | `.github/workflows/mypy.yml` | Static type analysis | -| Security Scanning | `.github/workflows/security.yml` | Bandit SAST | -| Docker Build & Scan | `.github/workflows/docker.yml` | Image build and Trivy scan | -| Backend CI | `.github/workflows/backend-ci.yml` | Unit, integration, E2E tests | -| Frontend CI | `.github/workflows/frontend-ci.yml` | Unit tests and Playwright E2E | -| Documentation | `.github/workflows/docs.yml` | MkDocs build and deploy | - -All workflows use [uv](https://docs.astral.sh/uv/) for Python dependency management, with caching enabled via -`astral-sh/setup-uv`. The lockfile at `backend/uv.lock` ensures reproducible installs across CI runs. +| Workflow | File | Purpose | +|--------------------|--------------------------------------|------------------------------------| +| Ruff Linting | `.github/workflows/ruff.yml` | Python code style and import checks | +| MyPy Type Checking | `.github/workflows/mypy.yml` | Python static type analysis | +| Frontend CI | `.github/workflows/frontend-ci.yml` | TypeScript lint and type check | +| Security Scanning | `.github/workflows/security.yml` | Bandit SAST | +| Docker Build & Scan| `.github/workflows/docker.yml` | Image build and Trivy scan | +| Stack Tests | `.github/workflows/stack-tests.yml` | All unit, integration, and E2E tests | +| Documentation | `.github/workflows/docs.yml` | MkDocs build and deploy | + +All workflows use [uv](https://docs.astral.sh/uv/) for Python dependency management and npm for Node.js, with caching +enabled for both. diff --git a/docs/operations/deployment.md b/docs/operations/deployment.md index e326e618..12dd323c 100644 --- a/docs/operations/deployment.md +++ b/docs/operations/deployment.md @@ -336,6 +336,91 @@ Topics should be prefixed (e.g., `prefexecution_events` not `execution_events`). kubectl get secret -n integr8scode integr8scode-mongodb -o jsonpath='{.data.mongodb-root-password}' | base64 -d ``` +### k3s crash loop after VPN or IP change + +**Symptoms:** + +- `systemctl status k3s` shows `Active: activating (auto-restart) (Result: exit-code)` +- k3s repeatedly crashes with `status=1/FAILURE` +- `kubectl` commands fail with `connection refused` or `ServiceUnavailable` +- API intermittently responds then stops + +**Root cause:** + +When the host IP changes (VPN on/off, network switch, DHCP renewal), k3s stores stale IP references in two locations: + +1. **SQLite database** (`/var/lib/rancher/k3s/server/db/`) — contains cluster state with old IP +2. **TLS certificates** (`/var/lib/rancher/k3s/server/tls/`) — generated with old IP in SAN field + +k3s detects the mismatch between config (`node-ip` in `/etc/rancher/k3s/config.yaml`) and stored data, causing the crash loop. + +**Solution:** + +> **WARNING: DATA LOSS** — The steps below will permanently delete all cluster state, including: +> - All deployed workloads (pods, deployments, services) +> - All cluster configuration (namespaces, RBAC, ConfigMaps, Secrets) +> - All PersistentVolume data stored in the default local-path provisioner +> +> **Before proceeding, back up:** +> - etcd snapshots: `sudo k3s etcd-snapshot save` +> - kubeconfig files +> - Application manifests +> - Any critical PersistentVolume data +> +> Confirm backups are complete before continuing. + +```bash +# 1. 
Stop k3s +sudo systemctl stop k3s + +# 2. Delete corrupted database (k3s will rebuild it) +sudo rm -rf /var/lib/rancher/k3s/server/db/ + +# 3. Delete old TLS certificates (k3s will regenerate them) +sudo rm -rf /var/lib/rancher/k3s/server/tls/ + +# 4. Start k3s with clean state +sudo systemctl start k3s +``` + +After k3s restarts, regenerate the application kubeconfig: + +```bash +# Regenerate kubeconfig with fresh ServiceAccount token +docker compose restart cert-generator + +# Restart workers to pick up new kubeconfig +docker compose restart k8s-worker pod-monitor +``` + +**Verification:** + +```bash +# Check k3s is running +systemctl status k3s # Should show "active (running)" + +# Test API access +KUBECONFIG=/path/to/backend/kubeconfig.yaml kubectl get namespaces + +# Check workers connected +docker logs k8s-worker 2>&1 | tail -5 +docker logs pod-monitor 2>&1 | tail -5 +``` + +**VPN-specific notes:** + +When using VPN (e.g., NordVPN with WireGuard/NordLynx): + +- **LAN Discovery must be enabled**: `nordvpn set lan-discovery enabled` +- VPN can interfere with Docker's `host` network mode and k3s flannel networking +- Consider using bridge networking for containers that need to reach k3s + +**References:** + +- [k3s IP change issue #277](https://github.com/k3s-io/k3s/issues/277) +- [k3s crash loop troubleshooting](https://dev.to/shankar_t/my-k3s-pi-cluster-died-after-a-reboot-a-troubleshooting-war-story-m93) +- [k3s certificate documentation](https://docs.k3s.io/cli/certificate) + ## Pre-built images For production deployments, you can skip the local build step entirely by using pre-built images from GitHub Container diff --git a/docs/reference/environment-variables.md b/docs/reference/environment-variables.md index 75b32d9d..0c92f9e2 100644 --- a/docs/reference/environment-variables.md +++ b/docs/reference/environment-variables.md @@ -70,14 +70,13 @@ Complete reference of all environment variables used by the Integr8sCode backend | `REDIS_DB` | `0` | Redis database number | | `REDIS_PASSWORD` | *none* | Redis password | | `REDIS_SSL` | `false` | Enable SSL/TLS | -| `REDIS_MAX_CONNECTIONS` | `50` | Connection pool size | +| `REDIS_MAX_CONNECTIONS` | `200` | Connection pool size | | `REDIS_DECODE_RESPONSES` | `true` | Decode responses to strings | ## Rate Limiting Configuration | Variable | Default | Description | |-------------------------------|------------------|------------------------------------------------| -| `RATE_LIMIT_ENABLED` | `true` | Enable rate limiting | | `RATE_LIMITS` | `100/minute` | Default rate limit string | | `RATE_LIMIT_DEFAULT_REQUESTS` | `100` | Default request limit | | `RATE_LIMIT_DEFAULT_WINDOW` | `60` | Default window (seconds) | diff --git a/docs/testing/frontend-testing.md b/docs/testing/frontend-testing.md index 43aefcd4..4de97d44 100644 --- a/docs/testing/frontend-testing.md +++ b/docs/testing/frontend-testing.md @@ -89,6 +89,72 @@ Component tests render Svelte components in jsdom and verify their DOM output, p E2E tests run in Playwright against the real application. They exercise full user flows like registration, login, theme switching, and protected route access. +## Playwright authentication + +E2E tests use worker-scoped fixtures to authenticate once per worker and reuse the browser context across all tests. This avoids hammering the backend with 100+ login requests. + +### How it works + +1. **Worker-scoped fixtures** (`userContext`, `adminContext`) authenticate once when a worker starts +2. 
The authenticated browser context is kept alive for the entire worker lifetime +3. **Test-scoped fixtures** (`userPage`, `adminPage`) create new pages within the authenticated context + +```text +e2e/ +├── fixtures.ts # Worker-scoped auth fixtures +├── auth.spec.ts # Tests login flow itself (uses raw page) +├── editor.spec.ts # User tests (use userPage fixture) +├── settings.spec.ts # User tests (use userPage fixture) +├── home.spec.ts # Public tests (use raw page) +└── admin-*.spec.ts # Admin tests (use adminPage fixture) +``` + +### Fixture types + +Tests use different fixtures based on auth requirements: + +| Fixture | Scope | Auth State | +|---------|-------|------------| +| `userPage` | Test | Pre-authenticated as regular user | +| `adminPage` | Test | Pre-authenticated as admin | +| `page` | Test | No auth (for public pages, login flow tests) | + +### Writing tests + +Tests request the appropriate fixture—the browser is already authenticated: + +```typescript +// User tests: use userPage fixture +test('displays editor page', async ({ userPage }) => { + await userPage.goto('/editor'); + await expect(userPage.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); +}); + +// Admin tests: use adminPage fixture +test('shows admin dashboard', async ({ adminPage }) => { + await adminPage.goto('/admin/users'); + await expect(adminPage.getByRole('heading', { name: 'User Management' })).toBeVisible(); +}); + +// Public page tests: use raw page +test('shows home page', async ({ page }) => { + await page.goto('/'); + await expect(page.getByText('Welcome')).toBeVisible(); +}); +``` + +### Testing unauthenticated flows + +For tests that need to verify login/logout behavior, use the raw `page` fixture with `clearSession()`: + +```typescript +test('redirects unauthenticated users to login', async ({ page }) => { + await clearSession(page); // Clears cookies and storage + await page.goto('/editor'); + await expect(page).toHaveURL(/\/login/); +}); +``` + ## Configuration Vitest configuration lives in [`vitest.config.ts`](https://github.com/HardMax71/Integr8sCode/blob/main/frontend/vitest.config.ts): diff --git a/frontend/e2e/.auth/.gitignore b/frontend/e2e/.auth/.gitignore new file mode 100644 index 00000000..a6c57f5f --- /dev/null +++ b/frontend/e2e/.auth/.gitignore @@ -0,0 +1 @@ +*.json diff --git a/frontend/e2e/admin-events.spec.ts b/frontend/e2e/admin-events.spec.ts new file mode 100644 index 00000000..71c12abe --- /dev/null +++ b/frontend/e2e/admin-events.spec.ts @@ -0,0 +1,58 @@ +import { test, expect, describeAdminCommonTests, describeAdminAccessControl, expectTableOrEmptyState, expectTableColumn } from './fixtures'; + +const PATH = '/admin/events' as const; +const EMPTY_PATTERN = /No events found/i; + +test.describe('Admin Events', () => { + describeAdminCommonTests(test, PATH); + + test('shows action buttons', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.getByRole('button', { name: /Filters/i })).toBeVisible(); + await expect(adminPage.getByRole('button', { name: /Export/i })).toBeVisible(); + await expect(adminPage.getByRole('button', { name: /Refresh/i })).toBeVisible(); + }); + + test.describe('Filtering', () => { + test('filter panel shows date range inputs', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: /Filters/i }).click(); + await expect(adminPage.locator('input[type="datetime-local"], input[type="date"]').first()).toBeVisible(); + }); + }); + + test.describe('Export', () => { + 
test('can open export dropdown', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: /Export/i }).click(); + await expect(adminPage.getByText('CSV')).toBeVisible(); + await expect(adminPage.getByText('JSON')).toBeVisible(); + }); + }); + + test.describe('Table', () => { + test('shows events table or empty state', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expectTableOrEmptyState(adminPage, EMPTY_PATTERN); + }); + + test('shows Time column when data exists', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expectTableColumn(adminPage, 'Time', EMPTY_PATTERN); + }); + }); + + test.describe('Refresh', () => { + test('can manually refresh events', async ({ adminPage }) => { + await adminPage.goto(PATH); + const [response] = await Promise.all([ + adminPage.waitForResponse(resp => resp.url().includes('/events') && resp.status() === 200), + adminPage.getByRole('button', { name: /Refresh/i }).click(), + ]); + expect(response.ok()).toBe(true); + await expect(adminPage.getByRole('heading', { name: 'Event Browser' })).toBeVisible(); + }); + }); +}); + +describeAdminAccessControl(test, PATH); diff --git a/frontend/e2e/admin-sagas.spec.ts b/frontend/e2e/admin-sagas.spec.ts new file mode 100644 index 00000000..01599991 --- /dev/null +++ b/frontend/e2e/admin-sagas.spec.ts @@ -0,0 +1,64 @@ +import { test, expect, describeAdminCommonTests, describeAdminAccessControl, expectTableOrEmptyState, expectTableColumn } from './fixtures'; + +const PATH = '/admin/sagas' as const; +const EMPTY_PATTERN = /No sagas found/i; + +test.describe('Admin Sagas', () => { + describeAdminCommonTests(test, PATH); + + test('shows auto-refresh control', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.getByText(/Auto-refresh/i)).toBeVisible(); + }); + + test.describe('Filtering', () => { + test('shows search input', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.locator('input[placeholder*="Search"], input[type="search"]').first()).toBeVisible(); + }); + + test('shows state filter dropdown', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.locator('select, button').filter({ hasText: /All States|running|completed|failed/i }).first()).toBeVisible(); + }); + + test('can clear filters', async ({ adminPage }) => { + await adminPage.goto(PATH); + const searchInput = adminPage.locator('input[placeholder*="Search"], input[type="search"]').first(); + await searchInput.fill('test-filter'); + await adminPage.getByRole('button', { name: /Clear/i }).click(); + await expect(searchInput).toHaveValue(''); + }); + }); + + test.describe('Table', () => { + test('shows sagas table or empty state', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expectTableOrEmptyState(adminPage, EMPTY_PATTERN); + }); + + test('shows State column when data exists', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expectTableColumn(adminPage, 'State', EMPTY_PATTERN); + }); + }); + + test.describe('Auto-Refresh', () => { + test('can toggle auto-refresh', async ({ adminPage }) => { + await adminPage.goto(PATH); + const toggle = adminPage.locator('input[type="checkbox"]').first(); + const initial = await toggle.isChecked(); + await toggle.click(); + expect(await toggle.isChecked()).toBe(!initial); + }); + + test('can change refresh rate', async ({ adminPage }) => { + await adminPage.goto(PATH); + const rateSelect = adminPage.locator('#refresh-rate'); + 
await rateSelect.selectOption('10'); + await expect(rateSelect).toHaveValue('10'); + }); + }); +}); + +describeAdminAccessControl(test, PATH); diff --git a/frontend/e2e/admin-settings.spec.ts b/frontend/e2e/admin-settings.spec.ts new file mode 100644 index 00000000..f2322cf0 --- /dev/null +++ b/frontend/e2e/admin-settings.spec.ts @@ -0,0 +1,72 @@ +import { test, expect, describeAdminCommonTests, describeAdminAccessControl, expectToastVisible } from './fixtures'; + +const PATH = '/admin/settings' as const; + +const SETTINGS_SECTIONS = [ + { name: 'Execution Limits', inputs: ['#max-timeout', '#max-memory', '#max-cpu', '#max-concurrent'] }, + { name: 'Security Settings', inputs: ['#min-password', '#session-timeout', '#max-login', '#lockout-duration'] }, + { name: 'Monitoring Settings', inputs: ['#metrics-retention', '#log-level', '#enable-tracing', '#sampling-rate'] }, +] as const; + +test.describe('Admin Settings', () => { + describeAdminCommonTests(test, PATH); + + test('shows configuration card', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.getByText('Configuration')).toBeVisible(); + }); + + for (const section of SETTINGS_SECTIONS) { + test(`shows ${section.name} section with all inputs`, async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.getByText(section.name)).toBeVisible(); + for (const input of section.inputs) { + await expect(adminPage.locator(input)).toBeVisible(); + } + }); + } + + test('can modify max timeout value', async ({ adminPage }) => { + await adminPage.goto(PATH); + const input = adminPage.locator('#max-timeout'); + const current = await input.inputValue(); + await input.fill('120'); + await expect(input).toHaveValue('120'); + await input.fill(current); + }); + + test('log level select has correct options', async ({ adminPage }) => { + await adminPage.goto(PATH); + // Wait for select to be visible and have options loaded + await expect(adminPage.locator('#log-level')).toBeVisible(); + await expect(adminPage.locator('#log-level option').first()).toBeAttached(); + const options = await adminPage.locator('#log-level option').allTextContents(); + expect(options).toContain('DEBUG'); + expect(options).toContain('INFO'); + expect(options).toContain('WARNING'); + expect(options).toContain('ERROR'); + }); + + test('can change log level', async ({ adminPage }) => { + await adminPage.goto(PATH); + const select = adminPage.locator('#log-level'); + const original = await select.inputValue(); + await select.selectOption('DEBUG'); + await expect(select).toHaveValue('DEBUG'); + await select.selectOption(original); + }); + + test('shows save and reset buttons', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.getByRole('button', { name: 'Save Settings' })).toBeVisible(); + await expect(adminPage.getByRole('button', { name: 'Reset to Defaults' })).toBeVisible(); + }); + + test('can save settings', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: 'Save Settings' }).click(); + await expectToastVisible(adminPage); + }); +}); + +describeAdminAccessControl(test, PATH); diff --git a/frontend/e2e/admin-users.spec.ts b/frontend/e2e/admin-users.spec.ts new file mode 100644 index 00000000..92d8c5ba --- /dev/null +++ b/frontend/e2e/admin-users.spec.ts @@ -0,0 +1,102 @@ +import { test, expect, describeAdminCommonTests, describeAdminAccessControl } from './fixtures'; + +const PATH = '/admin/users' as const; + +// Helper to navigate and wait for 
users data to load +async function gotoAndWaitForUsers(adminPage: import('@playwright/test').Page) { + await adminPage.goto(PATH); + // Wait for table rows to appear (seeded users exist), not "Users (0)" empty state + await adminPage.locator('table tbody tr').first().waitFor({ timeout: 15000 }); +} + +test.describe('Admin Users', () => { + // Increase timeout for tests that wait for API data to load + test.describe.configure({ timeout: 20000 }); + + describeAdminCommonTests(test, PATH); + + test('shows create user and refresh buttons', async ({ adminPage }) => { + await adminPage.goto(PATH); + await expect(adminPage.getByRole('button', { name: /Create User/i }).first()).toBeVisible(); + await expect(adminPage.getByRole('button', { name: /Refresh/i })).toBeVisible(); + }); + + test('shows users table with correct columns', async ({ adminPage }) => { + await gotoAndWaitForUsers(adminPage); + await expect(adminPage.locator('table').first()).toBeVisible(); + await expect(adminPage.getByRole('columnheader', { name: 'Username' })).toBeVisible(); + await expect(adminPage.getByRole('columnheader', { name: 'Email' })).toBeVisible(); + await expect(adminPage.getByRole('columnheader', { name: 'Role' })).toBeVisible(); + await expect(adminPage.getByRole('columnheader', { name: 'Status' })).toBeVisible(); + }); + + test('displays seeded users in table', async ({ adminPage }) => { + await gotoAndWaitForUsers(adminPage); + await expect(adminPage.locator('text=user').first()).toBeVisible(); + }); + + test('can search for users', async ({ adminPage }) => { + await gotoAndWaitForUsers(adminPage); + const searchInput = adminPage.locator('input[placeholder*="Search"]').first(); + await searchInput.fill('admin'); + await expect(adminPage.locator('td, [class*="card"]').filter({ hasText: 'admin' }).first()).toBeVisible(); + }); + + test.describe('Create Modal', () => { + test('can open create user modal', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: /Create User/i }).first().click(); + await expect(adminPage.getByRole('heading', { name: 'Create New User' })).toBeVisible(); + }); + + test('create modal shows all form fields', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: /Create User/i }).first().click(); + await expect(adminPage.locator('#user-form-username')).toBeVisible(); + await expect(adminPage.locator('#user-form-email')).toBeVisible(); + await expect(adminPage.locator('#user-form-password')).toBeVisible(); + await expect(adminPage.locator('#user-form-role')).toBeVisible(); + }); + + test('can close create modal with cancel button', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: /Create User/i }).first().click(); + await expect(adminPage.getByRole('heading', { name: 'Create New User' })).toBeVisible(); + await adminPage.getByRole('button', { name: 'Cancel' }).click(); + await expect(adminPage.getByRole('heading', { name: 'Create New User' })).not.toBeVisible(); + }); + + test('can fill and submit create user form', async ({ adminPage }) => { + await adminPage.goto(PATH); + await adminPage.getByRole('button', { name: /Create User/i }).first().click(); + const uniqueUsername = `testuser_${Date.now()}`; + await adminPage.locator('#user-form-username').fill(uniqueUsername); + await adminPage.locator('#user-form-email').fill(`${uniqueUsername}@example.com`); + await adminPage.locator('#user-form-password').fill('TestPassword123!'); + await 
adminPage.getByLabel('Create New User').getByRole('button', { name: 'Create User' }).click(); + await expect(adminPage.getByRole('heading', { name: 'Create New User' })).not.toBeVisible({ timeout: 10000 }); + }); + }); + + test.describe('Edit', () => { + test('can open edit modal for existing user', async ({ adminPage }) => { + await gotoAndWaitForUsers(adminPage); + const editButton = adminPage.locator('table tbody tr').first().locator('button[title="Edit User"]'); + await expect(editButton).toBeVisible(); + await editButton.click(); + await expect(adminPage.getByRole('heading', { name: 'Edit User' })).toBeVisible(); + }); + + test('edit modal pre-fills user data', async ({ adminPage }) => { + await gotoAndWaitForUsers(adminPage); + const editButton = adminPage.locator('table tbody tr').first().locator('button[title="Edit User"]'); + await expect(editButton).toBeVisible(); + await editButton.click(); + await expect(adminPage.getByRole('heading', { name: 'Edit User' })).toBeVisible(); + const value = await adminPage.locator('#user-form-username').inputValue(); + expect(value.length).toBeGreaterThan(0); + }); + }); +}); + +describeAdminAccessControl(test, PATH); diff --git a/frontend/e2e/auth.spec.ts b/frontend/e2e/auth.spec.ts index 97323320..1f79b311 100644 --- a/frontend/e2e/auth.spec.ts +++ b/frontend/e2e/auth.spec.ts @@ -1,21 +1,25 @@ -import { test, expect } from '@playwright/test'; +import { test, expect, clearSession, loginAsUser, TEST_USERS } from './fixtures'; + +const PATH = '/login'; + +async function navigateToLogin(page: import('@playwright/test').Page): Promise<void> { + await clearSession(page); + await page.goto(PATH); + await page.waitForSelector('#username'); +} + +async function fillLoginForm(page: import('@playwright/test').Page, username: string, password: string): Promise<void> { + await page.fill('#username', username); + await page.fill('#password', password); + await page.click('button[type="submit"]'); +} test.describe('Authentication', () => { test.beforeEach(async ({ page }) => { - // Clear ALL auth state: cookies (HTTP-only auth token) + localStorage (cached state) - await page.context().clearCookies(); - await page.goto('/login'); - await page.evaluate(() => { - localStorage.clear(); - sessionStorage.clear(); - }); + await navigateToLogin(page); }); test('shows login page with form elements', async ({ page }) => { - await page.goto('/login'); - // Wait for the login form to render - await page.waitForSelector('#username'); - await expect(page.getByRole('heading', { name: 'Sign in to your account' })).toBeVisible(); await expect(page.locator('#username')).toBeVisible(); await expect(page.locator('#password')).toBeVisible(); @@ -23,134 +27,70 @@ test.describe('Authentication', () => { }); test('prevents submission and shows validation for empty form', async ({ page }) => { - await page.goto('/login'); - await page.waitForSelector('#username'); - - // Click submit without filling any fields await page.click('button[type="submit"]'); - - // Form should not submit - still on login page await expect(page).toHaveURL(/\/login/); - - // Browser focuses first invalid required field and shows validation const usernameInput = page.locator('#username'); await expect(usernameInput).toBeFocused(); - - // Check HTML5 validity state const isInvalid = await usernameInput.evaluate((el: HTMLInputElement) => !el.validity.valid); expect(isInvalid).toBe(true); - - // Verify validation message exists (browser shows "Please fill out this field" or similar) const validationMessage = await 
usernameInput.evaluate((el: HTMLInputElement) => el.validationMessage); expect(validationMessage.length).toBeGreaterThan(0); }); test('shows error with invalid credentials', async ({ page }) => { - await page.goto('/login'); - await page.waitForSelector('#username'); - - await page.fill('#username', 'invaliduser'); - await page.fill('#password', 'wrongpassword'); - await page.click('button[type="submit"]'); - + await fillLoginForm(page, 'invaliduser', 'wrongpassword'); await expect(page.locator('p.text-red-600, p.text-red-400')).toBeVisible(); }); test('redirects to editor on successful login', async ({ page }) => { - await page.goto('/login'); - await page.waitForSelector('#username'); - - await page.fill('#username', 'user'); - await page.fill('#password', 'user123'); - await page.click('button[type="submit"]'); - - // Wait for Editor page content (router updates DOM before URL) + await fillLoginForm(page, TEST_USERS.user.username, TEST_USERS.user.password); await expect(page.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); await expect(page).toHaveURL(/\/editor/); }); test('shows loading state during login', async ({ page }) => { - await page.goto('/login'); - await page.waitForSelector('#username'); - - await page.fill('#username', 'user'); - await page.fill('#password', 'user123'); - + await page.fill('#username', TEST_USERS.user.username); + await page.fill('#password', TEST_USERS.user.password); const submitButton = page.locator('button[type="submit"]'); await submitButton.click(); - await expect(submitButton).toContainText(/Logging in|Sign in/); }); test('redirects unauthenticated users from protected routes', async ({ page }) => { await page.goto('/editor'); - // Should redirect to login and show login form await page.waitForSelector('#username'); await expect(page).toHaveURL(/\/login/); }); test('preserves redirect path after login', async ({ page }) => { await page.goto('/settings'); - // Should redirect to login await page.waitForSelector('#username'); await expect(page).toHaveURL(/\/login/); - - // Login - await page.fill('#username', 'user'); - await page.fill('#password', 'user123'); - await page.click('button[type="submit"]'); - - // Wait for Settings page content (redirect target) + await fillLoginForm(page, TEST_USERS.user.username, TEST_USERS.user.password); await expect(page.getByRole('heading', { name: 'Settings', level: 1 })).toBeVisible(); await expect(page).toHaveURL(/\/settings/); }); test('has link to registration page', async ({ page }) => { - await page.goto('/login'); - await page.waitForSelector('#username'); - - // Use specific text to avoid matching the Register button in header const registerLink = page.getByRole('link', { name: 'create a new account' }); await expect(registerLink).toBeVisible(); }); test('can navigate to registration page', async ({ page }) => { - await page.goto('/login'); - await page.waitForSelector('#username'); - - // Click the specific link in the form, not the header button await page.getByRole('link', { name: 'create a new account' }).click(); - await expect(page).toHaveURL(/\/register/); }); }); test.describe('Logout', () => { test.beforeEach(async ({ page }) => { - // Clear all state first - await page.context().clearCookies(); - await page.goto('/login'); - await page.evaluate(() => { - localStorage.clear(); - sessionStorage.clear(); - }); - await page.waitForSelector('#username'); - - // Login - await page.fill('#username', 'user'); - await page.fill('#password', 'user123'); - await 
page.click('button[type="submit"]'); - // Wait for Editor page content (router updates DOM before URL) - await expect(page.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); + await loginAsUser(page); }); test('can logout from authenticated state', async ({ page }) => { - // Open user dropdown (contains the logout button) const userDropdown = page.locator('.user-dropdown-container button').first(); await expect(userDropdown).toBeVisible(); await userDropdown.click(); - - // Click logout button inside the dropdown const logoutButton = page.locator('button:has-text("Logout")').first(); await expect(logoutButton).toBeVisible(); await logoutButton.click(); diff --git a/frontend/e2e/editor.spec.ts b/frontend/e2e/editor.spec.ts new file mode 100644 index 00000000..cf2ab346 --- /dev/null +++ b/frontend/e2e/editor.spec.ts @@ -0,0 +1,136 @@ +import { test, expect, runExampleAndExecute, expectToastVisible, describeAuthRequired } from './fixtures'; + +const PATH = '/editor'; + +test.describe('Editor Page', () => { + test('displays editor page with all main elements', async ({ userPage }) => { + await userPage.goto(PATH); + await expect(userPage.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); + await expect(userPage.locator('.cm-editor')).toBeVisible(); + await expect(userPage.getByText('Execution Output')).toBeVisible(); + await expect(userPage.getByRole('button', { name: /Run Script/i })).toBeVisible(); + }); + + test('shows language selector with available languages', async ({ userPage }) => { + await userPage.goto(PATH); + const languageButton = userPage.locator('button[aria-haspopup="menu"]').first(); + await expect(languageButton).toBeVisible(); + await languageButton.click(); + await expect(userPage.getByRole('menu', { name: 'Select language and version' })).toBeVisible(); + }); + + test('can select different language', async ({ userPage }) => { + await userPage.goto(PATH); + const languageButton = userPage.locator('button[aria-haspopup="menu"]').first(); + await languageButton.click(); + const pythonButton = userPage.getByRole('menuitem', { name: /python/i }); + await expect(pythonButton).toBeVisible(); + await pythonButton.hover(); + const versionMenu = userPage.getByRole('menu', { name: /python versions/i }); + await expect(versionMenu).toBeVisible(); + const versionOption = versionMenu.getByRole('menuitemradio').first(); + await versionOption.click({ force: true }); + await expect(languageButton).toContainText(/python/i); + }); + + test('shows file actions when panel opened', async ({ userPage }) => { + await userPage.goto(PATH); + // Use the button's accessible name from sr-only text + const optionsToggle = userPage.getByRole('button', { name: 'Toggle Script Options' }); + await expect(optionsToggle).toBeVisible(); + await optionsToggle.click(); + await expect(userPage.getByText('File Actions')).toBeVisible(); + await expect(userPage.getByRole('button', { name: /New/i })).toBeVisible(); + await expect(userPage.getByRole('button', { name: /Upload/i })).toBeVisible(); + await expect(userPage.locator('button[title="Save current script"]')).toBeVisible(); + await expect(userPage.getByRole('button', { name: /Export/i })).toBeVisible(); + }); + + test('can load example script', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: /Example/i }).click(); + const editor = userPage.locator('.cm-content'); + await expect(editor).not.toBeEmpty({ timeout: 3000 }); + const content = await editor.textContent(); + 
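// textContent() is typed string | null in the DOM lib, hence the truthy guard before the non-null assertion below + 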
expect(content).toBeTruthy(); + expect(content!.length).toBeGreaterThan(0); + }); + + test('can input script name', async ({ userPage }) => { + await userPage.goto(PATH); + const scriptNameInput = userPage.locator('#scriptNameInput'); + await expect(scriptNameInput).toBeVisible(); + await scriptNameInput.fill(''); + await scriptNameInput.fill('My Test Script'); + await expect(scriptNameInput).toHaveValue('My Test Script'); + }); +}); + +test.describe('Editor Execution', () => { + // K8s pod execution can take 15-20s in CI (pod creation + execution + result processing) + test.describe.configure({ timeout: 30000 }); + + test('can execute simple python script', async ({ userPage }) => { + await userPage.goto(PATH); + await runExampleAndExecute(userPage); + await expect(userPage.locator('text=Status:').first()).toBeVisible(); + }); + + test('shows execution output on successful run', async ({ userPage }) => { + await userPage.goto(PATH); + await runExampleAndExecute(userPage); + await expect(userPage.locator('text=Output:').first()).toBeVisible({ timeout: 5000 }); + await expect(userPage.locator('.output-pre').first()).toBeVisible(); + }); + + test('shows resource usage after execution', async ({ userPage }) => { + await userPage.goto(PATH); + await runExampleAndExecute(userPage); + await expect(userPage.getByText('Resource Usage:')).toBeVisible({ timeout: 5000 }); + await expect(userPage.getByText(/CPU:/)).toBeVisible(); + await expect(userPage.getByText(/Memory:/)).toBeVisible(); + }); + + test('run button is disabled during execution', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: /Example/i }).click(); + await expect(userPage.locator('.cm-content')).not.toBeEmpty({ timeout: 3000 }); + const runButton = userPage.getByRole('button', { name: /Run Script/i }); + await runButton.click(); + const executingButton = userPage.getByRole('button', { name: /Executing/i }); + await expect(executingButton).toBeVisible({ timeout: 5000 }); + await expect(executingButton).toBeDisabled(); + }); +}); + +test.describe('Editor Script Management', () => { + test('can save script when authenticated', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: /Example/i }).click(); + await expect(userPage.locator('.cm-content')).not.toBeEmpty({ timeout: 3000 }); + await userPage.locator('#scriptNameInput').fill(`Test Script ${Date.now()}`); + const optionsToggle = userPage.getByRole('button', { name: 'Toggle Script Options' }); + await optionsToggle.click(); + await userPage.locator('button[title="Save current script"]').click(); + await expectToastVisible(userPage); + }); + + test('can create new script', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: /Example/i }).click(); + await expect(userPage.locator('.cm-content')).not.toBeEmpty({ timeout: 3000 }); + const optionsToggle = userPage.getByRole('button', { name: 'Toggle Script Options' }); + await optionsToggle.click(); + await userPage.getByRole('button', { name: /New/i }).click(); + await expect(userPage.locator('#scriptNameInput')).toHaveValue(''); + }); + + test('shows saved scripts section when authenticated', async ({ userPage }) => { + await userPage.goto(PATH); + const optionsToggle = userPage.getByRole('button', { name: 'Toggle Script Options' }); + await optionsToggle.click(); + await expect(userPage.getByRole('heading', { name: 'Saved Scripts' })).toBeVisible(); + }); +}); + +describeAuthRequired(test, 
PATH); diff --git a/frontend/e2e/fixtures.ts b/frontend/e2e/fixtures.ts new file mode 100644 index 00000000..2b476a6a --- /dev/null +++ b/frontend/e2e/fixtures.ts @@ -0,0 +1,211 @@ +import { test as base, expect, type Page, type BrowserContext } from '@playwright/test'; +import { ADMIN_ROUTES, type AdminPath } from '../src/lib/admin/constants'; + +export const TEST_USERS = { + user: { username: 'user', password: 'user123' }, + admin: { username: 'admin', password: 'admin123' }, +} as const; + +// Worker-scoped fixtures: authenticate ONCE per worker, reuse context for all tests +type WorkerFixtures = { + userContext: BrowserContext; + adminContext: BrowserContext; +}; + +type TestFixtures = { + userPage: Page; + adminPage: Page; +}; + +export const test = base.extend<TestFixtures, WorkerFixtures>({ + // Worker-scoped: one login per worker, shared across all tests in that worker + userContext: [async ({ browser }, use) => { + const context = await browser.newContext({ ignoreHTTPSErrors: true }); + const page = await context.newPage(); + await page.goto('/login'); + await page.locator('#username').fill(TEST_USERS.user.username); + await page.locator('#password').fill(TEST_USERS.user.password); + await page.locator('button[type="submit"]').click(); + await expect(page.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); + await page.close(); + await use(context); + await context.close(); + }, { scope: 'worker' }], + + adminContext: [async ({ browser }, use) => { + const context = await browser.newContext({ ignoreHTTPSErrors: true }); + const page = await context.newPage(); + await page.goto('/login'); + await page.locator('#username').fill(TEST_USERS.admin.username); + await page.locator('#password').fill(TEST_USERS.admin.password); + await page.locator('button[type="submit"]').click(); + await expect(page.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); + await page.close(); + await use(context); + await context.close(); + }, { scope: 'worker' }], + + // Test-scoped: new page per test, but reuses authenticated context + userPage: async ({ userContext }, use) => { + const page = await userContext.newPage(); + await use(page); + await page.close(); + }, + + adminPage: async ({ adminContext }, use) => { + const page = await adminContext.newPage(); + await use(page); + await page.close(); + }, +}); + +// Helper functions using the default page (for tests that don't need pre-auth) +export async function loginAsUser(page: Page): Promise<void> { + await page.goto('/login'); + await page.locator('#username').fill(TEST_USERS.user.username); + await page.locator('#password').fill(TEST_USERS.user.password); + await page.locator('button[type="submit"]').click(); + await expect(page.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); +} + +export async function loginAsAdmin(page: Page): Promise<void> { + await page.goto('/login'); + await page.locator('#username').fill(TEST_USERS.admin.username); + await page.locator('#password').fill(TEST_USERS.admin.password); + await page.locator('button[type="submit"]').click(); + await expect(page.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); +} + +export async function clearSession(page: Page): Promise<void> { + await page.context().clearCookies(); + const url = page.url(); + // Navigate to root first if on about:blank so we can access storage + if (!url || url === 'about:blank') { + await page.goto('/'); + await page.waitForLoadState('domcontentloaded'); + } + await page.evaluate(() => { + localStorage.clear(); + sessionStorage.clear(); + }); +}
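 + +// Usage sketch (illustrative only, not wired into anything): a spec opts into the +// pre-authenticated contexts by importing this extended `test` and requesting +// `userPage` or `adminPage` instead of the built-in `page` fixture, e.g. +// +//   import { test, expect } from './fixtures'; +//   test('editor loads for a signed-in user', async ({ userPage }) => { +//     await userPage.goto('/editor'); +//     await expect(userPage.getByRole('heading', { name: 'Code Editor' })).toBeVisible(); +//   }); +// +// Playwright's storageState (persist cookies/localStorage to disk once, reuse per +// project) would be an alternative; the worker-scoped contexts above get the same +// one-login-per-worker behaviour without writing auth state to disk.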
 + +export function getAdminRoute(path: AdminPath) { + const route = ADMIN_ROUTES.find(r => r.path === path); + if (!route) throw new Error(`Unknown admin path: ${path}`); + return route; +} + +export async function navigateToAdminPage(page: Page, path: AdminPath): Promise<void> { + const route = getAdminRoute(path); + await page.goto(path); + await expect(page.getByRole('heading', { name: route.pageHeading })).toBeVisible(); +} + +export async function expectAdminSidebar(page: Page): Promise<void> { + await expect(page.getByText('Admin Panel')).toBeVisible({ timeout: 10000 }); + for (const route of ADMIN_ROUTES) { + await expect(page.getByRole('link', { name: route.sidebarLabel })).toBeVisible(); + } +} + +export async function expectActiveNavLink(page: Page, linkName: string): Promise<void> { + await expect(page.getByRole('link', { name: linkName })).toHaveClass(/bg-primary/); +} + +export async function expectToastVisible(page: Page, timeout = 5000): Promise<void> { + await expect(page.locator('[class*="toast"]').first()).toBeVisible({ timeout }); +} + +export async function expectRedirectToLogin(page: Page): Promise<void> { + await expect(page).toHaveURL(/\/login/); +} + +export async function expectRedirectToHome(page: Page): Promise<void> { + await expect(page).toHaveURL('/'); +} + +export async function expectTableOrEmptyState( + page: Page, + emptyTextPattern: RegExp, + timeout = 10000 +): Promise<boolean> { + const tableRow = page.locator('table tbody tr').first(); + const emptyState = page.getByText(emptyTextPattern).first(); + await expect(tableRow.or(emptyState).first()).toBeVisible({ timeout }); + return await tableRow.isVisible().catch(() => false); +} + +export async function expectTableColumn(page: Page, columnName: string, emptyPattern: RegExp): Promise<void> { + const hasTable = await expectTableOrEmptyState(page, emptyPattern); + if (hasTable) { + await expect(page.getByRole('columnheader', { name: columnName })).toBeVisible(); + } +} + +export async function runExampleAndExecute(page: Page): Promise<void> { + await page.getByRole('button', { name: /Example/i }).click(); + await expect(page.locator('.cm-content')).not.toBeEmpty({ timeout: 2000 }); + await page.getByRole('button', { name: /Run Script/i }).click(); + await expect(page.getByRole('button', { name: /Executing/i })).toBeVisible({ timeout: 5000 }); + const success = page.locator('text=Status:').first(); + const failure = page.getByText('Execution Failed'); + // K8s pod creation + execution can take 15-20s in CI + await expect(success.or(failure).first()).toBeVisible({ timeout: 25000 }); + await expect(success).toBeVisible({ timeout: 1000 }); +} + +export async function expectAuthRequired(page: Page, path: string): Promise<void> { + await clearSession(page); + await page.goto(path); + await expectRedirectToLogin(page); +} + +export async function navigateToPage(page: Page, path: string, headingName: string, headingLevel: 1 | 2 = 1): Promise<void> { + await page.goto(path); + await expect(page.getByRole('heading', { name: headingName, level: headingLevel })).toBeVisible(); +}
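 + +// Pattern note: the describe* factories below take the extended `test` as an +// argument (typed `typeof base`) so each spec file can stamp the shared suite +// onto its own route, e.g. `describeAuthRequired(test, '/editor');` at the +// bottom of editor.spec.ts registers the common access-control tests for that page. + +export function describeAuthRequired(testFn: typeof base, path: string): void { + testFn.describe('Access Control', () => { + testFn('redirects to login when not authenticated', async ({ page }) => { + await expectAuthRequired(page, path); + }); + }); +} + +export function describeAdminAccessControl(testFn: typeof base, path: AdminPath): void { + testFn.describe('Access Control', () => { + testFn('redirects non-admin users to home', async ({ userPage }) => { + await userPage.goto(path); + await expectRedirectToHome(userPage); 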
+ }); + + testFn('redirects unauthenticated users to login', async ({ page }) => { + await clearSession(page); + await page.goto(path); + await expectRedirectToLogin(page); + }); + }); +} + +export function describeAdminCommonTests(testFn: typeof base, path: AdminPath): void { + const route = getAdminRoute(path); + + testFn('displays page with header', async ({ adminPage }) => { + await adminPage.goto(path); + await expect(adminPage.getByRole('heading', { name: route.pageHeading })).toBeVisible({ timeout: 10000 }); + }); + + testFn('shows admin sidebar navigation', async ({ adminPage }) => { + await adminPage.goto(path); + await expectAdminSidebar(adminPage); + }); + + testFn('nav link is active in sidebar', async ({ adminPage }) => { + await adminPage.goto(path); + await expectActiveNavLink(adminPage, route.sidebarLabel); + }); +} + +export { expect, ADMIN_ROUTES, type AdminPath, type Page }; diff --git a/frontend/e2e/home.spec.ts b/frontend/e2e/home.spec.ts new file mode 100644 index 00000000..805d7bff --- /dev/null +++ b/frontend/e2e/home.spec.ts @@ -0,0 +1,150 @@ +import { test, expect, clearSession } from './fixtures'; + +test.describe('Home Page', () => { + test.beforeEach(async ({ page }) => { + await clearSession(page); + await page.goto('/'); + await page.waitForLoadState('domcontentloaded'); + }); + + test('displays hero section with main heading', async ({ page }) => { + await page.waitForSelector('h1'); + await expect(page.getByRole('heading', { level: 1 })).toContainText('Code, Run'); + await expect(page.getByRole('heading', { level: 1 })).toContainText('Integrate'); + }); + + test('shows welcome message with product name', async ({ page }) => { + await page.waitForSelector('h1'); // Wait for hero to render + await expect(page.getByText('Welcome to Integr8sCode')).toBeVisible(); + await expect(page.getByText('seamless online execution environment')).toBeVisible(); + }); + + test('shows start coding CTA button', async ({ page }) => { + const ctaButton = page.getByRole('link', { name: 'Start Coding Now' }); + await expect(ctaButton).toBeVisible(); + await expect(ctaButton).toHaveAttribute('href', '/editor'); + }); + + test('displays features section', async ({ page }) => { + await expect(page.getByText('Core Features')).toBeVisible(); + await expect(page.getByText('Everything you need for quick execution')).toBeVisible(); + }); + + test('shows all three feature cards', async ({ page }) => { + await expect(page.getByText('Instant Execution')).toBeVisible(); + await expect(page.getByText('Secure & Efficient')).toBeVisible(); + await expect(page.getByText('Real-time Results')).toBeVisible(); + }); +}); + +test.describe('Home Page Header', () => { + test.beforeEach(async ({ page }) => { + await clearSession(page); + await page.goto('/'); + await page.waitForLoadState('domcontentloaded'); + }); + + test('shows header with logo', async ({ page }) => { + await expect(page.locator('header')).toBeVisible(); + await expect(page.locator('header').getByText('Integr8sCode')).toBeVisible(); + }); + + test('logo links to home page', async ({ page }) => { + const logoLink = page.locator('header a').filter({ hasText: 'Integr8sCode' }); + await expect(logoLink).toHaveAttribute('href', '/'); + }); + + test('shows login and register buttons when not authenticated', async ({ page }) => { + await page.waitForSelector('header'); // Wait for header to render + await expect(page.locator('header').getByRole('link', { name: 'Login' })).toBeVisible(); + await expect(page.locator('header').getByRole('link', 
{ name: 'Register' })).toBeVisible(); + }); + + test('shows theme toggle button', async ({ page }) => { + await expect(page.locator('header button[title="Toggle theme"]')).toBeVisible(); + }); +}); + +test.describe('Home Page Navigation', () => { + test.beforeEach(async ({ page }) => { + await clearSession(page); + await page.goto('/'); + await page.waitForLoadState('domcontentloaded'); + }); + + test('clicking CTA navigates to login or editor', async ({ page }) => { + await expect(page.getByRole('link', { name: 'Start Coding Now' })).toBeVisible(); + await page.getByRole('link', { name: 'Start Coding Now' }).click(); + await expect(page).toHaveURL(/\/login|\/editor/); + }); + + test('can navigate to login from header', async ({ page }) => { + await page.locator('header').getByRole('link', { name: 'Login' }).click(); + await expect(page).toHaveURL(/\/login/); + }); + + test('can navigate to register from header', async ({ page }) => { + await page.locator('header').getByRole('link', { name: 'Register' }).click(); + await expect(page).toHaveURL(/\/register/); + }); + + test('clicking logo returns to home', async ({ page }) => { + await expect(page.locator('header')).toBeVisible(); + await page.locator('header').getByRole('link', { name: 'Login' }).click(); + await expect(page).toHaveURL(/\/login/); + await expect(page.locator('header a').filter({ hasText: 'Integr8sCode' })).toBeVisible(); + await page.locator('header a').filter({ hasText: 'Integr8sCode' }).click(); + await expect(page).toHaveURL('/'); + }); +}); + +test.describe('Home Page Responsive', () => { + test('displays correctly on mobile viewport', async ({ page }) => { + await page.setViewportSize({ width: 375, height: 667 }); + await clearSession(page); + await page.goto('/'); + await expect(page.getByRole('heading', { level: 1 })).toBeVisible(); + await expect(page.getByRole('link', { name: 'Start Coding Now' })).toBeVisible(); + }); + + test('shows mobile menu button on small screens', async ({ page }) => { + await page.setViewportSize({ width: 375, height: 667 }); + await clearSession(page); + await page.goto('/'); + const menuButton = page.locator('header button').filter({ has: page.locator('svg') }).last(); + await expect(menuButton).toBeVisible(); + }); + + test('can open mobile menu', async ({ page }) => { + await page.setViewportSize({ width: 375, height: 667 }); + await clearSession(page); + await page.goto('/'); + const menuButton = page.locator('header button').filter({ has: page.locator('svg') }).last(); + await menuButton.click(); + // Mobile menu Login link should be visible after opening menu (assertion waits for animation) + await expect(page.getByRole('link', { name: 'Login' }).first()).toBeVisible({ timeout: 3000 }); + }); +}); + +test.describe('Privacy Page', () => { + test.beforeEach(async ({ page }) => { + await clearSession(page); + await page.goto('/privacy'); + }); + + test('displays privacy policy heading', async ({ page }) => { + await expect(page.getByRole('heading', { name: 'Privacy Policy' })).toBeVisible(); + }); + + test('shows last updated date', async ({ page }) => { + await expect(page.getByText('Last updated:')).toBeVisible(); + }); + + test('shows key privacy sections', async ({ page }) => { + await expect(page.getByText("Who's responsible for your data?")).toBeVisible(); + await expect(page.getByText('What information do I collect?')).toBeVisible(); + await expect(page.getByText('Your rights (GDPR stuff)')).toBeVisible(); + await expect(page.getByText('About cookies')).toBeVisible(); + await 
expect(page.getByText('Get in touch')).toBeVisible(); + }); +}); diff --git a/frontend/e2e/notifications.spec.ts b/frontend/e2e/notifications.spec.ts new file mode 100644 index 00000000..637c53ea --- /dev/null +++ b/frontend/e2e/notifications.spec.ts @@ -0,0 +1,90 @@ +import { test, expect, describeAuthRequired } from './fixtures'; + +const PATH = '/notifications'; +const HEADING = 'Notifications'; + +// Helper to navigate and wait for notifications API response +async function gotoAndWaitForNotifications(page: import('@playwright/test').Page) { + const notificationsResponse = page.waitForResponse( + response => response.url().includes('/api/v1/notifications') && response.status() === 200 + ); + await page.goto(PATH); + await notificationsResponse; +} + +test.describe('Notifications Page', () => { + test('displays notifications page with header', async ({ userPage }) => { + await userPage.goto(PATH); + await expect(userPage.getByRole('heading', { name: HEADING, level: 1 })).toBeVisible(); + }); + + test('shows filter controls', async ({ userPage }) => { + await userPage.goto(PATH); + await expect(userPage.getByLabel('Include tags')).toBeVisible(); + await expect(userPage.getByLabel('Exclude tags')).toBeVisible(); + await expect(userPage.getByLabel('Tag prefix')).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'Filter' })).toBeVisible(); + }); + + test('can enter filter values', async ({ userPage }) => { + await userPage.goto(PATH); + const includeTagsInput = userPage.getByLabel('Include tags'); + await includeTagsInput.fill('execution,completed'); + await expect(includeTagsInput).toHaveValue('execution,completed'); + const excludeTagsInput = userPage.getByLabel('Exclude tags'); + await excludeTagsInput.fill('external_alert'); + await expect(excludeTagsInput).toHaveValue('external_alert'); + }); + + test('can apply filters', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByLabel('Include tags').fill('test'); + await userPage.getByRole('button', { name: 'Filter' }).click(); + await expect(userPage.getByRole('heading', { name: HEADING, level: 1 })).toBeVisible(); + }); + + test('shows empty state or notifications', async ({ userPage }) => { + await gotoAndWaitForNotifications(userPage); + const emptyState = userPage.getByText('No notifications yet'); + const notificationCard = userPage.locator('[class*="card"]').filter({ hasText: /notification/i }); + const hasEmptyState = await emptyState.isVisible({ timeout: 3000 }).catch(() => false); + const hasNotifications = await notificationCard.first().isVisible({ timeout: 3000 }).catch(() => false); + expect(hasEmptyState || hasNotifications).toBe(true); + }); +}); + +test.describe('Notifications Interaction', () => { + test('notification cards show severity badges when present', async ({ userPage }) => { + await userPage.goto(PATH); + const notificationCard = userPage.locator('[class*="card"]').first(); + if (await notificationCard.isVisible({ timeout: 3000 }).catch(() => false)) { + const severityBadge = userPage.locator('[class*="badge"]').filter({ hasText: /low|medium|high|urgent/i }).first(); + const hasBadge = await severityBadge.isVisible({ timeout: 2000 }).catch(() => false); + if (hasBadge) { + await expect(severityBadge).toContainText(/low|medium|high|urgent/i); + } + } + }); + + test('notification cards show timestamp when present', async ({ userPage }) => { + await userPage.goto(PATH); + const notificationCard = userPage.locator('[class*="card"]').first(); + if (await 
notificationCard.isVisible({ timeout: 3000 }).catch(() => false)) { + const timeIndicator = userPage.locator('text=/ago|Just now|\\d{1,2}:\\d{2}|\\d{4}-\\d{2}-\\d{2}/').first(); + const hasTime = await timeIndicator.isVisible({ timeout: 2000 }).catch(() => false); + if (hasTime) { + await expect(timeIndicator).toBeVisible(); + } + } + }); +}); + +test.describe('Notification Center Header Component', () => { + test('shows notification icon in header when authenticated', async ({ userPage }) => { + await userPage.goto(PATH); + const bellIcon = userPage.locator('header').locator('[aria-label*="notification"], button').filter({ has: userPage.locator('svg') }); + await expect(bellIcon.first()).toBeVisible(); + }); +}); + +describeAuthRequired(test, PATH); diff --git a/frontend/e2e/register.spec.ts b/frontend/e2e/register.spec.ts new file mode 100644 index 00000000..92dc12ab --- /dev/null +++ b/frontend/e2e/register.spec.ts @@ -0,0 +1,115 @@ +import { test, expect, clearSession } from './fixtures'; + +const PATH = '/register'; + +async function navigateToRegister(page: import('@playwright/test').Page): Promise<void> { + await clearSession(page); + await page.goto(PATH); + await page.waitForSelector('#username'); +} + +async function fillRegistrationForm( + page: import('@playwright/test').Page, + data: { username: string; email: string; password: string; confirmPassword: string } +): Promise<void> { + await page.fill('#username', data.username); + await page.fill('#email', data.email); + await page.fill('#password', data.password); + await page.fill('#confirm-password', data.confirmPassword); +} + +test.describe('Registration', () => { + test.beforeEach(async ({ page }) => { + await navigateToRegister(page); + }); + + test('shows registration form with all required fields', async ({ page }) => { + await expect(page.getByRole('heading', { name: 'Create a new account' })).toBeVisible(); + await expect(page.locator('#username')).toBeVisible(); + await expect(page.locator('#email')).toBeVisible(); + await expect(page.locator('#password')).toBeVisible(); + await expect(page.locator('#confirm-password')).toBeVisible(); + await expect(page.locator('button[type="submit"]')).toHaveText('Create Account'); + }); + + test('has link to login page', async ({ page }) => { + await expect(page.getByRole('link', { name: 'sign in to your existing account' })).toBeVisible(); + }); + + test('can navigate to login page', async ({ page }) => { + await page.getByRole('link', { name: 'sign in to your existing account' }).click(); + await expect(page).toHaveURL(/\/login/); + }); + + test('validates required fields on empty submission', async ({ page }) => { + await page.click('button[type="submit"]'); + await expect(page).toHaveURL(/\/register/); + const usernameInput = page.locator('#username'); + await expect(usernameInput).toBeFocused(); + const isInvalid = await usernameInput.evaluate((el: HTMLInputElement) => !el.validity.valid); + expect(isInvalid).toBe(true); + }); + + test('shows error when passwords do not match', async ({ page }) => { + await fillRegistrationForm(page, { + username: 'testuser', + email: 'test@example.com', + password: 'Password123!', + confirmPassword: 'DifferentPassword123!', + }); + await page.click('button[type="submit"]'); + await expect(page.locator('p.text-red-600, p.text-red-400')).toContainText('Passwords do not match'); + }); + + test('shows error when password is too short', async ({ page }) => { + await fillRegistrationForm(page, { + username: 'testuser', + email: 'test@example.com', + password: 
'short', + confirmPassword: 'short', + }); + await page.click('button[type="submit"]'); + await expect(page.locator('p.text-red-600, p.text-red-400')).toContainText('at least 8 characters'); + }); + + test('submits form and shows loading or redirects', async ({ page }) => { + const uniqueId = Date.now(); + await fillRegistrationForm(page, { + username: `newuser_${uniqueId}`, + email: `newuser_${uniqueId}@example.com`, + password: 'ValidPassword123!', + confirmPassword: 'ValidPassword123!', + }); + const submitButton = page.locator('button[type="submit"]'); + await submitButton.click(); + // Either see loading state OR redirect to login (both indicate successful submission) + const loadingOrRedirect = await Promise.race([ + expect(submitButton).toContainText(/Registering/).then(() => 'loading'), + expect(page).toHaveURL(/\/login/, { timeout: 10000 }).then(() => 'redirect'), + ]).catch(() => 'timeout'); + expect(['loading', 'redirect']).toContain(loadingOrRedirect); + }); + + test('shows error for duplicate username', async ({ page }) => { + await fillRegistrationForm(page, { + username: 'user', + email: 'unique@example.com', + password: 'ValidPassword123!', + confirmPassword: 'ValidPassword123!', + }); + await page.click('button[type="submit"]'); + await expect(page.locator('p.text-red-600, p.text-red-400')).toBeVisible({ timeout: 5000 }); + }); + + test('successful registration redirects to login', async ({ page }) => { + const uniqueId = Date.now(); + await fillRegistrationForm(page, { + username: `newuser_${uniqueId}`, + email: `newuser_${uniqueId}@example.com`, + password: 'ValidPassword123!', + confirmPassword: 'ValidPassword123!', + }); + await page.click('button[type="submit"]'); + await expect(page).toHaveURL(/\/login/, { timeout: 10000 }); + }); +}); diff --git a/frontend/e2e/settings.spec.ts b/frontend/e2e/settings.spec.ts new file mode 100644 index 00000000..c08b1753 --- /dev/null +++ b/frontend/e2e/settings.spec.ts @@ -0,0 +1,169 @@ +import { test, expect, expectToastVisible, describeAuthRequired, clearSession, TEST_USERS } from './fixtures'; + +const PATH = '/settings'; +const HEADING = 'Settings'; + +test.describe('Settings Page', () => { + test('displays settings page with all tabs', async ({ userPage }) => { + await userPage.goto(PATH); + await expect(userPage.getByRole('heading', { name: HEADING, level: 1 })).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'General' })).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'Editor' })).toBeVisible(); + await expect(userPage.locator('main').getByText('Notifications')).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'View History' })).toBeVisible(); + }); + + test('general tab shows theme selection', async ({ userPage }) => { + await userPage.goto(PATH); + await expect(userPage.getByRole('heading', { name: 'General Settings' })).toBeVisible(); + await expect(userPage.getByText('Theme')).toBeVisible(); + await expect(userPage.locator('#theme-select')).toBeVisible(); + }); + + test('can open theme dropdown and see options', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.locator('#theme-select').click(); + await expect(userPage.getByRole('button', { name: 'Light', exact: true })).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'Dark', exact: true })).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'Auto (System)', exact: true })).toBeVisible(); + }); + + test('can change theme to dark', async ({ userPage }) => { + 
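// picking Dark should flip the 'dark' class on the html element, read back via evaluate() below + 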
await userPage.goto(PATH); + await userPage.locator('#theme-select').click(); + await userPage.getByText('Dark').click(); + const hasDarkClass = await userPage.evaluate(() => document.documentElement.classList.contains('dark')); + expect(hasDarkClass).toBe(true); + }); + + test('can change theme to light', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.locator('#theme-select').click(); + await userPage.getByText('Light').click(); + const hasDarkClass = await userPage.evaluate(() => document.documentElement.classList.contains('dark')); + expect(hasDarkClass).toBe(false); + }); +}); + +test.describe('Settings Editor Tab', () => { + test('shows editor settings section', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'Editor' }).click(); + await expect(userPage.getByRole('heading', { name: 'Editor Settings' })).toBeVisible(); + await expect(userPage.getByText('Editor Theme')).toBeVisible(); + await expect(userPage.getByText('Font Size')).toBeVisible(); + await expect(userPage.getByText('Tab Size')).toBeVisible(); + }); + + test('shows editor theme dropdown', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'Editor' }).click(); + await userPage.locator('#editor-theme-select').click(); + await expect(userPage.getByRole('button', { name: 'Auto (Follow App Theme)', exact: true })).toBeVisible(); + await expect(userPage.getByRole('button', { name: 'One Dark', exact: true })).toBeVisible(); + }); + + test('can change font size', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'Editor' }).click(); + const fontSizeInput = userPage.locator('#font-size'); + await fontSizeInput.fill(''); + await fontSizeInput.fill('16'); + await expect(fontSizeInput).toHaveValue('16'); + }); + + test('can change tab size', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'Editor' }).click(); + const tabSizeInput = userPage.locator('#tab-size'); + await tabSizeInput.fill(''); + await tabSizeInput.fill('2'); + await expect(tabSizeInput).toHaveValue('2'); + }); + + test('can toggle word wrap setting', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'Editor' }).click(); + const wordWrapLabel = userPage.locator('label').filter({ hasText: 'Word Wrap' }); + const checkbox = wordWrapLabel.locator('input[type="checkbox"]'); + const initialState = await checkbox.isChecked(); + await wordWrapLabel.click(); + expect(await checkbox.isChecked()).toBe(!initialState); + }); +}); + +test.describe('Settings Notifications Tab', () => { + test('shows notification settings section', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.locator('main').getByText('Notifications').click(); + await expect(userPage.getByRole('heading', { name: 'Notification Settings' })).toBeVisible(); + await expect(userPage.getByText('Notification Types')).toBeVisible(); + }); + + test('shows all notification type toggles', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.locator('main').getByText('Notifications').click(); + await expect(userPage.getByText('Execution Completed')).toBeVisible(); + await expect(userPage.getByText('Execution Failed')).toBeVisible(); + await expect(userPage.getByText('System Updates')).toBeVisible(); + await expect(userPage.getByText('Security Alerts')).toBeVisible(); + }); + + test('can toggle notification 
preferences', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.locator('main').getByText('Notifications').click(); + const label = userPage.locator('label').filter({ hasText: 'Execution Completed' }); + const checkbox = label.locator('input[type="checkbox"]'); + const initialState = await checkbox.isChecked(); + await label.click(); + expect(await checkbox.isChecked()).toBe(!initialState); + }); +}); + +test.describe('Settings Save and History', () => { + test('shows save button', async ({ userPage }) => { + await userPage.goto(PATH); + await expect(userPage.getByRole('button', { name: 'Save Settings' })).toBeVisible(); + }); + + test('can save settings', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'Editor' }).click(); + const fontSizeInput = userPage.locator('#font-size'); + const currentValue = await fontSizeInput.inputValue(); + await fontSizeInput.fill(currentValue === '14' ? '15' : '14'); + await userPage.getByRole('button', { name: 'Save Settings' }).click(); + await expectToastVisible(userPage); + }); + + test('can open settings history modal', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'View History' }).click(); + await expect(userPage.getByRole('heading', { name: 'Settings History' })).toBeVisible(); + }); + + test('can close settings history modal', async ({ userPage }) => { + await userPage.goto(PATH); + await userPage.getByRole('button', { name: 'View History' }).click(); + await userPage.getByRole('button', { name: 'Close', exact: true }).click(); + await expect(userPage.getByRole('heading', { name: 'Settings History' })).not.toBeVisible(); + }); +}); + +test.describe('Settings Access Control', () => { + test('redirects to login when not authenticated', async ({ page }) => { + await clearSession(page); + await page.goto(PATH); + await expect(page).toHaveURL(/\/login/); + }); + + test('preserves settings page as redirect target after login', async ({ page }) => { + await clearSession(page); + await page.goto(PATH); + await expect(page).toHaveURL(/\/login/); + await page.fill('#username', TEST_USERS.user.username); + await page.fill('#password', TEST_USERS.user.password); + await page.click('button[type="submit"]'); + await expect(page.getByRole('heading', { name: HEADING, level: 1 })).toBeVisible({ timeout: 10000 }); + }); +}); diff --git a/frontend/e2e/theme.spec.ts b/frontend/e2e/theme.spec.ts index 16104862..c660765f 100644 --- a/frontend/e2e/theme.spec.ts +++ b/frontend/e2e/theme.spec.ts @@ -1,34 +1,27 @@ import { test, expect } from '@playwright/test'; +async function hasDarkTheme(page: import('@playwright/test').Page): Promise<boolean> { + return page.evaluate(() => document.documentElement.classList.contains('dark')); +} + test.describe('Theme', () => { test.beforeEach(async ({ page }) => { - // Clear theme storage before each test await page.goto('/login'); - await page.evaluate(() => { - localStorage.removeItem('app-theme'); - }); + await page.evaluate(() => localStorage.removeItem('app-theme')); }); test('auto theme follows system light preference', async ({ page }) => { await page.emulateMedia({ colorScheme: 'light' }); await page.goto('/login'); await page.waitForLoadState('networkidle'); - - const hasDarkClass = await page.evaluate(() => - document.documentElement.classList.contains('dark') - ); - expect(hasDarkClass).toBe(false); + expect(await hasDarkTheme(page)).toBe(false); });
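 + + // Sketch of the 'auto' resolution these two tests pin down (an assumption; the + // real logic lives in the app, and the variable names here are hypothetical): + //   const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches; + //   const stored = localStorage.getItem('app-theme'); // 'light' | 'dark' | null = auto + //   document.documentElement.classList.toggle('dark', stored ? stored === 'dark' : prefersDark); test('auto theme follows system dark preference', async ({ 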
page }) => { await page.emulateMedia({ colorScheme: 'dark' }); await page.goto('/login'); await page.waitForLoadState('networkidle'); - - const hasDarkClass = await page.evaluate(() => - document.documentElement.classList.contains('dark') - ); - expect(hasDarkClass).toBe(true); + expect(await hasDarkTheme(page)).toBe(true); }); test('explicit dark theme overrides system preference', async ({ page }) => { @@ -37,11 +30,7 @@ test.describe('Theme', () => { await page.evaluate(() => localStorage.setItem('app-theme', 'dark')); await page.reload(); await page.waitForLoadState('networkidle'); - - const hasDarkClass = await page.evaluate(() => - document.documentElement.classList.contains('dark') - ); - expect(hasDarkClass).toBe(true); + expect(await hasDarkTheme(page)).toBe(true); }); test('explicit light theme overrides system preference', async ({ page }) => { @@ -50,11 +39,7 @@ test.describe('Theme', () => { await page.evaluate(() => localStorage.setItem('app-theme', 'light')); await page.reload(); await page.waitForLoadState('networkidle'); - - const hasDarkClass = await page.evaluate(() => - document.documentElement.classList.contains('dark') - ); - expect(hasDarkClass).toBe(false); + expect(await hasDarkTheme(page)).toBe(false); }); test('theme persists across page navigation', async ({ page }) => { @@ -62,13 +47,8 @@ test.describe('Theme', () => { await page.evaluate(() => localStorage.setItem('app-theme', 'dark')); await page.goto('/register'); await page.waitForLoadState('networkidle'); - const storedTheme = await page.evaluate(() => localStorage.getItem('app-theme')); expect(storedTheme).toBe('dark'); - - const hasDarkClass = await page.evaluate(() => - document.documentElement.classList.contains('dark') - ); - expect(hasDarkClass).toBe(true); + expect(await hasDarkTheme(page)).toBe(true); }); }); diff --git a/frontend/eslint.config.js b/frontend/eslint.config.js new file mode 100644 index 00000000..40d06c64 --- /dev/null +++ b/frontend/eslint.config.js @@ -0,0 +1,78 @@ +import eslint from '@eslint/js'; +import tseslint from '@typescript-eslint/eslint-plugin'; +import tsparser from '@typescript-eslint/parser'; +import svelte from 'eslint-plugin-svelte'; +import svelteParser from 'svelte-eslint-parser'; +import globals from 'globals'; + +// Svelte 5 runes +const svelteRunes = { + $state: 'readonly', + $derived: 'readonly', + $effect: 'readonly', + $props: 'readonly', + $bindable: 'readonly', + $inspect: 'readonly', + $host: 'readonly', +}; + +export default [ + eslint.configs.recommended, + { + files: ['src/**/*.ts'], + ignores: ['**/__tests__/**', '**/tests/**'], + languageOptions: { + parser: tsparser, + parserOptions: { + ecmaVersion: 2022, + sourceType: 'module', + }, + globals: { + ...globals.browser, + ...globals.node, + ...svelteRunes, + }, + }, + plugins: { + '@typescript-eslint': tseslint, + }, + rules: { + ...tseslint.configs.recommended.rules, + '@typescript-eslint/no-unused-vars': ['warn', { argsIgnorePattern: '^_' }], + '@typescript-eslint/no-explicit-any': 'off', // Too noisy for generated code + 'no-unused-vars': 'off', + }, + }, + { + files: ['src/**/*.svelte'], + languageOptions: { + parser: svelteParser, + parserOptions: { + parser: tsparser, + }, + globals: { + ...globals.browser, + ...svelteRunes, + }, + }, + plugins: { + svelte, + }, + rules: { + ...svelte.configs.recommended.rules, + 'no-unused-vars': 'off', + 'no-undef': 'off', + }, + }, + { + ignores: [ + 'public/', + 'node_modules/', + '*.config.js', + 'playwright-report/', + 'e2e/', + 'src/lib/api/', // 
Generated API client + '**/__tests__/**', // Test files + ], + }, +]; diff --git a/frontend/package-lock.json b/frontend/package-lock.json index c405f832..3cac758d 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -47,6 +47,7 @@ }, "devDependencies": { "@babel/runtime": "^7.24.7", + "@eslint/js": "^9.39.2", "@hey-api/openapi-ts": "0.90.1", "@playwright/test": "^1.52.0", "@rollup/plugin-alias": "^6.0.0", @@ -57,11 +58,18 @@ "@testing-library/jest-dom": "^6.6.3", "@testing-library/svelte": "^5.3.1", "@testing-library/user-event": "^14.6.1", + "@typescript-eslint/eslint-plugin": "^8.53.0", + "@typescript-eslint/parser": "^8.53.0", "@vitest/coverage-v8": "^4.0.16", + "eslint": "^9.39.2", + "eslint-plugin-svelte": "^3.14.0", "express": "^5.2.1", + "globals": "^17.0.0", "http-proxy": "^1.18.1", "jsdom": "^27.4.0", "rollup-plugin-serve": "^3.0.0", + "svelte-check": "^4.3.5", + "svelte-eslint-parser": "^1.4.1", "tailwindcss": "^4.1.13", "tslib": "^2.8.1", "typescript": "^5.7.2", @@ -878,6 +886,193 @@ "node": ">=18" } }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.17.0" + }, 
+ "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": 
"sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@exodus/bytes": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.7.0.tgz", @@ -969,6 +1164,54 @@ "node": ">=20" } }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.13", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", @@ -2027,6 +2270,228 @@ "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", "optional": true }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.53.0.tgz", + "integrity": "sha512-eEXsVvLPu8Z4PkFibtuFJLJOTAV/nPdgtSjkGoPpddpFk3/ym2oy97jynY6ic2m6+nc5M8SE1e9v/mHKsulcJg==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.53.0", + "@typescript-eslint/type-utils": "8.53.0", + "@typescript-eslint/utils": "8.53.0", + "@typescript-eslint/visitor-keys": "8.53.0", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.53.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.53.0.tgz", + "integrity": "sha512-npiaib8XzbjtzS2N4HlqPvlpxpmZ14FjSJrteZpPxGUaYPlvhzlzUZ4mZyABo0EFrOWnvyd0Xxroq//hKhtAWg==", + "dev": true, + "dependencies": { + 
"@typescript-eslint/scope-manager": "8.53.0", + "@typescript-eslint/types": "8.53.0", + "@typescript-eslint/typescript-estree": "8.53.0", + "@typescript-eslint/visitor-keys": "8.53.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.53.0.tgz", + "integrity": "sha512-Bl6Gdr7NqkqIP5yP9z1JU///Nmes4Eose6L1HwpuVHwScgDPPuEWbUVhvlZmb8hy0vX9syLk5EGNL700WcBlbg==", + "dev": true, + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.53.0", + "@typescript-eslint/types": "^8.53.0", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.53.0.tgz", + "integrity": "sha512-kWNj3l01eOGSdVBnfAF2K1BTh06WS0Yet6JUgb9Cmkqaz3Jlu0fdVUjj9UI8gPidBWSMqDIglmEXifSgDT/D0g==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.53.0", + "@typescript-eslint/visitor-keys": "8.53.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.53.0.tgz", + "integrity": "sha512-K6Sc0R5GIG6dNoPdOooQ+KtvT5KCKAvTcY8h2rIuul19vxH5OTQk7ArKkd4yTzkw66WnNY0kPPzzcmWA+XRmiA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.53.0.tgz", + "integrity": "sha512-BBAUhlx7g4SmcLhn8cnbxoxtmS7hcq39xKCgiutL3oNx1TaIp+cny51s8ewnKMpVUKQUGb41RAUWZ9kxYdovuw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.53.0", + "@typescript-eslint/typescript-estree": "8.53.0", + "@typescript-eslint/utils": "8.53.0", + "debug": "^4.4.3", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.53.0.tgz", + "integrity": "sha512-Bmh9KX31Vlxa13+PqPvt4RzKRN1XORYSLlAE+sO1i28NkisGbTtSLFVB3l7PWdHtR3E0mVMuC7JilWJ99m2HxQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + 
"node_modules/@typescript-eslint/typescript-estree": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.53.0.tgz", + "integrity": "sha512-pw0c0Gdo7Z4xOG987u3nJ8akL9093yEEKv8QTJ+Bhkghj1xyj8cgPaavlr9rq8h7+s6plUJ4QJYw2gCZodqmGw==", + "dev": true, + "dependencies": { + "@typescript-eslint/project-service": "8.53.0", + "@typescript-eslint/tsconfig-utils": "8.53.0", + "@typescript-eslint/types": "8.53.0", + "@typescript-eslint/visitor-keys": "8.53.0", + "debug": "^4.4.3", + "minimatch": "^9.0.5", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.53.0.tgz", + "integrity": "sha512-XDY4mXTez3Z1iRDI5mbRhH4DFSt46oaIFsLg+Zn97+sYrXACziXSQcSelMybnVZ5pa1P6xYkPr5cMJyunM1ZDA==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.53.0", + "@typescript-eslint/types": "8.53.0", + "@typescript-eslint/typescript-estree": "8.53.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.53.0.tgz", + "integrity": "sha512-LZ2NqIHFhvFwxG0qZeLL9DvdNAHPGCY5dIRwBhyYeU+LfLhcStE1ImjsuTG/WaVh3XysGaeLW8Rqq7cGkPCFvw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "8.53.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, "node_modules/@uiw/codemirror-theme-bbedit": { "version": "4.25.4", "resolved": "https://registry.npmjs.org/@uiw/codemirror-theme-bbedit/-/codemirror-theme-bbedit-4.25.4.tgz", @@ -2268,6 +2733,15 @@ "node": ">=0.4.0" } }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, "node_modules/agent-base": { "version": "7.1.4", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", @@ -2277,6 +2751,22 @@ "node": ">= 14" } }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/ansi-colors": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", @@ -2403,6 +2893,12 @@ "node": ">= 0.4" } }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, "node_modules/baseline-browser-mapping": { "version": "2.9.10", "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.10.tgz", @@ -2460,6 +2956,15 @@ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, "node_modules/braces": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", @@ -2617,6 +3122,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/caniuse-api": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", @@ -2768,6 +3282,12 @@ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==" }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, "node_modules/concat-with-sourcemaps": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/concat-with-sourcemaps/-/concat-with-sourcemaps-1.1.0.tgz", @@ -2844,6 +3364,20 @@ "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==" }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/css-declaration-sorter": { "version": "6.4.1", "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz", @@ -3071,6 
+3605,12 @@ "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", "dev": true }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", @@ -3385,11 +3925,251 @@ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", "dev": true }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-svelte": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-svelte/-/eslint-plugin-svelte-3.14.0.tgz", + "integrity": "sha512-Isw0GvaMm0yHxAj71edAdGFh28ufYs+6rk2KlbbZphnqZAzrH3Se3t12IFh2H9+1F/jlDhBBL4oiOJmLqmYX0g==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.6.1", + "@jridgewell/sourcemap-codec": "^1.5.0", + "esutils": "^2.0.3", + "globals": "^16.0.0", + "known-css-properties": "^0.37.0", + "postcss": "^8.4.49", + "postcss-load-config": "^3.1.4", + "postcss-safe-parser": "^7.0.0", + "semver": "^7.6.3", + "svelte-eslint-parser": "^1.4.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://github.com/sponsors/ota-meshi" + }, + "peerDependencies": { + "eslint": "^8.57.1 || ^9.0.0", + "svelte": "^3.37.0 || ^4.0.0 || ^5.0.0" + }, + 
"peerDependenciesMeta": { + "svelte": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-svelte/node_modules/globals": { + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz", + "integrity": "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/eslint/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/esm-env": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/esm-env/-/esm-env-1.2.2.tgz", "integrity": "sha512-Epxrv+Nr/CaL4ZcFGPJIYLWFom+YeV1DqMLHJoEd9SYRxNbaFruBwfEX/kkHUJf55j2+TUbmDcmuilbP1TmXHA==" }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": 
"https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/esrap": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.2.1.tgz", @@ -3398,11 +4178,41 @@ "@jridgewell/sourcemap-codec": "^1.4.15" } }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", @@ -3475,6 +4285,24 @@ "integrity": "sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==", "dev": true }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, "node_modules/fdir": { "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", @@ -3491,6 +4319,18 @@ } } }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", @@ -3523,6 +4363,41 @@ "url": "https://opencollective.com/express" } }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true + }, "node_modules/follow-redirects": { "version": "1.15.11", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", @@ -3666,6 +4541,18 @@ "node": ">= 6" } }, + "node_modules/globals": { + "version": "17.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-17.0.0.tgz", + "integrity": "sha512-gv5BeD2EssA793rlFWVPMMCqefTlpusw6/2TbAVMy0FzcG8wKJn4O+NqJ4+XWmmwrayJgw5TzrmWjFgmz1XPqw==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", @@ -3825,6 +4712,15 @@ "postcss": "^8.1.0" } }, + "node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, "node_modules/import-cwd": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-3.0.0.tgz", @@ -3836,6 +4732,31 @@ "node": ">=8" } }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, "node_modules/import-from": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/import-from/-/import-from-3.0.0.tgz", @@ -3847,6 +4768,15 @@ "node": ">=8" } }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, "node_modules/indent-string": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", @@ -4008,6 +4938,12 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, "node_modules/istanbul-lib-coverage": { "version": "3.2.2", "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", @@ -4145,6 +5081,33 @@ } } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, "node_modules/kleur": { "version": "4.1.5", "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", @@ -4153,6 +5116,25 @@ "node": ">=6" } }, + "node_modules/known-css-properties": { + "version": "0.37.0", + "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.37.0.tgz", + "integrity": "sha512-JCDrsP4Z1Sb9JwG0aJ8Eo2r7k4Ou5MwmThS/6lcIe1ICyb7UBJKGRIUUdqc2ASdE/42lgz6zFUnzAIhtXnBVrQ==", + "dev": true + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/lightningcss": { 
"version": "1.30.2", "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz", @@ -4453,6 +5435,21 @@ "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", "integrity": "sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==" }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", @@ -4469,6 +5466,12 @@ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, "node_modules/lodash.uniq": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", @@ -4620,6 +5623,21 @@ "mini-svg-data-uri": "cli.js" } }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/mri": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", @@ -4659,6 +5677,12 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, "node_modules/negotiator": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", @@ -4806,6 +5830,23 @@ "opener": "bin/opener-bin.js" } }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/opts": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/opts/-/opts-2.0.2.tgz", @@ -4819,6 +5860,36 @@ "node": ">=4" } }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/p-queue": { "version": "6.6.2", "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", @@ -4845,6 +5916,18 @@ "node": ">=8" } }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/parse5": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", @@ -4878,6 +5961,24 @@ "node": ">= 0.8" } }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", @@ -5496,6 +6597,58 @@ "postcss": "^8.2.15" } }, + "node_modules/postcss-safe-parser": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-7.0.1.tgz", + "integrity": "sha512-0AioNCJZ2DPYz5ABT6bddIqlhgwhpHZ/l65YAYo0BCIn0xiDpsnTHz0gnoTGk0OXZW0JRs+cDwL8u/teRdz+8A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss-safe-parser" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-scss": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.9.tgz", + "integrity": "sha512-AjKOeiwAitL/MXxQW2DliT28EKukvvbEWx3LBmJIRN8KfBGZbRTxNYW0kSqi1COiTZ57nZ9NW06S6ux//N1c9A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss-scss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.4.29" + } + }, "node_modules/postcss-selector-parser": { "version": "6.1.2", "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", @@ -5554,6 +6707,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/prelude-ls": { + "version": "1.2.1", + 
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/pretty-format": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", @@ -6071,6 +7233,27 @@ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", "dev": true }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/side-channel": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", @@ -6257,6 +7440,18 @@ "node": ">=8" } }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/style-inject": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/style-inject/-/style-inject-0.3.0.tgz", @@ -6329,6 +7524,111 @@ "node": ">=18" } }, + "node_modules/svelte-check": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/svelte-check/-/svelte-check-4.3.5.tgz", + "integrity": "sha512-e4VWZETyXaKGhpkxOXP+B/d0Fp/zKViZoJmneZWe/05Y2aqSKj3YN2nLfYPJBQ87WEiY4BQCQ9hWGu9mPT1a1Q==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "chokidar": "^4.0.1", + "fdir": "^6.2.0", + "picocolors": "^1.0.0", + "sade": "^1.7.4" + }, + "bin": { + "svelte-check": "bin/svelte-check" + }, + "engines": { + "node": ">= 18.0.0" + }, + "peerDependencies": { + "svelte": "^4.0.0 || ^5.0.0-next.0", + "typescript": ">=5.0.0" + } + }, + "node_modules/svelte-check/node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/svelte-check/node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/svelte-eslint-parser": { + "version": "1.4.1", + "resolved": 
"https://registry.npmjs.org/svelte-eslint-parser/-/svelte-eslint-parser-1.4.1.tgz", + "integrity": "sha512-1eqkfQ93goAhjAXxZiu1SaKI9+0/sxp4JIWQwUpsz7ybehRE5L8dNuz7Iry7K22R47p5/+s9EM+38nHV2OlgXA==", + "dev": true, + "dependencies": { + "eslint-scope": "^8.2.0", + "eslint-visitor-keys": "^4.0.0", + "espree": "^10.0.0", + "postcss": "^8.4.49", + "postcss-scss": "^4.0.9", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0", + "pnpm": "10.24.0" + }, + "funding": { + "url": "https://github.com/sponsors/ota-meshi" + }, + "peerDependencies": { + "svelte": "^3.37.0 || ^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "svelte": { + "optional": true + } + } + }, + "node_modules/svelte-eslint-parser/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/svelte-eslint-parser/node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "dev": true, + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/svelte-preprocess": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/svelte-preprocess/-/svelte-preprocess-6.0.3.tgz", @@ -6593,12 +7893,36 @@ "node": ">=20" } }, + "node_modules/ts-api-utils": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz", + "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==", + "dev": true, + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "dev": true }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/type-is": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", @@ -6664,6 +7988,15 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -6891,6 +8224,21 @@ "node": ">=20" } }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": 
"sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/why-is-node-running": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", @@ -6907,6 +8255,15 @@ "node": ">=8" } }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -6981,6 +8338,18 @@ "url": "https://github.com/sponsors/eemeli" } }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/zimmerframe": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/zimmerframe/-/zimmerframe-1.1.4.tgz", diff --git a/frontend/package.json b/frontend/package.json index 8e3940f5..d730b45f 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -8,6 +8,8 @@ "dev": "npx rollup -c -w", "start": "sirv public --single --no-clear --dev --host", "generate:api": "openapi-ts", + "lint": "eslint src --ext .ts,.svelte", + "check": "svelte-check --tsconfig ./tsconfig.json", "test": "vitest run", "test:watch": "vitest", "test:ui": "vitest --ui", @@ -54,6 +56,7 @@ }, "devDependencies": { "@babel/runtime": "^7.24.7", + "@eslint/js": "^9.39.2", "@hey-api/openapi-ts": "0.90.1", "@playwright/test": "^1.52.0", "@rollup/plugin-alias": "^6.0.0", @@ -64,11 +67,18 @@ "@testing-library/jest-dom": "^6.6.3", "@testing-library/svelte": "^5.3.1", "@testing-library/user-event": "^14.6.1", + "@typescript-eslint/eslint-plugin": "^8.53.0", + "@typescript-eslint/parser": "^8.53.0", "@vitest/coverage-v8": "^4.0.16", + "eslint": "^9.39.2", + "eslint-plugin-svelte": "^3.14.0", "express": "^5.2.1", + "globals": "^17.0.0", "http-proxy": "^1.18.1", "jsdom": "^27.4.0", "rollup-plugin-serve": "^3.0.0", + "svelte-check": "^4.3.5", + "svelte-eslint-parser": "^1.4.1", "tailwindcss": "^4.1.13", "tslib": "^2.8.1", "typescript": "^5.7.2", diff --git a/frontend/playwright.config.ts b/frontend/playwright.config.ts index 4a60cea7..0a4b9303 100644 --- a/frontend/playwright.config.ts +++ b/frontend/playwright.config.ts @@ -10,11 +10,11 @@ export default defineConfig({ expect: { timeout: 3000, // 3s for assertions }, - reporter: process.env.CI ? [['html'], ['github']] : 'html', + reporter: process.env.CI ? [['list'], ['html'], ['github']] : 'list', use: { baseURL: 'https://localhost:5001', ignoreHTTPSErrors: true, - trace: 'on', + trace: 'retain-on-failure', screenshot: 'only-on-failure', }, projects: [ @@ -23,7 +23,6 @@ export default defineConfig({ use: { ...devices['Desktop Chrome'] }, }, ], - // In CI, frontend runs via docker-compose; locally, start dev server if needed webServer: process.env.CI ? 
undefined : { command: 'npm run dev', url: 'https://localhost:5001', diff --git a/frontend/rollup.config.js b/frontend/rollup.config.js index 818837e3..d94ab9d4 100644 --- a/frontend/rollup.config.js +++ b/frontend/rollup.config.js @@ -53,8 +53,6 @@ function startServer() { const proxyAgent = new https.Agent({ ca: fs.readFileSync(caPath), rejectUnauthorized: false, // Accept self-signed certificates in development - keepAlive: true, // Reuse connections to avoid TLS handshake per request - keepAliveMsecs: 1000 }); server = https.createServer(httpsOptions, (req, res) => { @@ -78,15 +76,6 @@ function startServer() { proxyRes.pipe(res, { end: true }); }); - // Socket timeout prevents hanging when backend is unreachable - proxyReq.on('socket', (socket) => { - socket.setTimeout(2000); - socket.on('timeout', () => { - console.error('Proxy socket timeout - backend unreachable'); - proxyReq.destroy(new Error('Socket timeout')); - }); - }); - proxyReq.on('error', (e) => { console.error(`Proxy request error: ${e.message}`); if (!res.headersSent) { diff --git a/frontend/src/App.svelte b/frontend/src/App.svelte index 956d912b..0e0372a3 100644 --- a/frontend/src/App.svelte +++ b/frontend/src/App.svelte @@ -1,6 +1,6 @@ - {#if event} + {#if event && eventData}

Basic Information

@@ -25,32 +45,32 @@ Event ID - {event.event.event_id} + {eventData.event_id} Event Type
- - + + - - {event.event.event_type} + + {eventData.event_type}
Timestamp - {formatTimestamp(event.event.timestamp)} + {formatTimestamp(eventData.timestamp)} Correlation ID - {event.event.correlation_id} + {eventData.correlation_id} Aggregate ID - {event.event.aggregate_id || '-'} + {eventData.aggregate_id || '-'} @@ -58,19 +78,19 @@

Metadata

-
{JSON.stringify(event.event.metadata, null, 2)}
+
{JSON.stringify(eventData.metadata, null, 2)}

Payload

-
{JSON.stringify(event.event.payload, null, 2)}
+
{JSON.stringify(eventData.payload, null, 2)}
- {#if event.related_events && event.related_events.length > 0} + {#if relatedEvents.length > 0}

Related Events

- {#each event.related_events as related} + {#each relatedEvents as related} diff --git a/frontend/src/components/admin/events/UserOverviewModal.svelte b/frontend/src/components/admin/events/UserOverviewModal.svelte index ad66f3c6..2fc90a72 100644 --- a/frontend/src/components/admin/events/UserOverviewModal.svelte +++ b/frontend/src/components/admin/events/UserOverviewModal.svelte @@ -1,10 +1,16 @@ @@ -75,11 +84,11 @@
- {#if overview.recent_events && overview.recent_events.length > 0} + {#if recentEvents.length > 0}

Recent Execution Events

- {#each overview.recent_events as ev} + {#each recentEvents as ev}
{getEventTypeLabel(ev.event_type) || ev.event_type} diff --git a/frontend/src/components/admin/sagas/SagaDetailsModal.svelte b/frontend/src/components/admin/sagas/SagaDetailsModal.svelte index a5f6dfd9..59bcd920 100644 --- a/frontend/src/components/admin/sagas/SagaDetailsModal.svelte +++ b/frontend/src/components/admin/sagas/SagaDetailsModal.svelte @@ -182,14 +182,6 @@
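The script-side hunks for the two modals above lost their bodies in extraction (the @@ -1,10 +1,16 @@ header survives with nothing under it), so only the template half of the refactor is visible: templates now read null-safe locals (eventData, relatedEvents, recentEvents) instead of drilling through a possibly-undefined wrapper such as event.event or overview.recent_events. A minimal sketch of the derived state those templates imply, written with the Svelte 5 runes this changeset already uses in AdminLayout; the prop shape and the EventRecord name are assumptions, not the files' actual code:

// Sketch only: assumed shape of the lost script-side change, not the real one.
// The `event` prop wraps the record plus its related events; derive each part
// once so the template never reaches through `event.event.*`.
let { event }: { event: { event: EventRecord; related_events?: EventRecord[] } | null } = $props();

const eventData = $derived(event?.event ?? null);            // drives {eventData.event_id} etc.
const relatedEvents = $derived(event?.related_events ?? []); // drives {#each relatedEvents as related}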
diff --git a/frontend/src/components/admin/sagas/SagaDetailsModal.svelte b/frontend/src/components/admin/sagas/SagaDetailsModal.svelte
index a5f6dfd9..59bcd920 100644
--- a/frontend/src/components/admin/sagas/SagaDetailsModal.svelte
+++ b/frontend/src/components/admin/sagas/SagaDetailsModal.svelte
@@ -182,14 +182,6 @@
   {/if}
-  {#if saga.context_data && Object.keys(saga.context_data).length > 0}
-    Context Data
-    {JSON.stringify(saga.context_data, null, 2)}
-  {/if}
 {/if}
 {/snippet}
diff --git a/frontend/src/components/admin/users/RateLimitsModal.svelte b/frontend/src/components/admin/users/RateLimitsModal.svelte
index ae056eb8..72cebf75 100644
--- a/frontend/src/components/admin/users/RateLimitsModal.svelte
+++ b/frontend/src/components/admin/users/RateLimitsModal.svelte
@@ -189,9 +189,9 @@
   disabled={config.bypass_rate_limit}
 />
 s
-{#if config.global_multiplier !== 1.0}
+{#if config.global_multiplier && config.global_multiplier !== 1.0}
-  (→ {Math.floor(rule.requests * config.global_multiplier)}/{rule.window_seconds}s)
+  (→ {Math.floor(rule.requests * (config.global_multiplier ?? 1))}/{rule.window_seconds}s)
 {/if}
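The RateLimitsModal hunk above stops assuming the API always supplies a multiplier: the hint renders only when global_multiplier is present and differs from 1.0, and the arithmetic falls back to the neutral 1. The same computation pulled out as a standalone sketch; the two interface shapes are assumptions inferred from the fields the template uses:

// Sketch: effective per-window request budget once the user's global multiplier applies.
interface RateLimitRule { requests: number; window_seconds: number; }
interface UserRateLimitConfig { global_multiplier?: number; bypass_rate_limit?: boolean; }

function effectiveRequests(rule: RateLimitRule, config: UserRateLimitConfig): number {
  // An absent multiplier is treated as the neutral 1.0, matching the `?? 1` above.
  return Math.floor(rule.requests * (config.global_multiplier ?? 1));
}

// effectiveRequests({ requests: 100, window_seconds: 60 }, { global_multiplier: 0.5 }) === 50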
diff --git a/frontend/src/components/editor/SavedScripts.svelte b/frontend/src/components/editor/SavedScripts.svelte
index b9dfc971..c4a3e165 100644
--- a/frontend/src/components/editor/SavedScripts.svelte
+++ b/frontend/src/components/editor/SavedScripts.svelte
@@ -5,6 +5,7 @@
   interface SavedScript {
     id: string;
     name: string;
+    script: string;
     lang?: string;
     lang_version?: string;
   }
diff --git a/frontend/src/lib/admin/constants.ts b/frontend/src/lib/admin/constants.ts
index ae15e73e..69470d92 100644
--- a/frontend/src/lib/admin/constants.ts
+++ b/frontend/src/lib/admin/constants.ts
@@ -2,6 +2,17 @@
  * Shared constants for admin pages
  */

+// Admin route definitions - single source of truth for sidebar and tests
+export const ADMIN_ROUTES = [
+  { path: '/admin/events', sidebarLabel: 'Event Browser', pageHeading: 'Event Browser' },
+  { path: '/admin/sagas', sidebarLabel: 'Sagas', pageHeading: 'Saga Management' },
+  { path: '/admin/users', sidebarLabel: 'Users', pageHeading: 'User Management' },
+  { path: '/admin/settings', sidebarLabel: 'Settings', pageHeading: 'System Settings' },
+] as const;
+
+export type AdminRoute = (typeof ADMIN_ROUTES)[number];
+export type AdminPath = AdminRoute['path'];
+
 // Common badge/status color classes
 export const STATUS_COLORS = {
   success: 'badge-success',
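ADMIN_ROUTES is declared `as const`, so the AdminRoute and AdminPath types stay pinned to the literal entries. The AdminLayout diff further down drops its hard-coded menuItems in favor of this constant; a sketch of the two consumers the comment promises, sidebar and tests (the mapping line and the spec file are illustrative, only ADMIN_ROUTES itself comes from this changeset):

// In AdminLayout.svelte (assumed): sidebar items derived from the shared constant.
const menuItems = ADMIN_ROUTES.map((r) => ({ href: r.path, label: r.sidebarLabel }));

// In a hypothetical Playwright spec: one navigation check per admin route.
import { test, expect } from '@playwright/test';
import { ADMIN_ROUTES } from '../src/lib/admin/constants';

for (const route of ADMIN_ROUTES) {
  test(`sidebar "${route.sidebarLabel}" loads ${route.path}`, async ({ page }) => {
    await page.goto(route.path);
    await expect(page.getByRole('heading', { name: route.pageHeading })).toBeVisible();
  });
}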
diff --git a/frontend/src/lib/admin/users/rateLimits.ts b/frontend/src/lib/admin/users/rateLimits.ts
index 2dcb23f3..7b94cbdf 100644
--- a/frontend/src/lib/admin/users/rateLimits.ts
+++ b/frontend/src/lib/admin/users/rateLimits.ts
@@ -30,7 +30,7 @@ export const ENDPOINT_GROUP_PATTERNS: Array<{ pattern: RegExp; group: EndpointGr
 export function detectGroupFromEndpoint(endpoint: string): EndpointGroup {
   // Strip regex anchors: leading ^, trailing $, and .* wildcards
-  const cleanEndpoint = endpoint.replace(/^\^/, '').replace(/\$$/, '').replaceAll('.*', '');
+  const cleanEndpoint = endpoint.replace(/^\^/, '').replace(/\$$/, '').replace(/\.\*/g, '');
   for (const { pattern, group } of ENDPOINT_GROUP_PATTERNS) {
     if (pattern.test(cleanEndpoint)) return group;
   }
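The replaceAll-to-replace swap in detectGroupFromEndpoint above is behavior-preserving, since both forms strip every literal ".*" occurrence; the likely motive (an assumption, not stated in the diff) is that String.prototype.replaceAll needs an ES2021 lib target while replace with a /g regex does not. A worked example of the full stripping chain, using an illustrative pattern string not taken from the file:

// '^/api/v1/executions/.*$'
//   -> '/api/v1/executions/.*$'   (drop leading ^)
//   -> '/api/v1/executions/.*'    (drop trailing $)
//   -> '/api/v1/executions/'      (drop literal .*)
const cleaned = '^/api/v1/executions/.*$'
  .replace(/^\^/, '')
  .replace(/\$$/, '')
  .replace(/\.\*/g, '');
// `cleaned` is the string each ENDPOINT_GROUP_PATTERNS entry is tested against.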
null); - formData = { - theme: data.theme || 'auto', - notifications: { - execution_completed: data.notifications?.execution_completed ?? true, - execution_failed: data.notifications?.execution_failed ?? true, - system_updates: data.notifications?.system_updates ?? true, - security_alerts: data.notifications?.security_alerts ?? true, - channels: [...(data.notifications?.channels || ['in_app'])] - }, - editor: { - theme: data.editor?.theme || 'auto', - font_size: data.editor?.font_size || 14, - tab_size: data.editor?.tab_size || 4, - use_tabs: data.editor?.use_tabs ?? false, - word_wrap: data.editor?.word_wrap ?? true, - show_line_numbers: data.editor?.show_line_numbers ?? true, - } - }; + if (data) { + formData = { + theme: data.theme || 'auto', + notifications: { + execution_completed: data.notifications?.execution_completed ?? true, + execution_failed: data.notifications?.execution_failed ?? true, + system_updates: data.notifications?.system_updates ?? true, + security_alerts: data.notifications?.security_alerts ?? true, + channels: [...(data.notifications?.channels || ['in_app'])] + }, + editor: { + theme: data.editor?.theme || 'auto', + font_size: data.editor?.font_size || 14, + tab_size: data.editor?.tab_size || 4, + use_tabs: data.editor?.use_tabs ?? false, + word_wrap: data.editor?.word_wrap ?? true, + show_line_numbers: data.editor?.show_line_numbers ?? true, + } + }; + } savedSnapshot = JSON.stringify(formData); } catch (err) { console.error('Failed to load settings:', err); @@ -184,10 +186,10 @@ } // Cache for history data - let historyCache = null; + let historyCache: typeof history | null = null; let historyCacheTime = 0; const HISTORY_CACHE_DURATION = 30000; // Cache for 30 seconds - + async function loadHistory() { showHistory = true; @@ -210,7 +212,7 @@ displayField: item.field, isRestore: item.reason?.includes('restore') })) - .sort((a, b) => new Date(b.timestamp) - new Date(a.timestamp)); + .sort((a, b) => new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()); historyCache = history; historyCacheTime = Date.now(); @@ -335,7 +337,7 @@ formData.theme = theme.value; showThemeDropdown = false; if (theme.value) { - setTheme(theme.value); + setTheme(theme.value as 'light' | 'dark' | 'auto'); } }} class:selected={formData.theme === theme.value} diff --git a/frontend/src/routes/admin/AdminEvents.svelte b/frontend/src/routes/admin/AdminEvents.svelte index 3fe75cfc..24fa5cae 100644 --- a/frontend/src/routes/admin/AdminEvents.svelte +++ b/frontend/src/routes/admin/AdminEvents.svelte @@ -105,7 +105,7 @@ } }), null); loading = false; - events = data?.events || []; + events = (data?.events ?? []) as EventResponse[]; totalEvents = data?.total || 0; } @@ -132,7 +132,7 @@ if (status.status === 'completed') { addToast(`Replay completed! Processed ${status.replayed_events} events successfully.`, 'success'); } else if (status.status === 'failed') { - addToast(`Replay failed: ${status.error || 'Unknown error'}`, 'error'); + addToast(`Replay failed: ${(status as { errors?: string[] }).errors?.[0] || 'Unknown error'}`, 'error'); } } } @@ -148,23 +148,28 @@ if (dryRun) { if (response?.events_preview && response.events_preview.length > 0) { - replayPreview = { ...response, eventId }; + replayPreview = { eventId, total_events: response.total_events, events_preview: (response.events_preview ?? []) as EventResponse[] }; showReplayPreview = true; } else { addToast(`Dry run: ${response?.total_events} events would be replayed`, 'info'); } } else { addToast(`Replay scheduled! 
Tracking progress...`, 'success'); - if (response?.session_id) { + const sessionId = response?.session_id; + if (sessionId) { activeReplaySession = { - session_id: response.session_id, + session_id: sessionId, status: 'scheduled', total_events: response.total_events, replayed_events: 0, - progress_percentage: 0 + progress_percentage: 0, + failed_events: 0, + skipped_events: 0, + correlation_id: '', + created_at: new Date().toISOString() }; - checkReplayStatus(response.session_id); - replayCheckInterval = setInterval(() => { checkReplayStatus(response.session_id); }, 2000); + checkReplayStatus(sessionId); + replayCheckInterval = setInterval(() => { checkReplayStatus(sessionId); }, 2000); } selectedEvent = null; } diff --git a/frontend/src/routes/admin/AdminLayout.svelte b/frontend/src/routes/admin/AdminLayout.svelte index 7dde4b78..8f4820b8 100644 --- a/frontend/src/routes/admin/AdminLayout.svelte +++ b/frontend/src/routes/admin/AdminLayout.svelte @@ -7,20 +7,13 @@ import Spinner from '$components/Spinner.svelte'; import type { Snippet } from 'svelte'; import { ShieldCheck } from '@lucide/svelte'; + import { ADMIN_ROUTES } from '$lib/admin/constants'; let { path = '', children }: { path?: string; children?: Snippet } = $props(); let user = $state<{ username: string; role: string } | null>(null); let loading = $state(true); - - const menuItems = [ - { href: '/admin/events', label: 'Event Browser' }, - { href: '/admin/sagas', label: 'Sagas' }, - { href: '/admin/users', label: 'Users' }, - { href: '/admin/settings', label: 'Settings' }, - ]; - onMount(async () => { // First verify authentication with the backend try { @@ -84,19 +77,19 @@
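The second AdminLayout hunk above (the sidebar markup) arrives truncated here; presumably it now derives its links from ADMIN_ROUTES in place of the deleted hardcoded menuItems. A minimal sketch of that mapping, assuming the component's existing `path` prop — `menuFor` and its `active` flag are illustrative names, not the actual markup:

```typescript
// Sketch only: derive sidebar entries from the shared route table.
// `path` and `sidebarLabel` are real fields on AdminRoute; everything
// else here (menuFor, href/label/active) is an assumed shape.
import { ADMIN_ROUTES, type AdminRoute } from '$lib/admin/constants';

function menuFor(currentPath: string): Array<{ href: AdminRoute['path']; label: string; active: boolean }> {
  return ADMIN_ROUTES.map((route) => ({
    href: route.path,
    label: route.sidebarLabel,
    // Highlight the route itself and any sub-path beneath it.
    active: currentPath === route.path || currentPath.startsWith(route.path + '/'),
  }));
}
```

With this shape, adding an admin page means one new entry in ADMIN_ROUTES; the sidebar (and anything else driven by the same array) picks it up automatically.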
diff --git a/frontend/src/routes/admin/AdminSagas.svelte b/frontend/src/routes/admin/AdminSagas.svelte
index 5a841c79..9405fbea 100644
--- a/frontend/src/routes/admin/AdminSagas.svelte
+++ b/frontend/src/routes/admin/AdminSagas.svelte
@@ -49,7 +49,7 @@
       query: {
         state: stateFilter || undefined,
         limit: pageSize,
-        offset: (currentPage - 1) * pageSize
+        skip: (currentPage - 1) * pageSize
       }
     }), null);
     loading = false;
diff --git a/frontend/src/routes/admin/AdminSettings.svelte b/frontend/src/routes/admin/AdminSettings.svelte
index e4981ff9..fc1f47ff 100644
--- a/frontend/src/routes/admin/AdminSettings.svelte
+++ b/frontend/src/routes/admin/AdminSettings.svelte
@@ -4,16 +4,23 @@
     getSystemSettingsApiV1AdminSettingsGet,
     updateSystemSettingsApiV1AdminSettingsPut,
     resetSystemSettingsApiV1AdminSettingsResetPost,
+    type SystemSettings,
+    type ExecutionLimitsSchema,
+    type SecuritySettingsSchema,
+    type MonitoringSettingsSchema,
   } from '$lib/api';
   import { addToast } from '$stores/toastStore';
   import AdminLayout from '$routes/admin/AdminLayout.svelte';
   import Spinner from '$components/Spinner.svelte';
 
-  let settings = $state<{
-    execution_limits: Record<string, unknown>;
-    security_settings: Record<string, unknown>;
-    monitoring_settings: Record<string, unknown>;
-  }>({
+  // Required version of SystemSettings for local state
+  interface RequiredSettings {
+    execution_limits: ExecutionLimitsSchema;
+    security_settings: SecuritySettingsSchema;
+    monitoring_settings: MonitoringSettingsSchema;
+  }
+
+  let settings = $state<RequiredSettings>({
     execution_limits: {},
     security_settings: {},
     monitoring_settings: {}
@@ -31,7 +38,11 @@
     try {
       const { data, error } = await getSystemSettingsApiV1AdminSettingsGet({});
       if (error) throw error;
-      settings = data;
+      if (data) settings = {
+        execution_limits: data.execution_limits ?? {},
+        security_settings: data.security_settings ?? {},
+        monitoring_settings: data.monitoring_settings ?? {}
+      };
     } catch (err) {
       console.error('Failed to load settings:', err);
       const msg = (err as Error)?.message || 'Unknown error';
@@ -64,7 +75,11 @@
     try {
       const { data, error } = await resetSystemSettingsApiV1AdminSettingsResetPost({});
       if (error) throw error;
-      settings = data;
+      if (data) settings = {
+        execution_limits: data.execution_limits ?? {},
+        security_settings: data.security_settings ?? {},
+        monitoring_settings: data.monitoring_settings ?? {}
+      };
       addToast('Settings reset to defaults', 'success');
     } catch (err) {
       const msg = (err as Error)?.message || 'Unknown error';
diff --git a/frontend/src/routes/admin/AdminUsers.svelte b/frontend/src/routes/admin/AdminUsers.svelte
index 71a0ab75..ac5111f6 100644
--- a/frontend/src/routes/admin/AdminUsers.svelte
+++ b/frontend/src/routes/admin/AdminUsers.svelte
@@ -116,8 +116,8 @@
       );
     }
     if (role !== 'all') filtered = filtered.filter(user => user.role === role);
-    if (status === 'active') filtered = filtered.filter(user => !user.is_disabled);
-    else if (status === 'disabled') filtered = filtered.filter(user => user.is_disabled);
+    if (status === 'active') filtered = filtered.filter(user => user.is_active !== false);
+    else if (status === 'disabled') filtered = filtered.filter(user => user.is_active === false);
     if (advanced.bypassRateLimit === 'yes') filtered = filtered.filter(user => user.bypass_rate_limit === true);
     else if (advanced.bypassRateLimit === 'no') filtered = filtered.filter(user => user.bypass_rate_limit !== true);
     if (advanced.hasCustomLimits === 'yes') filtered = filtered.filter(user => user.has_custom_limits === true);
@@ -136,7 +136,7 @@
 
   function openEditUserModal(user: UserResponse): void {
     editingUser = user;
-    userForm = { username: user.username, email: user.email || '', password: '', role: user.role, is_active: !user.is_disabled };
+    userForm = { username: user.username, email: user.email || '', password: '', role: user.role ?? 'user', is_active: user.is_active !== false };
     showUserModal = true;
   }
@@ -153,7 +153,7 @@
       result = await updateUserApiV1AdminUsersUserIdPut({ path: { user_id: editingUser.user_id }, body: updateData });
     } else {
       result = await createUserApiV1AdminUsersPost({
-        body: { username: userForm.username, email: userForm.email || null, password: userForm.password, role: userForm.role, is_active: userForm.is_active }
+        body: { username: userForm.username, email: userForm.email, password: userForm.password, role: userForm.role as 'user' | 'admin' | undefined, is_active: userForm.is_active }
       });
     }
     savingUser = false;
@@ -186,7 +186,7 @@
       path: { user_id: user.user_id }
     });
     const response = unwrap(result);
-    rateLimitConfig = response?.rate_limit_config || {
+    rateLimitConfig = (response?.rate_limit_config as UserRateLimit | undefined) || {
       user_id: user.user_id, rules: [], global_multiplier: 1.0, bypass_rate_limit: false, notes: ''
     };
     rateLimitUsage = response?.current_usage || {};
diff --git a/frontend/src/routes/admin/__tests__/AdminSagas.test.ts b/frontend/src/routes/admin/__tests__/AdminSagas.test.ts
index fc620370..a74f2a43 100644
--- a/frontend/src/routes/admin/__tests__/AdminSagas.test.ts
+++ b/frontend/src/routes/admin/__tests__/AdminSagas.test.ts
@@ -324,23 +324,6 @@ describe('AdminSagas', () => {
     });
   });
 
-  it('shows context data when available', async () => {
-    const user = userEvent.setup({ advanceTimers: vi.advanceTimersByTime });
-    const saga = createMockSaga({
-      context_data: { user_id: 'user-123', language: 'python' },
-    });
-    mocks.getSagaStatusApiV1SagasSagaIdGet.mockResolvedValue({ data: saga, error: null });
-    await renderWithSagas([saga]);
-
-    const viewButtons = screen.getAllByText(/view details/i);
-    await user.click(viewButtons[0]);
-
-    await waitFor(() => {
-      expect(screen.getByText(/context data/i)).toBeInTheDocument();
-      expect(screen.getByText(/user-123/)).toBeInTheDocument();
-    });
-  });
-
   it('closes modal on close button click', async () => {
     const user = userEvent.setup({ advanceTimers: vi.advanceTimersByTime });
     const saga = createMockSaga();
diff --git a/helm/integr8scode/templates/secrets/env-secret.yaml b/helm/integr8scode/templates/secrets/env-secret.yaml
index 0176d3c8..c331f0b5 100644
--- a/helm/integr8scode/templates/secrets/env-secret.yaml
+++ b/helm/integr8scode/templates/secrets/env-secret.yaml
@@ -54,7 +54,6 @@ stringData:
   DLQ_RETENTION_DAYS: {{ .Values.env.DLQ_RETENTION_DAYS | default "7" | quote }}
 
   # Rate limiting
-  RATE_LIMIT_ENABLED: {{ .Values.env.RATE_LIMIT_ENABLED | default "true" | quote }}
   RATE_LIMIT_DEFAULT_REQUESTS: {{ .Values.env.RATE_LIMIT_DEFAULT_REQUESTS | default "100" | quote }}
   RATE_LIMIT_DEFAULT_WINDOW: {{ .Values.env.RATE_LIMIT_DEFAULT_WINDOW | default "60" | quote }}
 
diff --git a/helm/integr8scode/values.yaml b/helm/integr8scode/values.yaml
index 40d105f5..b697243c 100644
--- a/helm/integr8scode/values.yaml
+++ b/helm/integr8scode/values.yaml
@@ -92,7 +92,6 @@ env:
   DLQ_RETENTION_DAYS: "7"
 
   # Rate limiting
-  RATE_LIMIT_ENABLED: "true"
   RATE_LIMIT_DEFAULT_REQUESTS: "100"
   RATE_LIMIT_DEFAULT_WINDOW: "60"
 
@@ -272,7 +271,7 @@ infrastructure:
   # Zookeeper
   zookeeper:
     enabled: true
-    image: confluentinc/cp-zookeeper:7.5.0
+    image: confluentinc/cp-zookeeper:7.8.2
     heapOpts: "-Xms256M -Xmx256M"
     resources:
       requests:
@@ -287,7 +286,7 @@ infrastructure:
   # Kafka
   kafka:
     enabled: true
-    image: confluentinc/cp-kafka:7.5.0
+    image: confluentinc/cp-kafka:7.8.2
     heapOpts: "-Xms256M -Xmx256M"
     autoCreateTopics: "true"
     # JAAS authentication credentials for Zookeeper communication
@@ -308,7 +307,7 @@ infrastructure:
   # Schema Registry
   schemaRegistry:
     enabled: true
-    image: confluentinc/cp-schema-registry:7.5.0
+    image: confluentinc/cp-schema-registry:7.8.2
     heapOpts: "-Xms256M -Xmx256M"
    resources:
      requests:
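The comment on ADMIN_ROUTES in constants.ts calls the array a single source of truth for the sidebar and for tests. A sketch of the test half under that reading, in the style of the vitest suites above; `renderAdminPage` is a hypothetical stand-in for whatever render helper the real suite uses (compare renderWithSagas in AdminSagas.test.ts):

```typescript
import { describe, it, expect } from 'vitest';
import { screen } from '@testing-library/svelte';
import { ADMIN_ROUTES } from '$lib/admin/constants';

// Hypothetical helper: mount the admin app at a given path.
declare function renderAdminPage(path: string): Promise<void>;

describe('admin routes', () => {
  // it.each generates one case per route, so a new ADMIN_ROUTES entry
  // is exercised without touching this file.
  it.each(ADMIN_ROUTES)('renders $pageHeading at $path', async (route) => {
    await renderAdminPage(route.path);
    expect(screen.getByRole('heading', { name: route.pageHeading })).toBeInTheDocument();
    expect(screen.getByRole('link', { name: route.sidebarLabel })).toBeInTheDocument();
  });
});
```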