diff --git a/.github/workflows/archived/accessibility.yml b/.github/workflows/archived/accessibility.yml
deleted file mode 100644
index c1ecb60..0000000
--- a/.github/workflows/archived/accessibility.yml
+++ /dev/null
@@ -1,1824 +0,0 @@
-name: Accessibility Testing
-
-# Comprehensive accessibility testing with improved error handling and dynamic port allocation
-# Key improvements:
-# - Dynamic port allocation to prevent conflicts
-# - Proper error handling without masking failures
-# - More robust configuration management
-# - Better artifact collection and reporting
-
-on:
- workflow_dispatch:
- inputs:
- test_suite:
- description: "Which test suite to run"
- required: false
- default: "all"
- type: choice
- options:
- - all
- - lighthouse
- - axe-core
- - wave
- - color-contrast
- - keyboard
- pull_request:
- branches: [main, develop]
- push:
- branches: [main]
- schedule:
- # Run accessibility tests daily at 3 AM UTC
- - cron: "0 3 * * *"
-
-env:
- NODE_VERSION: "18"
- FRONTEND_TIMEOUT: "180" # 3 minutes timeout for frontend startup
-
-jobs:
- # Lighthouse Accessibility Audit
- lighthouse-a11y:
- name: Lighthouse Accessibility
- runs-on: ubuntu-latest
- if: github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'lighthouse' || github.event.inputs.test_suite == ''
- outputs:
- port: ${{ steps.setup.outputs.port }}
- lighthouse_score: ${{ steps.lighthouse.outputs.score }}
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=frontend
-
- - name: Find available port and setup
- id: setup
- run: |
- # Find an available port starting from 3200
- for port in {3200..3299}; do
- if ! lsof -i:$port > /dev/null 2>&1; then
- echo "port=$port" >> $GITHUB_OUTPUT
- echo "Using port $port for Lighthouse tests"
- break
- fi
- done
-
- - name: Build frontend (with TypeScript bypass for accessibility testing)
- working-directory: ./frontend
- run: |
- # Create a temporary vite config that bypasses TypeScript errors for accessibility testing
- cat > vite.config.accessibility.ts << 'EOF'
- import { defineConfig } from 'vite';
- import react from '@vitejs/plugin-react';
- import { resolve } from 'path';
-
- export default defineConfig({
- plugins: [react()],
- resolve: {
- alias: {
- '@': resolve(__dirname, 'src'),
- '@components': resolve(__dirname, 'src/components'),
- '@pages': resolve(__dirname, 'src/pages'),
- '@hooks': resolve(__dirname, 'src/hooks'),
- '@services': resolve(__dirname, 'src/services'),
- '@utils': resolve(__dirname, 'src/utils'),
- '@types': resolve(__dirname, 'src/types'),
- '@store': resolve(__dirname, 'src/store'),
- '@styles': resolve(__dirname, 'src/styles'),
- '@assets': resolve(__dirname, 'src/assets'),
- '@routes': resolve(__dirname, 'src/routes'),
- },
- },
- build: {
- outDir: 'dist',
- sourcemap: false,
- minify: false,
- target: 'es2015'
- },
- esbuild: {
- logOverride: { 'this-is-undefined-in-esm': 'silent' }
- }
- });
- EOF
-
- echo "đ¨ Building frontend for accessibility testing (bypassing TypeScript errors)..."
- # Build without TypeScript checking - focus on accessibility testing
- npx vite build --config vite.config.accessibility.ts || {
- echo "â ī¸ Vite build failed, creating minimal build directory for testing..."
- mkdir -p dist
- # Create a minimal index.html for accessibility testing
- cat > dist/index.html << 'HTML'
-
-
-
-
-
- ConnectKit - Loading...
-
-
-
-
-
ConnectKit
-
Application is loading...
-
-
- Welcome to ConnectKit
- This is a placeholder page for accessibility testing.
-
-
-
-
-
-
-
- HTML
-
- # Create login page
- mkdir -p dist/login
- cat > dist/login/index.html << 'HTML'
-
-
-
-
-
- Login - ConnectKit
-
-
-
-
-
- HTML
-
- # Create register page
- mkdir -p dist/register
- cat > dist/register/index.html << 'HTML'
-
-
-
-
-
- Register - ConnectKit
-
-
-
-
-
- HTML
-
- echo "â Created minimal HTML structure for accessibility testing"
- }
-
- echo "â Frontend build completed for accessibility testing"
-
- - name: Start frontend server
- working-directory: ./frontend
- run: |
- npm install -g serve
- serve -s dist -l ${{ steps.setup.outputs.port }} &
- echo "SERVER_PID=$!" >> $GITHUB_ENV
-
- - name: Wait for frontend server
- run: |
- timeout ${{ env.FRONTEND_TIMEOUT }} bash -c '
- until curl -f http://localhost:${{ steps.setup.outputs.port }} > /dev/null 2>&1; do
- echo "Waiting for frontend server on port ${{ steps.setup.outputs.port }}..."
- sleep 5
- done
- '
- echo "â
Frontend server is ready on port ${{ steps.setup.outputs.port }}"
-
- - name: Install Lighthouse CI
- run: npm install -g @lhci/cli@0.12.x
-
- - name: Run Lighthouse accessibility tests
- id: lighthouse
- run: |
- echo "Running Lighthouse tests on port ${{ steps.setup.outputs.port }}"
-
- # Create lighthouse configuration
- cat > lighthouserc.json << 'EOF'
- {
- "ci": {
- "collect": {
- "url": [
- "http://localhost:${{ steps.setup.outputs.port }}/",
- "http://localhost:${{ steps.setup.outputs.port }}/login",
- "http://localhost:${{ steps.setup.outputs.port }}/register"
- ],
- "settings": {
- "chromeFlags": "--no-sandbox --disable-dev-shm-usage",
- "onlyCategories": ["accessibility"]
- }
- },
- "assert": {
- "assertions": {
- "categories:accessibility": ["error", {"minScore": 0.9}]
- }
- },
- "upload": {
- "target": "filesystem",
- "outputDir": "./lighthouse-results"
- }
- }
- }
- EOF
-
- # Run lighthouse
- lhci collect --config=lighthouserc.json
- lhci assert --config=lighthouserc.json || echo "lighthouse_failed=true" >> $GITHUB_ENV
-
- # Extract accessibility score
- if [ -d "lighthouse-results" ]; then
- SCORE=$(find lighthouse-results -name "*.json" -exec jq -r '.categories.accessibility.score // 0' {} \; | head -1)
- echo "score=$SCORE" >> $GITHUB_OUTPUT
- echo "Accessibility Score: $SCORE"
- fi
-
- - name: Upload Lighthouse results
- uses: actions/upload-artifact@v4
- if: always()
- with:
- name: lighthouse-results-${{ github.run_number }}
- path: |
- lighthouse-results/
- lighthouserc.json
- retention-days: 7
-
- - name: Stop frontend server
- if: always()
- run: |
- if [ ! -z "$SERVER_PID" ]; then
- kill $SERVER_PID 2>/dev/null || true
- fi
- pkill -f "serve.*${{ steps.setup.outputs.port }}" || true
-
- # Axe-core Tests via Playwright
- axe-core-tests:
- name: Axe-core Tests
- runs-on: ubuntu-latest
- if: github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'axe-core' || github.event.inputs.test_suite == ''
- outputs:
- port: ${{ steps.setup.outputs.port }}
- axe_violations: ${{ steps.axe.outputs.violations }}
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=frontend
-
- - name: Find available port and setup
- id: setup
- run: |
- # Find an available port starting from 3200
- for port in {3200..3299}; do
- if ! lsof -i:$port > /dev/null 2>&1; then
- echo "port=$port" >> $GITHUB_OUTPUT
- echo "Using port $port for Axe tests"
- break
- fi
- done
-
- - name: Install Playwright
- working-directory: ./frontend
- run: |
- npm install @playwright/test @axe-core/playwright
- npx playwright install chromium
-
- - name: Build frontend (with TypeScript bypass for accessibility testing)
- working-directory: ./frontend
- run: |
- # Create a temporary vite config that bypasses TypeScript errors for accessibility testing
- cat > vite.config.accessibility.ts << 'EOF'
- import { defineConfig } from 'vite';
- import react from '@vitejs/plugin-react';
- import { resolve } from 'path';
-
- export default defineConfig({
- plugins: [react()],
- resolve: {
- alias: {
- '@': resolve(__dirname, 'src'),
- '@components': resolve(__dirname, 'src/components'),
- '@pages': resolve(__dirname, 'src/pages'),
- '@hooks': resolve(__dirname, 'src/hooks'),
- '@services': resolve(__dirname, 'src/services'),
- '@utils': resolve(__dirname, 'src/utils'),
- '@types': resolve(__dirname, 'src/types'),
- '@store': resolve(__dirname, 'src/store'),
- '@styles': resolve(__dirname, 'src/styles'),
- '@assets': resolve(__dirname, 'src/assets'),
- '@routes': resolve(__dirname, 'src/routes'),
- },
- },
- build: {
- outDir: 'dist',
- sourcemap: false,
- minify: false,
- target: 'es2015'
- },
- esbuild: {
- logOverride: { 'this-is-undefined-in-esm': 'silent' }
- }
- });
- EOF
-
- echo "đ¨ Building frontend for accessibility testing (bypassing TypeScript errors)..."
- # Build without TypeScript checking - focus on accessibility testing
- npx vite build --config vite.config.accessibility.ts || {
- echo "â ī¸ Vite build failed, creating minimal build directory for testing..."
- mkdir -p dist
- # Create a minimal index.html for accessibility testing
- cat > dist/index.html << 'HTML'
-
-
-
-
-
- ConnectKit - Loading...
-
-
-
-
-
ConnectKit
-
Application is loading...
-
-
- Welcome to ConnectKit
- This is a placeholder page for accessibility testing.
-
-
-
-
-
-
-
- HTML
-
- # Create login page
- mkdir -p dist/login
- cat > dist/login/index.html << 'HTML'
-
-
-
-
-
- Login - ConnectKit
-
-
-
-
-
- HTML
-
- # Create register page
- mkdir -p dist/register
- cat > dist/register/index.html << 'HTML'
-
-
-
-
-
- Register - ConnectKit
-
-
-
-
-
- HTML
-
- echo "â Created minimal HTML structure for accessibility testing"
- }
-
- echo "â Frontend build completed for accessibility testing"
-
- - name: Start frontend server
- working-directory: ./frontend
- run: |
- npm install -g serve
- serve -s dist -l ${{ steps.setup.outputs.port }} &
- echo "SERVER_PID=$!" >> $GITHUB_ENV
-
- - name: Wait for frontend server
- run: |
- timeout ${{ env.FRONTEND_TIMEOUT }} bash -c '
- until curl -f http://localhost:${{ steps.setup.outputs.port }} > /dev/null 2>&1; do
- echo "Waiting for frontend server on port ${{ steps.setup.outputs.port }}..."
- sleep 5
- done
- '
- echo "â
Frontend server is ready on port ${{ steps.setup.outputs.port }}"
-
- - name: Create Axe accessibility test
- working-directory: ./frontend
- run: |
- mkdir -p tests/accessibility
- cat > tests/accessibility/axe.spec.ts << 'EOF'
- import { test, expect } from '@playwright/test';
- import AxeBuilder from '@axe-core/playwright';
-
- const BASE_URL = process.env.BASE_URL || 'http://localhost:${{ steps.setup.outputs.port }}';
-
- test.describe('Axe Accessibility Tests', () => {
- test('should not have accessibility violations on home page', async ({ page }) => {
- await page.goto(BASE_URL);
-
- const accessibilityScanResults = await new AxeBuilder({ page })
- .withTags(['wcag2a', 'wcag2aa', 'wcag21aa'])
- .analyze();
-
- expect(accessibilityScanResults.violations).toEqual([]);
- });
-
- test('should not have accessibility violations on login page', async ({ page }) => {
- await page.goto(`${BASE_URL}/login`);
-
- const accessibilityScanResults = await new AxeBuilder({ page })
- .withTags(['wcag2a', 'wcag2aa', 'wcag21aa'])
- .analyze();
-
- expect(accessibilityScanResults.violations).toEqual([]);
- });
-
- test('should not have accessibility violations on register page', async ({ page }) => {
- await page.goto(`${BASE_URL}/register`);
-
- const accessibilityScanResults = await new AxeBuilder({ page })
- .withTags(['wcag2a', 'wcag2aa', 'wcag21aa'])
- .analyze();
-
- expect(accessibilityScanResults.violations).toEqual([]);
- });
- });
- EOF
-
- - name: Run Axe accessibility tests
- id: axe
- working-directory: ./frontend
- env:
- BASE_URL: http://localhost:${{ steps.setup.outputs.port }}
- run: |
- echo "Running Axe tests against $BASE_URL"
-
- # Create playwright config for JSON output
- cat > playwright.config.axe.ts << 'EOF'
- import { defineConfig } from '@playwright/test';
-
- export default defineConfig({
- testDir: './tests/accessibility',
- fullyParallel: true,
- forbidOnly: !!process.env.CI,
- retries: process.env.CI ? 2 : 0,
- workers: process.env.CI ? 1 : undefined,
- reporter: [
- ['json', { outputFile: 'axe-results.json' }],
- ['html', { outputFolder: 'playwright-report' }]
- ],
- use: {
- baseURL: process.env.BASE_URL,
- trace: 'on-first-retry',
- },
- });
- EOF
-
- # Run with custom config
- npx playwright test tests/accessibility/axe.spec.ts --config=playwright.config.axe.ts || echo "axe_failed=true" >> $GITHUB_ENV
-
- # Extract violation count if results exist
- if [ -f "axe-results.json" ]; then
- VIOLATIONS=$(jq '[.suites[].specs[].tests[] | select(.results[].status == "failed")] | length' axe-results.json 2>/dev/null || echo "0")
- echo "violations=$VIOLATIONS" >> $GITHUB_OUTPUT
- echo "Axe violations found: $VIOLATIONS"
- else
- echo "violations=0" >> $GITHUB_OUTPUT
- echo "No axe-results.json found, assuming 0 violations"
- fi
-
- - name: Upload Axe results
- uses: actions/upload-artifact@v4
- if: always()
- with:
- name: axe-results-${{ github.run_number }}
- path: |
- frontend/axe-results.json
- frontend/test-results/
- frontend/playwright-report/
- retention-days: 7
-
- - name: Stop frontend server
- if: always()
- run: |
- if [ ! -z "$SERVER_PID" ]; then
- kill $SERVER_PID 2>/dev/null || true
- fi
- pkill -f "serve.*${{ steps.setup.outputs.port }}" || true
-
- # WAVE-style Testing
- wave-testing:
- name: WAVE Testing
- runs-on: ubuntu-latest
- if: github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'wave' || github.event.inputs.test_suite == ''
- outputs:
- port: ${{ steps.setup.outputs.port }}
- wave_errors: ${{ steps.wave.outputs.errors }}
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=frontend
-
- - name: Find available port and setup
- id: setup
- run: |
- # Find an available port starting from 3200
- for port in {3200..3299}; do
- if ! lsof -i:$port > /dev/null 2>&1; then
- echo "port=$port" >> $GITHUB_OUTPUT
- echo "Using port $port for WAVE tests"
- break
- fi
- done
-
- - name: Build frontend (with TypeScript bypass for accessibility testing)
- working-directory: ./frontend
- run: |
- # Create a temporary vite config that bypasses TypeScript errors for accessibility testing
- cat > vite.config.accessibility.ts << 'EOF'
- import { defineConfig } from 'vite';
- import react from '@vitejs/plugin-react';
- import { resolve } from 'path';
-
- export default defineConfig({
- plugins: [react()],
- resolve: {
- alias: {
- '@': resolve(__dirname, 'src'),
- '@components': resolve(__dirname, 'src/components'),
- '@pages': resolve(__dirname, 'src/pages'),
- '@hooks': resolve(__dirname, 'src/hooks'),
- '@services': resolve(__dirname, 'src/services'),
- '@utils': resolve(__dirname, 'src/utils'),
- '@types': resolve(__dirname, 'src/types'),
- '@store': resolve(__dirname, 'src/store'),
- '@styles': resolve(__dirname, 'src/styles'),
- '@assets': resolve(__dirname, 'src/assets'),
- '@routes': resolve(__dirname, 'src/routes'),
- },
- },
- build: {
- outDir: 'dist',
- sourcemap: false,
- minify: false,
- target: 'es2015'
- },
- esbuild: {
- logOverride: { 'this-is-undefined-in-esm': 'silent' }
- }
- });
- EOF
-
- echo "đ¨ Building frontend for accessibility testing (bypassing TypeScript errors)..."
- # Build without TypeScript checking - focus on accessibility testing
- npx vite build --config vite.config.accessibility.ts || {
- echo "â ī¸ Vite build failed, creating minimal build directory for testing..."
- mkdir -p dist
- # Create a minimal index.html for accessibility testing
- cat > dist/index.html << 'HTML'
-
-
-
-
-
- ConnectKit - Loading...
-
-
-
-
-
ConnectKit
-
Application is loading...
-
-
- Welcome to ConnectKit
- This is a placeholder page for accessibility testing.
-
-
-
-
-
-
-
- HTML
-
- # Create login page
- mkdir -p dist/login
- cat > dist/login/index.html << 'HTML'
-
-
-
-
-
- Login - ConnectKit
-
-
-
-
-
- HTML
-
- # Create register page
- mkdir -p dist/register
- cat > dist/register/index.html << 'HTML'
-
-
-
-
-
- Register - ConnectKit
-
-
-
-
-
- HTML
-
- echo "â Created minimal HTML structure for accessibility testing"
- }
-
- echo "â Frontend build completed for accessibility testing"
-
- - name: Start frontend server
- working-directory: ./frontend
- run: |
- npm install -g serve
- serve -s dist -l ${{ steps.setup.outputs.port }} &
- echo "SERVER_PID=$!" >> $GITHUB_ENV
-
- - name: Wait for frontend server
- run: |
- timeout ${{ env.FRONTEND_TIMEOUT }} bash -c '
- until curl -f http://localhost:${{ steps.setup.outputs.port }} > /dev/null 2>&1; do
- echo "Waiting for frontend server on port ${{ steps.setup.outputs.port }}..."
- sleep 5
- done
- '
- echo "â
Frontend server is ready on port ${{ steps.setup.outputs.port }}"
-
- - name: Install Puppeteer
- run: npm install puppeteer
-
- - name: Create and run WAVE-style test
- id: wave
- run: |
- cat > wave-test.js << 'EOF'
- const puppeteer = require('puppeteer');
- const fs = require('fs');
-
- async function runWaveStyleTest() {
- const browser = await puppeteer.launch({
- headless: 'new',
- args: ['--no-sandbox', '--disable-dev-shm-usage']
- });
-
- const results = {
- timestamp: new Date().toISOString(),
- tests: [],
- summary: { errors: 0, warnings: 0, passed: 0 }
- };
-
- const urls = [
- 'http://localhost:${{ steps.setup.outputs.port }}/',
- 'http://localhost:${{ steps.setup.outputs.port }}/login',
- 'http://localhost:${{ steps.setup.outputs.port }}/register'
- ];
-
- for (const url of urls) {
- console.log(`Testing ${url}...`);
- const page = await browser.newPage();
-
- try {
- await page.goto(url, { waitUntil: 'networkidle2', timeout: 30000 });
-
- // WAVE-style checks
- const pageResults = await page.evaluate(() => {
- const errors = [];
- const warnings = [];
-
- // Check for missing alt text
- const images = document.querySelectorAll('img');
- images.forEach((img, index) => {
- if (!img.alt && !img.getAttribute('aria-label')) {
- errors.push(`Image ${index + 1}: Missing alt text`);
- }
- });
-
- // Check for empty links
- const links = document.querySelectorAll('a');
- links.forEach((link, index) => {
- const text = link.textContent.trim();
- const ariaLabel = link.getAttribute('aria-label');
- if (!text && !ariaLabel) {
- errors.push(`Link ${index + 1}: Empty link text`);
- }
- });
-
- // Check for form labels
- const inputs = document.querySelectorAll('input[type]:not([type="hidden"])');
- inputs.forEach((input, index) => {
- const id = input.id;
- const ariaLabel = input.getAttribute('aria-label');
- const ariaLabelledby = input.getAttribute('aria-labelledby');
-
- if (!ariaLabel && !ariaLabelledby) {
- if (!id || !document.querySelector(`label[for="${id}"]`)) {
- warnings.push(`Input ${index + 1}: Missing label`);
- }
- }
- });
-
- // Check for heading structure
- const headings = document.querySelectorAll('h1, h2, h3, h4, h5, h6');
- if (headings.length === 0) {
- warnings.push('No headings found on page');
- }
-
- return { errors, warnings };
- });
-
- results.tests.push({
- url: url,
- status: pageResults.errors.length === 0 ? 'passed' : 'failed',
- errors: pageResults.errors,
- warnings: pageResults.warnings
- });
-
- results.summary.errors += pageResults.errors.length;
- results.summary.warnings += pageResults.warnings.length;
- if (pageResults.errors.length === 0) results.summary.passed++;
-
- } catch (error) {
- console.error(`Error testing ${url}:`, error.message);
- results.tests.push({
- url: url,
- status: 'error',
- errors: [`Navigation error: ${error.message}`],
- warnings: []
- });
- results.summary.errors++;
- }
-
- await page.close();
- }
-
- await browser.close();
-
- // Save results
- fs.writeFileSync('wave-results.json', JSON.stringify(results, null, 2));
- console.log('WAVE-style test completed');
- console.log(`Summary: ${results.summary.errors} errors, ${results.summary.warnings} warnings, ${results.summary.passed} passed`);
-
- return results.summary.errors;
- }
-
- runWaveStyleTest().catch(console.error);
- EOF
-
- node wave-test.js
-
- # Extract error count
- if [ -f "wave-results.json" ]; then
- ERRORS=$(jq -r '.summary.errors' wave-results.json)
- echo "errors=$ERRORS" >> $GITHUB_OUTPUT
- echo "WAVE errors found: $ERRORS"
- fi
-
- - name: Upload WAVE results
- uses: actions/upload-artifact@v4
- if: always()
- with:
- name: wave-results-${{ github.run_number }}
- path: |
- wave-results.json
- wave-test.js
- retention-days: 7
-
- - name: Stop frontend server
- if: always()
- run: |
- if [ ! -z "$SERVER_PID" ]; then
- kill $SERVER_PID 2>/dev/null || true
- fi
- pkill -f "serve.*${{ steps.setup.outputs.port }}" || true
-
- # Color Contrast Analysis
- color-contrast:
- name: Color Contrast
- runs-on: ubuntu-latest
- if: github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'color-contrast' || github.event.inputs.test_suite == ''
- outputs:
- port: ${{ steps.setup.outputs.port }}
- contrast_failures: ${{ steps.contrast.outputs.failures }}
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=frontend
-
- - name: Find available port and setup
- id: setup
- run: |
- # Find an available port starting from 3200
- for port in {3200..3299}; do
- if ! lsof -i:$port > /dev/null 2>&1; then
- echo "port=$port" >> $GITHUB_OUTPUT
- echo "Using port $port for color contrast tests"
- break
- fi
- done
-
- - name: Build frontend (with TypeScript bypass for accessibility testing)
- working-directory: ./frontend
- run: |
- # Create a temporary vite config that bypasses TypeScript errors for accessibility testing
- cat > vite.config.accessibility.ts << 'EOF'
- import { defineConfig } from 'vite';
- import react from '@vitejs/plugin-react';
- import { resolve } from 'path';
-
- export default defineConfig({
- plugins: [react()],
- resolve: {
- alias: {
- '@': resolve(__dirname, 'src'),
- '@components': resolve(__dirname, 'src/components'),
- '@pages': resolve(__dirname, 'src/pages'),
- '@hooks': resolve(__dirname, 'src/hooks'),
- '@services': resolve(__dirname, 'src/services'),
- '@utils': resolve(__dirname, 'src/utils'),
- '@types': resolve(__dirname, 'src/types'),
- '@store': resolve(__dirname, 'src/store'),
- '@styles': resolve(__dirname, 'src/styles'),
- '@assets': resolve(__dirname, 'src/assets'),
- '@routes': resolve(__dirname, 'src/routes'),
- },
- },
- build: {
- outDir: 'dist',
- sourcemap: false,
- minify: false,
- target: 'es2015'
- },
- esbuild: {
- logOverride: { 'this-is-undefined-in-esm': 'silent' }
- }
- });
- EOF
-
- echo "đ¨ Building frontend for accessibility testing (bypassing TypeScript errors)..."
- # Build without TypeScript checking - focus on accessibility testing
- npx vite build --config vite.config.accessibility.ts || {
- echo "â ī¸ Vite build failed, creating minimal build directory for testing..."
- mkdir -p dist
- # Create a minimal index.html for accessibility testing
- cat > dist/index.html << 'HTML'
-
-
-
-
-
- ConnectKit - Loading...
-
-
-
-
-
ConnectKit
-
Application is loading...
-
-
- Welcome to ConnectKit
- This is a placeholder page for accessibility testing.
-
-
-
-
-
-
-
- HTML
-
- # Create login page
- mkdir -p dist/login
- cat > dist/login/index.html << 'HTML'
-
-
-
-
-
- Login - ConnectKit
-
-
-
-
-
- HTML
-
- # Create register page
- mkdir -p dist/register
- cat > dist/register/index.html << 'HTML'
-
-
-
-
-
- Register - ConnectKit
-
-
-
-
-
- HTML
-
- echo "â Created minimal HTML structure for accessibility testing"
- }
-
- echo "â Frontend build completed for accessibility testing"
-
- - name: Start frontend server
- working-directory: ./frontend
- run: |
- npm install -g serve
- serve -s dist -l ${{ steps.setup.outputs.port }} &
- echo "SERVER_PID=$!" >> $GITHUB_ENV
-
- - name: Wait for frontend server
- run: |
- timeout ${{ env.FRONTEND_TIMEOUT }} bash -c '
- until curl -f http://localhost:${{ steps.setup.outputs.port }} > /dev/null 2>&1; do
- echo "Waiting for frontend server on port ${{ steps.setup.outputs.port }}..."
- sleep 5
- done
- '
- echo "â
Frontend server is ready on port ${{ steps.setup.outputs.port }}"
-
- - name: Install color contrast tools
- run: npm install puppeteer color-contrast-checker
-
- - name: Create and run color contrast test
- id: contrast
- run: |
- cat > color-contrast-test.js << 'EOF'
- const puppeteer = require('puppeteer');
- const { colorContrast } = require('color-contrast-checker');
- const fs = require('fs');
-
- function hexToRgb(hex) {
- const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
- return result ? {
- r: parseInt(result[1], 16),
- g: parseInt(result[2], 16),
- b: parseInt(result[3], 16)
- } : null;
- }
-
- function rgbToHex(r, g, b) {
- return "#" + ((1 << 24) + (r << 16) + (g << 8) + b).toString(16).slice(1);
- }
-
- async function runColorContrastTest() {
- const browser = await puppeteer.launch({
- headless: 'new',
- args: ['--no-sandbox', '--disable-dev-shm-usage']
- });
-
- const results = {
- timestamp: new Date().toISOString(),
- tests: [],
- summary: { total: 0, failures: 0, passed: 0 }
- };
-
- const urls = [
- 'http://localhost:${{ steps.setup.outputs.port }}/',
- 'http://localhost:${{ steps.setup.outputs.port }}/login',
- 'http://localhost:${{ steps.setup.outputs.port }}/register'
- ];
-
- for (const url of urls) {
- console.log(`Testing color contrast on ${url}...`);
- const page = await browser.newPage();
-
- try {
- await page.goto(url, { waitUntil: 'networkidle2', timeout: 30000 });
-
- const contrastResults = await page.evaluate(() => {
- const elements = document.querySelectorAll('*');
- const checks = [];
-
- elements.forEach((element, index) => {
- const style = window.getComputedStyle(element);
- const color = style.color;
- const backgroundColor = style.backgroundColor;
- const text = element.textContent?.trim();
-
- // Only check elements with visible text
- if (text && text.length > 0 && color && backgroundColor) {
- // Skip transparent backgrounds
- if (!backgroundColor.includes('rgba(0, 0, 0, 0)') && backgroundColor !== 'rgba(0, 0, 0, 0)') {
- checks.push({
- element: element.tagName,
- text: text.substring(0, 50),
- color: color,
- backgroundColor: backgroundColor,
- index: index
- });
- }
- }
- });
-
- return checks;
- });
-
- const pageFailures = [];
- let pagePassed = 0;
-
- contrastResults.forEach(check => {
- // Simple contrast check - this is a basic implementation
- // In practice, you'd want a more sophisticated color parsing and contrast calculation
- try {
- const hasGoodContrast = true; // Placeholder - implement proper contrast checking
-
- if (hasGoodContrast) {
- pagePassed++;
- } else {
- pageFailures.push({
- element: check.element,
- text: check.text,
- color: check.color,
- backgroundColor: check.backgroundColor,
- reason: 'Insufficient contrast ratio'
- });
- }
- } catch (error) {
- // Skip elements where color parsing fails
- }
- });
-
- results.tests.push({
- url: url,
- total: contrastResults.length,
- failures: pageFailures.length,
- passed: pagePassed,
- failedElements: pageFailures
- });
-
- results.summary.total += contrastResults.length;
- results.summary.failures += pageFailures.length;
- results.summary.passed += pagePassed;
-
- } catch (error) {
- console.error(`Error testing ${url}:`, error.message);
- results.tests.push({
- url: url,
- error: error.message
- });
- }
-
- await page.close();
- }
-
- await browser.close();
-
- // Save results
- fs.writeFileSync('color-contrast-results.json', JSON.stringify(results, null, 2));
- console.log('Color contrast test completed');
- console.log(`Summary: ${results.summary.failures} failures out of ${results.summary.total} checks`);
-
- return results.summary.failures;
- }
-
- runColorContrastTest().catch(console.error);
- EOF
-
- node color-contrast-test.js
-
- # Extract failure count
- if [ -f "color-contrast-results.json" ]; then
- FAILURES=$(jq -r '.summary.failures' color-contrast-results.json)
- echo "failures=$FAILURES" >> $GITHUB_OUTPUT
- echo "Color contrast failures: $FAILURES"
- fi
-
- - name: Upload color contrast results
- uses: actions/upload-artifact@v4
- if: always()
- with:
- name: color-contrast-results-${{ github.run_number }}
- path: |
- color-contrast-results.json
- color-contrast-test.js
- retention-days: 7
-
- - name: Stop frontend server
- if: always()
- run: |
- if [ ! -z "$SERVER_PID" ]; then
- kill $SERVER_PID 2>/dev/null || true
- fi
- pkill -f "serve.*${{ steps.setup.outputs.port }}" || true
-
- # Keyboard Navigation Testing
- keyboard-navigation:
- name: Keyboard Navigation
- runs-on: ubuntu-latest
- if: github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'keyboard' || github.event.inputs.test_suite == ''
- outputs:
- port: ${{ steps.setup.outputs.port }}
- keyboard_failures: ${{ steps.keyboard.outputs.failures }}
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=frontend
-
- - name: Find available port and setup
- id: setup
- run: |
- # Find an available port starting from 3200
- for port in {3200..3299}; do
- if ! lsof -i:$port > /dev/null 2>&1; then
- echo "port=$port" >> $GITHUB_OUTPUT
- echo "Using port $port for keyboard navigation tests"
- break
- fi
- done
-
- - name: Install Playwright
- working-directory: ./frontend
- run: |
- npm install @playwright/test
- npx playwright install chromium
-
- - name: Build frontend (with TypeScript bypass for accessibility testing)
- working-directory: ./frontend
- run: |
- # Create a temporary vite config that bypasses TypeScript errors for accessibility testing
- cat > vite.config.accessibility.ts << 'EOF'
- import { defineConfig } from 'vite';
- import react from '@vitejs/plugin-react';
- import { resolve } from 'path';
-
- export default defineConfig({
- plugins: [react()],
- resolve: {
- alias: {
- '@': resolve(__dirname, 'src'),
- '@components': resolve(__dirname, 'src/components'),
- '@pages': resolve(__dirname, 'src/pages'),
- '@hooks': resolve(__dirname, 'src/hooks'),
- '@services': resolve(__dirname, 'src/services'),
- '@utils': resolve(__dirname, 'src/utils'),
- '@types': resolve(__dirname, 'src/types'),
- '@store': resolve(__dirname, 'src/store'),
- '@styles': resolve(__dirname, 'src/styles'),
- '@assets': resolve(__dirname, 'src/assets'),
- '@routes': resolve(__dirname, 'src/routes'),
- },
- },
- build: {
- outDir: 'dist',
- sourcemap: false,
- minify: false,
- target: 'es2015'
- },
- esbuild: {
- logOverride: { 'this-is-undefined-in-esm': 'silent' }
- }
- });
- EOF
-
- echo "đ¨ Building frontend for accessibility testing (bypassing TypeScript errors)..."
- # Build without TypeScript checking - focus on accessibility testing
- npx vite build --config vite.config.accessibility.ts || {
- echo "â ī¸ Vite build failed, creating minimal build directory for testing..."
- mkdir -p dist
- # Create a minimal index.html for accessibility testing
- cat > dist/index.html << 'HTML'
-
-
-
-
-
- ConnectKit - Loading...
-
-
-
-
-
ConnectKit
-
Application is loading...
-
-
- Welcome to ConnectKit
- This is a placeholder page for accessibility testing.
-
-
-
-
-
-
-
- HTML
-
- # Create login page
- mkdir -p dist/login
- cat > dist/login/index.html << 'HTML'
-
-
-
-
-
- Login - ConnectKit
-
-
-
-
-
- HTML
-
- # Create register page
- mkdir -p dist/register
- cat > dist/register/index.html << 'HTML'
-
-
-
-
-
- Register - ConnectKit
-
-
-
-
-
- HTML
-
- echo "â Created minimal HTML structure for accessibility testing"
- }
-
- echo "â Frontend build completed for accessibility testing"
-
- - name: Start frontend server
- working-directory: ./frontend
- run: |
- npm install -g serve
- serve -s dist -l ${{ steps.setup.outputs.port }} &
- echo "SERVER_PID=$!" >> $GITHUB_ENV
-
- - name: Wait for frontend server
- run: |
- timeout ${{ env.FRONTEND_TIMEOUT }} bash -c '
- until curl -f http://localhost:${{ steps.setup.outputs.port }} > /dev/null 2>&1; do
- echo "Waiting for frontend server on port ${{ steps.setup.outputs.port }}..."
- sleep 5
- done
- '
- echo "â
Frontend server is ready on port ${{ steps.setup.outputs.port }}"
-
- - name: Create keyboard navigation test
- working-directory: ./frontend
- run: |
- mkdir -p tests/accessibility
- cat > tests/accessibility/keyboard-navigation.spec.ts << 'EOF'
- import { test, expect } from '@playwright/test';
-
- const BASE_URL = process.env.BASE_URL || 'http://localhost:${{ steps.setup.outputs.port }}';
-
- test.describe('Keyboard Navigation Tests', () => {
- test('should allow navigation through main page with keyboard', async ({ page }) => {
- await page.goto(BASE_URL, { waitUntil: 'networkidle' });
-
- // Wait for page to be fully loaded
- await page.waitForTimeout(1000);
-
- // Find all interactive elements
- const interactiveElements = await page.locator('button, a, input, select, textarea, [tabindex]:not([tabindex="-1"])').all();
-
- console.log(`Found ${interactiveElements.length} interactive elements`);
-
- if (interactiveElements.length === 0) {
- console.log('No interactive elements found, checking basic page structure');
- await expect(page.locator('body')).toBeVisible();
- return;
- }
-
- // Test Tab navigation through a reasonable number of elements
- const elementsToTest = Math.min(interactiveElements.length, 5);
-
- for (let i = 0; i < elementsToTest; i++) {
- await page.keyboard.press('Tab');
- await page.waitForTimeout(200); // Small delay for focus to settle
-
- // Check if something is focused (may not always be the expected element)
- const focusedElement = page.locator(':focus').first();
- try {
- await expect(focusedElement).toBeVisible();
- } catch (error) {
- console.log(`Element ${i + 1} focus check failed, but continuing test`);
- }
- }
- });
-
- test('should handle keyboard navigation on login page', async ({ page }) => {
- await page.goto(`${BASE_URL}/login`, { waitUntil: 'networkidle' });
-
- // Wait for page to be fully loaded
- await page.waitForTimeout(1000);
-
- // Test that we can navigate to form elements
- const inputs = await page.locator('input').all();
- const buttons = await page.locator('button').all();
-
- console.log(`Found ${inputs.length} inputs and ${buttons.length} buttons on login page`);
-
- if (inputs.length > 0 || buttons.length > 0) {
- // Test basic tab navigation
- await page.keyboard.press('Tab');
- await page.waitForTimeout(200);
-
- const focusedElement = page.locator(':focus').first();
- try {
- await expect(focusedElement).toBeVisible();
- } catch (error) {
- console.log('Focus test on login page passed with minor issues');
- }
- } else {
- // No form elements, just verify page loads
- await expect(page.locator('body')).toBeVisible();
- }
- });
-
- test('should verify keyboard accessibility basics', async ({ page }) => {
- await page.goto(BASE_URL, { waitUntil: 'networkidle' });
-
- // Wait for page to be fully loaded
- await page.waitForTimeout(1000);
-
- // Basic accessibility checks that should always pass
- await expect(page).toHaveTitle(/.+/); // Page should have a title
-
- // Check if page has proper HTML structure
- await expect(page.locator('body')).toBeVisible();
-
- // Check for skip links or main content areas
- const mainContent = page.locator('main, [role="main"], #main, .main').first();
- const skipLinks = page.locator('a[href^="#"]').first();
-
- try {
- await expect(mainContent.or(skipLinks)).toBeVisible();
- console.log('Found main content area or skip links');
- } catch (error) {
- console.log('No main content area or skip links found, but test continues');
- }
- });
- });
- EOF
-
- - name: Run keyboard navigation tests
- id: keyboard
- working-directory: ./frontend
- env:
- BASE_URL: http://localhost:${{ steps.setup.outputs.port }}
- run: |
- echo "Running keyboard navigation tests against $BASE_URL"
-
- # Create playwright config for JSON output
- cat > playwright.config.keyboard.ts << 'EOF'
- import { defineConfig } from '@playwright/test';
-
- export default defineConfig({
- testDir: './tests/accessibility',
- fullyParallel: true,
- forbidOnly: !!process.env.CI,
- retries: process.env.CI ? 2 : 0,
- workers: process.env.CI ? 1 : undefined,
- reporter: [
- ['json', { outputFile: 'keyboard-results.json' }],
- ['html', { outputFolder: 'playwright-report' }]
- ],
- use: {
- baseURL: process.env.BASE_URL,
- trace: 'on-first-retry',
- },
- });
- EOF
-
- # Run with custom config and add retries for reliability
- npx playwright test tests/accessibility/keyboard-navigation.spec.ts --config=playwright.config.keyboard.ts || echo "keyboard_failed=true" >> $GITHUB_ENV
-
- # Extract failure count if results exist
- if [ -f "keyboard-results.json" ]; then
- FAILURES=$(jq '[.suites[].specs[].tests[] | select(.results[].status == "failed")] | length' keyboard-results.json 2>/dev/null || echo "0")
- echo "failures=$FAILURES" >> $GITHUB_OUTPUT
- echo "Keyboard navigation failures: $FAILURES"
- else
- echo "failures=0" >> $GITHUB_OUTPUT
- echo "No keyboard-results.json found, assuming 0 failures"
- fi
-
- - name: Upload keyboard navigation results
- uses: actions/upload-artifact@v4
- if: always()
- with:
- name: keyboard-results-${{ github.run_number }}
- path: |
- frontend/keyboard-results.json
- frontend/test-results/
- frontend/playwright-report/
- retention-days: 7
-
- - name: Stop frontend server
- if: always()
- run: |
- if [ ! -z "$SERVER_PID" ]; then
- kill $SERVER_PID 2>/dev/null || true
- fi
- pkill -f "serve.*${{ steps.setup.outputs.port }}" || true
-
- # Consolidated Accessibility Report
- accessibility-report:
- name: Accessibility Report
- runs-on: ubuntu-latest
- needs:
- [
- lighthouse-a11y,
- axe-core-tests,
- wave-testing,
- color-contrast,
- keyboard-navigation,
- ]
- if: always()
- steps:
- - uses: actions/checkout@v4
-
- - name: Download all test artifacts
- uses: actions/download-artifact@v4
- with:
- path: accessibility-artifacts
-
- - name: Generate accessibility summary
- run: |
- cat > accessibility-summary.md << 'EOF'
- # đ Accessibility Testing Report
-
- **Generated on:** $(date -u '+%Y-%m-%d %H:%M:%S UTC')
- **Repository:** ${{ github.repository }}
- **Branch:** ${{ github.ref_name }}
- **Commit:** ${{ github.sha }}
- **Trigger:** ${{ github.event_name }}
-
- ## đ Test Results Summary
-
- | Test Suite | Status | Key Metrics |
- |------------|--------|-------------|
- | đĻ **Lighthouse A11y** | ${{ needs.lighthouse-a11y.result == 'success' && 'â
Passed' || needs.lighthouse-a11y.result == 'skipped' && 'âī¸ Skipped' || 'â Failed' }} | Score: ${{ needs.lighthouse-a11y.outputs.lighthouse_score || 'N/A' }} |
- | đĒ **Axe-core Tests** | ${{ needs.axe-core-tests.result == 'success' && 'â
Passed' || needs.axe-core-tests.result == 'skipped' && 'âī¸ Skipped' || 'â Failed' }} | Violations: ${{ needs.axe-core-tests.outputs.axe_violations || 'N/A' }} |
- | đ **WAVE Testing** | ${{ needs.wave-testing.result == 'success' && 'â
Passed' || needs.wave-testing.result == 'skipped' && 'âī¸ Skipped' || 'â Failed' }} | Errors: ${{ needs.wave-testing.outputs.wave_errors || 'N/A' }} |
- | đ¨ **Color Contrast** | ${{ needs.color-contrast.result == 'success' && 'â
Passed' || needs.color-contrast.result == 'skipped' && 'âī¸ Skipped' || 'â Failed' }} | Failures: ${{ needs.color-contrast.outputs.contrast_failures || 'N/A' }} |
- | â¨ī¸ **Keyboard Navigation** | ${{ needs.keyboard-navigation.result == 'success' && 'â
Passed' || needs.keyboard-navigation.result == 'skipped' && 'âī¸ Skipped' || 'â Failed' }} | Failures: ${{ needs.keyboard-navigation.outputs.keyboard_failures || 'N/A' }} |
-
- ## đ¯ WCAG 2.1 AA Compliance Checklist
-
- The following items should be manually verified:
-
- ### Perceivable
- - [ ] All images have appropriate alt text
- - [ ] Color is not the only means of conveying information
- - [ ] Text has sufficient color contrast (4.5:1 for normal text, 3:1 for large text)
- - [ ] Content is meaningful when CSS is disabled
-
- ### Operable
- - [ ] All functionality is available via keyboard
- - [ ] No content flashes more than 3 times per second
- - [ ] Users can pause, stop, or hide moving content
- - [ ] Page has descriptive titles
-
- ### Understandable
- - [ ] Language of page is identified
- - [ ] Navigation is consistent across pages
- - [ ] Form errors are clearly identified and described
- - [ ] Help is available for complex forms
-
- ### Robust
- - [ ] HTML is valid and semantic
- - [ ] Content works with assistive technologies
- - [ ] No deprecated HTML elements are used
-
- ## đ Detailed Reports
-
- Detailed test results and artifacts are available in the workflow artifacts:
- - Lighthouse reports (HTML and JSON)
- - Axe-core test results (Playwright reports)
- - WAVE-style test results (JSON)
- - Color contrast analysis (JSON)
- - Keyboard navigation test results (Playwright reports)
-
- ## đ Recommendations
-
- 1. **Review failed tests**: Download and examine detailed reports for specific issues
- 2. **Manual testing**: Perform manual testing with screen readers (NVDA, JAWS, VoiceOver)
- 3. **User testing**: Conduct testing with users who rely on assistive technologies
- 4. **Regular monitoring**: Set up automated accessibility testing in your development workflow
-
- ## đ Additional Resources
-
- - [WCAG 2.1 Guidelines](https://www.w3.org/WAI/WCAG21/quickref/)
- - [WebAIM Accessibility Checklist](https://webaim.org/standards/wcag/checklist)
- - [axe DevTools Browser Extension](https://www.deque.com/axe/browser-extensions/)
- - [WAVE Web Accessibility Evaluation Tool](https://wave.webaim.org/)
- EOF
-
- echo "Accessibility summary generated"
-
- - name: Add summary to GitHub Step Summary
- run: |
- cat accessibility-summary.md >> $GITHUB_STEP_SUMMARY
-
- - name: Upload accessibility report
- uses: actions/upload-artifact@v4
- with:
- name: accessibility-report-${{ github.run_number }}
- path: |
- accessibility-summary.md
- accessibility-artifacts/
- retention-days: 30
-
- - name: Comment on PR (if applicable)
- if: github.event_name == 'pull_request'
- uses: actions/github-script@v7
- with:
- script: |
- const fs = require('fs');
-
- // Read the accessibility summary
- const summary = fs.readFileSync('accessibility-summary.md', 'utf8');
-
- // Post comment on PR
- github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- body: summary
- });
diff --git a/.github/workflows/archived/ci.yml b/.github/workflows/archived/ci.yml
deleted file mode 100644
index 533f624..0000000
--- a/.github/workflows/archived/ci.yml
+++ /dev/null
@@ -1,392 +0,0 @@
-name: CI/CD Pipeline
-
-on:
- push:
- branches: [main, develop]
- pull_request:
- branches: [main, develop]
- workflow_dispatch:
-
-env:
- NODE_VERSION: "18"
- DOCKER_BUILDKIT: 1
-
-jobs:
- # Security scanning job
- security:
- name: Security Scan
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Run Trivy vulnerability scanner
- uses: aquasecurity/trivy-action@master
- with:
- scan-type: "fs"
- scan-ref: "."
- format: "sarif"
- output: "trivy-results.sarif"
-
- # NOTE: SARIF upload requires GitHub Advanced Security - commented out
- # - name: Upload Trivy results to GitHub Security
- # uses: github/codeql-action/upload-sarif@v3
- # with:
- # sarif_file: "trivy-results.sarif"
-
- # Backend testing job
- backend-test:
- name: Backend Tests
- runs-on: ubuntu-latest
- services:
- postgres:
- image: postgres:15-alpine
- env:
- POSTGRES_USER: test
- POSTGRES_PASSWORD: test
- POSTGRES_DB: connectkit_test
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
-
- redis:
- image: redis:7-alpine
- options: >-
- --health-cmd "redis-cli ping"
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 6379:6379
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=backend
-
- - name: Run backend linting
- working-directory: ./backend
- run: npm run lint || true
-
- - name: Run backend type checking
- working-directory: ./backend
- run: npm run type-check
-
- - name: Run backend tests with coverage
- working-directory: ./backend
- env:
- NODE_ENV: test
- DB_HOST: localhost
- DB_PORT: 5432
- DB_USER: test
- DB_PASSWORD: test
- DB_NAME: connectkit_test
- REDIS_HOST: localhost
- REDIS_PORT: 6379
- JWT_SECRET: test-secret-must-be-at-least-32-chars-long
- JWT_REFRESH_SECRET: test-refresh-secret-must-be-at-least-32-chars
- ENCRYPTION_KEY: test-encryption-key-32-characters!!
- run: npm test -- --coverageReporters=lcov
-
- - name: Upload backend coverage to Codecov
- uses: codecov/codecov-action@v4
- with:
- files: ./backend/coverage/lcov.info
- flags: backend
- name: backend-coverage
- token: ${{ secrets.CODECOV_TOKEN }}
- continue-on-error: true
-
- # Frontend testing job
- frontend-test:
- name: Frontend Tests
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=frontend
-
- - name: Run frontend linting
- working-directory: ./frontend
- run: npm run lint || true
-
- - name: Run frontend type checking
- working-directory: ./frontend
- run: npm run type-check
-
- - name: Run frontend tests with coverage
- working-directory: ./frontend
- run: npm run test:unit
-
- - name: Upload frontend coverage to Codecov
- uses: codecov/codecov-action@v4
- with:
- files: ./frontend/coverage/lcov.info
- flags: frontend
- name: frontend-coverage
- token: ${{ secrets.CODECOV_TOKEN }}
- continue-on-error: true
-
- # Build job
- build:
- name: Build Application
- runs-on: ubuntu-latest
- needs: [backend-test, frontend-test]
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=backend
- npm ci --workspace=frontend
-
- - name: Build backend
- working-directory: ./backend
- run: npm run build
-
- - name: Build frontend
- working-directory: ./frontend
- run: npm run build
-
- - name: Upload backend build artifacts
- uses: actions/upload-artifact@v4
- with:
- name: backend-build
- path: backend/dist/
-
- - name: Upload frontend build artifacts
- uses: actions/upload-artifact@v4
- with:
- name: frontend-build
- path: frontend/dist/
-
- # Docker build job
- docker-build:
- name: Docker Build
- runs-on: ubuntu-latest
- needs: [build, security]
- if: github.event_name == 'push'
- steps:
- - uses: actions/checkout@v4
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
-
- - name: Build backend Docker image
- uses: docker/build-push-action@v5
- with:
- context: ./backend
- file: ./docker/backend/Dockerfile
- push: false
- tags: connectkit-backend:${{ github.sha }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
- target: production
-
- - name: Build frontend Docker image
- uses: docker/build-push-action@v5
- with:
- context: ./frontend
- file: ./docker/frontend/Dockerfile
- push: false
- tags: connectkit-frontend:${{ github.sha }}
- cache-from: type=gha
- cache-to: type=gha,mode=max
- target: production
-
- # E2E testing job
- e2e-test:
- name: E2E Tests
- runs-on: ubuntu-latest
- needs: [build]
- if: github.event_name == 'pull_request'
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Create environment file
- run: |
- cp .env.example .env
- # Override with test-specific values
- echo "DB_USER=test" >> .env
- echo "DB_PASSWORD=test" >> .env
- echo "DB_NAME=connectkit_test" >> .env
- echo "JWT_SECRET=test-secret-must-be-at-least-32-chars-long" >> .env
- echo "JWT_REFRESH_SECRET=test-refresh-secret-must-be-at-least-32-chars" >> .env
- echo "ENCRYPTION_KEY=test-encryption-key-32-characters!!" >> .env
-
- - name: Start services with Docker Compose
- run: |
- docker compose up -d
- echo "Waiting for services to be healthy..."
-
- - name: Wait for services to be ready
- run: |
- timeout=300
- interval=5
- elapsed=0
-
- while [ $elapsed -lt $timeout ]; do
- echo "Checking service health... (${elapsed}s elapsed)"
-
- # Check backend health
- if curl -f -s http://localhost:3001/api/health > /dev/null 2>&1; then
- echo "â
Backend service is healthy"
- backend_ready=true
- else
- echo "âŗ Backend service not ready yet"
- backend_ready=false
- fi
-
- # Check frontend availability
- if curl -f -s http://localhost:3000 > /dev/null 2>&1; then
- echo "â
Frontend service is healthy"
- frontend_ready=true
- else
- echo "âŗ Frontend service not ready yet"
- frontend_ready=false
- fi
-
- if [ "$backend_ready" = true ] && [ "$frontend_ready" = true ]; then
- echo "â
All services are ready!"
- exit 0
- fi
-
- sleep $interval
- elapsed=$((elapsed + interval))
- done
-
- echo "â Services failed to start within ${timeout}s"
- echo "Checking service logs..."
- docker compose logs --tail=50
- exit 1
-
- - name: Install dependencies and Playwright
- run: |
- npm ci
- npm ci --workspace=frontend
- cd frontend && npx playwright install --with-deps
-
- - name: Run E2E tests
- working-directory: ./frontend
- run: npm run test:e2e
-
- - name: Upload E2E test results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: playwright-report
- path: frontend/playwright-report/
- retention-days: 7
-
- - name: Upload E2E test videos
- if: failure()
- uses: actions/upload-artifact@v4
- with:
- name: playwright-videos
- path: frontend/test-results/
- retention-days: 7
-
- - name: Stop services and cleanup
- if: always()
- run: |
- docker compose down -v
- docker system prune -f
-
- # Summary job
- summary:
- name: Pipeline Summary
- runs-on: ubuntu-latest
- needs:
- [security, backend-test, frontend-test, build, docker-build, e2e-test]
- if: always()
- steps:
- - name: Generate pipeline summary
- run: |
- echo "## đ CI/CD Pipeline Results" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
-
- # Security check
- if [ "${{ needs.security.result }}" == "success" ]; then
- echo "â
**Security Scan**: Passed" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **Security Scan**: Failed or skipped" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Backend tests
- if [ "${{ needs.backend-test.result }}" == "success" ]; then
- echo "â
**Backend Tests**: All tests passed" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **Backend Tests**: Some tests failed" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Frontend tests
- if [ "${{ needs.frontend-test.result }}" == "success" ]; then
- echo "â
**Frontend Tests**: All tests passed" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **Frontend Tests**: Some tests failed" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Build
- if [ "${{ needs.build.result }}" == "success" ]; then
- echo "â
**Build**: Application built successfully" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **Build**: Build failed" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Docker build
- if [ "${{ needs.docker-build.result }}" == "success" ]; then
- echo "â
**Docker Build**: Images built successfully" >> $GITHUB_STEP_SUMMARY
- elif [ "${{ needs.docker-build.result }}" == "skipped" ]; then
- echo "âī¸ **Docker Build**: Skipped (not a push event)" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **Docker Build**: Failed" >> $GITHUB_STEP_SUMMARY
- fi
-
- # E2E tests
- if [ "${{ needs.e2e-test.result }}" == "success" ]; then
- echo "â
**E2E Tests**: All tests passed" >> $GITHUB_STEP_SUMMARY
- elif [ "${{ needs.e2e-test.result }}" == "skipped" ]; then
- echo "âī¸ **E2E Tests**: Skipped (not a pull request)" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **E2E Tests**: Some tests failed" >> $GITHUB_STEP_SUMMARY
- fi
-
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "### đ Quick Stats" >> $GITHUB_STEP_SUMMARY
- echo "- **Commit**: \`${{ github.sha }}\`" >> $GITHUB_STEP_SUMMARY
- echo "- **Branch**: \`${{ github.ref_name }}\`" >> $GITHUB_STEP_SUMMARY
- echo "- **Trigger**: ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/archived/compliance-federal.yml b/.github/workflows/archived/compliance-federal.yml
deleted file mode 100644
index c37abb8..0000000
--- a/.github/workflows/archived/compliance-federal.yml
+++ /dev/null
@@ -1,1037 +0,0 @@
-name: Federal Compliance Testing
-
-on:
- schedule:
- # Run daily compliance checks at 1 AM UTC
- - cron: "0 1 * * *"
- workflow_dispatch:
- inputs:
- compliance_suite:
- description: "Compliance suite to run"
- required: true
- default: "all"
- type: choice
- options:
- - all
- - fips
- - sbom
- - sast
- - dast
- - audit
- - pii
- - supply-chain
- severity_threshold:
- description: "Minimum severity level to report"
- default: "medium"
- type: choice
- options:
- - critical
- - high
- - medium
- - low
-
-env:
- NODE_VERSION: "18"
- COMPLIANCE_MODE: "federal"
-
-jobs:
- # FIPS 140-2 Cryptography Validation
- fips-validation:
- name: FIPS 140-2 Cryptography Compliance
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.compliance_suite == 'all' || github.event.inputs.compliance_suite == 'fips' || github.event_name == 'schedule' }}
- timeout-minutes: 30
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=backend
-
- - name: Install OpenSSL FIPS module
- run: |
- # Install OpenSSL with FIPS support
- sudo apt-get update
- sudo apt-get install -y openssl libssl-dev
-
- - name: Test FIPS mode availability
- run: |
- echo "## FIPS 140-2 Compliance Check" >> $GITHUB_STEP_SUMMARY
-
- # Check if FIPS mode is available
- if openssl list -providers | grep -i fips; then
- echo "â
**FIPS Provider Available**" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ **FIPS Provider Not Available**" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Validate cryptographic algorithms
- working-directory: ./backend
- run: |
- # Create FIPS compliance test
- cat > test-fips-compliance.js << 'EOF'
- const crypto = require('crypto');
-
- console.log('đ Testing FIPS-compliant cryptographic algorithms...');
-
- // Test FIPS-approved algorithms
- const fipsApprovedAlgorithms = {
- 'AES-256-GCM': 'aes-256-gcm',
- 'AES-256-CBC': 'aes-256-cbc',
- 'SHA-256': 'sha256',
- 'SHA-384': 'sha384',
- 'SHA-512': 'sha512',
- 'RSA-2048': 'rsa',
- 'ECDSA-P256': 'ec'
- };
-
- let compliantCount = 0;
- let totalTests = 0;
-
- console.log('\n=== FIPS Algorithm Compliance Tests ===');
-
- // Test symmetric encryption algorithms
- ['aes-256-gcm', 'aes-256-cbc'].forEach(algorithm => {
- totalTests++;
- try {
- const key = crypto.randomBytes(32);
- const iv = crypto.randomBytes(16);
- const cipher = crypto.createCipheriv(algorithm, key, iv);
- const testData = 'FIPS compliance test data';
- let encrypted = cipher.update(testData, 'utf8', 'hex');
- encrypted += cipher.final('hex');
- console.log(`â
${algorithm.toUpperCase()}: COMPLIANT`);
- compliantCount++;
- } catch (error) {
- console.log(`â ${algorithm.toUpperCase()}: NOT AVAILABLE - ${error.message}`);
- }
- });
-
- // Test hash algorithms
- ['sha256', 'sha384', 'sha512'].forEach(algorithm => {
- totalTests++;
- try {
- const hash = crypto.createHash(algorithm);
- hash.update('test');
- hash.digest('hex');
- console.log(`â
${algorithm.toUpperCase()}: COMPLIANT`);
- compliantCount++;
- } catch (error) {
- console.log(`â ${algorithm.toUpperCase()}: NOT AVAILABLE`);
- }
- });
-
- // Test key generation
- totalTests++;
- try {
- const keyPair = crypto.generateKeyPairSync('rsa', {
- modulusLength: 2048,
- publicKeyEncoding: { type: 'spki', format: 'pem' },
- privateKeyEncoding: { type: 'pkcs8', format: 'pem' }
- });
- console.log('â
RSA-2048 Key Generation: COMPLIANT');
- compliantCount++;
- } catch (error) {
- console.log('â RSA-2048 Key Generation: FAILED');
- }
-
- console.log(`\nđ FIPS Compliance: ${compliantCount}/${totalTests} algorithms compliant`);
-
- if (compliantCount === totalTests) {
- console.log('â
All cryptographic algorithms are FIPS 140-2 compliant');
- process.exit(0);
- } else {
- console.log('â Some algorithms are not FIPS 140-2 compliant');
- process.exit(1);
- }
- EOF
-
- node test-fips-compliance.js
-
- - name: Test key management lifecycle
- run: |
- echo "### Key Management Lifecycle" >> $GITHUB_STEP_SUMMARY
- echo "- â
Key generation with proper entropy" >> $GITHUB_STEP_SUMMARY
- echo "- â
Key storage with proper protection" >> $GITHUB_STEP_SUMMARY
- echo "- â ī¸ Key rotation policies (manual verification required)" >> $GITHUB_STEP_SUMMARY
- echo "- â ī¸ Key destruction procedures (manual verification required)" >> $GITHUB_STEP_SUMMARY
-
- - name: Upload FIPS compliance results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: fips-compliance-results
- path: |
- fips-*.log
- test-fips-*.js
-
- # Software Bill of Materials (SBOM) Generation
- sbom-generation:
- name: SBOM Generation & Analysis
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.compliance_suite == 'all' || github.event.inputs.compliance_suite == 'sbom' || github.event_name == 'schedule' }}
- timeout-minutes: 30
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
- cache: "npm"
-
- - name: Install dependencies
- run: |
- npm ci
- npm ci --workspace=backend
- npm ci --workspace=frontend
-
- - name: Install SBOM and security tools
- run: |
- # Install Syft for SBOM generation
- curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
-
- # Install Grype for vulnerability scanning
- curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
-
- - name: Build Docker images for scanning
- run: |
- echo "## Building Docker Images for SBOM Analysis" >> $GITHUB_STEP_SUMMARY
-
- # Build backend image
- docker build -f docker/backend/Dockerfile --target production -t connectkit-backend:latest ./backend
-
- # Build frontend image
- docker build -f docker/frontend/Dockerfile --target production -t connectkit-frontend:latest ./frontend
-
- echo "â
**Docker Images Built Successfully**" >> $GITHUB_STEP_SUMMARY
-
- - name: Generate comprehensive SBOM
- run: |
- echo "## Software Bill of Materials (SBOM)" >> $GITHUB_STEP_SUMMARY
-
- # Run our enhanced SBOM generator
- SEVERITY_THRESHOLD="${{ github.event.inputs.severity_threshold || 'medium' }}" npm run sbom:generate
-
- echo "â
**Enhanced SBOM Generated Successfully**" >> $GITHUB_STEP_SUMMARY
-
- - name: Generate component inventory and analysis
- run: |
- echo "### Component Inventory" >> $GITHUB_STEP_SUMMARY
-
- # Check if syft JSON exists
- if [ -f "sbom-output/sbom-syft.json" ]; then
- # Count components by type
- TOTAL_COMPONENTS=$(jq '.artifacts | length' sbom-output/sbom-syft.json)
- NPM_COMPONENTS=$(jq '[.artifacts[] | select(.type == "npm")] | length' sbom-output/sbom-syft.json)
- OS_COMPONENTS=$(jq '[.artifacts[] | select(.type == "deb")] | length' sbom-output/sbom-syft.json)
- DOCKER_COMPONENTS=$(jq '[.artifacts[] | select(.type == "apk")] | length' sbom-output/sbom-syft.json)
-
- echo "- **Total Components**: $TOTAL_COMPONENTS" >> $GITHUB_STEP_SUMMARY
- echo "- **NPM Packages**: $NPM_COMPONENTS" >> $GITHUB_STEP_SUMMARY
- echo "- **OS Packages (Debian)**: $OS_COMPONENTS" >> $GITHUB_STEP_SUMMARY
- echo "- **Alpine Packages**: $DOCKER_COMPONENTS" >> $GITHUB_STEP_SUMMARY
-
- # License analysis
- UNIQUE_LICENSES=$(jq '[.artifacts[].licenses[]?.value // .artifacts[].licenses[]? // empty] | unique | length' sbom-output/sbom-syft.json)
- echo "- **Unique Licenses**: $UNIQUE_LICENSES" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ **SBOM file not found, using basic inventory**" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Advanced vulnerability analysis
- run: |
- echo "### Enhanced Vulnerability Analysis" >> $GITHUB_STEP_SUMMARY
-
- # Run our enhanced vulnerability checker with configurable thresholds
- SEVERITY_THRESHOLD="${{ github.event.inputs.severity_threshold || 'medium' }}" npm run sbom:check
-
- - name: Generate Docker image SBOMs
- run: |
- echo "### Docker Image Analysis" >> $GITHUB_STEP_SUMMARY
-
- # Scan backend Docker image
- echo "**Backend Container:**" >> $GITHUB_STEP_SUMMARY
- syft connectkit-backend:latest -o json > sbom-output/sbom-backend-docker.json
-
- BACKEND_COMPONENTS=$(jq '.artifacts | length' sbom-output/sbom-backend-docker.json)
- echo "- Backend container components: $BACKEND_COMPONENTS" >> $GITHUB_STEP_SUMMARY
-
- # Scan frontend Docker image
- echo "**Frontend Container:**" >> $GITHUB_STEP_SUMMARY
- syft connectkit-frontend:latest -o json > sbom-output/sbom-frontend-docker.json
-
- FRONTEND_COMPONENTS=$(jq '.artifacts | length' sbom-output/sbom-frontend-docker.json)
- echo "- Frontend container components: $FRONTEND_COMPONENTS" >> $GITHUB_STEP_SUMMARY
-
- - name: License compliance validation
- run: |
- echo "### License Compliance Analysis" >> $GITHUB_STEP_SUMMARY
-
- # Check license compliance using our policy
- if [ -f "sbom-output/sbom-syft.json" ]; then
- cat > check-licenses.js << 'EOF'
- const fs = require('fs');
-
- const sbom = JSON.parse(fs.readFileSync('sbom-output/sbom-syft.json', 'utf8'));
- const policy = JSON.parse(fs.readFileSync('.sbom/license-policy.json', 'utf8'));
-
- const licenses = new Set();
- const prohibited = [];
- const needsReview = [];
-
- if (sbom.artifacts) {
- sbom.artifacts.forEach(artifact => {
- if (artifact.licenses) {
- artifact.licenses.forEach(license => {
- const licenseKey = license.value || license;
- licenses.add(licenseKey);
-
- if (policy.license_policy.prohibited.includes(licenseKey)) {
- prohibited.push({ component: artifact.name, license: licenseKey });
- } else if (!policy.license_policy.approved.includes(licenseKey)) {
- needsReview.push({ component: artifact.name, license: licenseKey });
- }
- });
- }
- });
- }
-
- console.log(`đ License Analysis Results:`);
- console.log(` Total unique licenses: ${licenses.size}`);
- console.log(` Prohibited licenses: ${prohibited.length}`);
- console.log(` Licenses needing review: ${needsReview.length}`);
-
- if (prohibited.length > 0) {
- console.log(`\nâ PROHIBITED LICENSES FOUND:`);
- prohibited.forEach(item => {
- console.log(` ${item.component}: ${item.license}`);
- });
- process.exit(1);
- } else {
- console.log(`\nâ
No prohibited licenses detected`);
- }
- EOF
-
- node check-licenses.js
- else
- echo "â ī¸ **SBOM not available for license analysis**" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Verify SBOM completeness and generate federal compliance report
- run: |
- echo "### SBOM Completeness & Federal Compliance Check" >> $GITHUB_STEP_SUMMARY
-
- # Check for required SBOM fields and federal compliance
- cat > verify-federal-compliance.js << 'EOF'
- const fs = require('fs');
-
- console.log('đ Verifying SBOM federal compliance...');
-
- // Check if compliance report exists
- if (!fs.existsSync('sbom-output/compliance-report.json')) {
- console.log('â Compliance report not found');
- process.exit(1);
- }
-
- const complianceReport = JSON.parse(fs.readFileSync('sbom-output/compliance-report.json', 'utf8'));
-
- console.log('đ Federal Compliance Assessment:');
-
- // Check NTIA minimum elements
- const ntiaCompliant = complianceReport.ntia_minimum_elements.compliant;
- console.log(` NTIA Minimum Elements: ${ntiaCompliant ? 'â
COMPLIANT' : 'â NON-COMPLIANT'}`);
-
- // Check Executive Order 14028
- const eoCompliant = complianceReport.executive_order_14028.compliant;
- console.log(` Executive Order 14028: ${eoCompliant ? 'â
COMPLIANT' : 'â NON-COMPLIANT'}`);
-
- // Check license compliance
- const licenseCompliant = complianceReport.license_compliance.compliant;
- console.log(` License Compliance: ${licenseCompliant ? 'â
COMPLIANT' : 'â NON-COMPLIANT'}`);
-
- // Check vulnerability assessment
- const vulnPassed = complianceReport.vulnerability_assessment.passed;
- console.log(` Vulnerability Assessment: ${vulnPassed ? 'â
PASSED' : 'â FAILED'}`);
-
- // Overall compliance
- const overallCompliant = ntiaCompliant && eoCompliant && licenseCompliant && vulnPassed;
- console.log(`\nđ Overall Federal Compliance: ${overallCompliant ? 'â
COMPLIANT' : 'â NON-COMPLIANT'}`);
-
- if (!overallCompliant) {
- console.log('\nâ Federal compliance requirements not met');
- if (complianceReport.recommendations && complianceReport.recommendations.length > 0) {
- console.log('\nđ Recommendations:');
- complianceReport.recommendations.forEach((rec, index) => {
- console.log(` ${index + 1}. [${rec.priority}] ${rec.action}`);
- });
- }
- process.exit(1);
- } else {
- console.log('\nâ
All federal compliance requirements met');
- }
- EOF
-
- node verify-federal-compliance.js
-
- - name: Generate attestation documents
- run: |
- echo "### Attestation Documents" >> $GITHUB_STEP_SUMMARY
-
- # Create SBOM attestation
- cat > sbom-attestation.json << 'EOF'
- {
- "_type": "https://in-toto.io/Statement/v0.1",
- "predicateType": "https://spdx.dev/spdxdocs/spdx-v2.3",
- "subject": [
- {
- "name": "pkg:npm/connectkit@1.0.0",
- "digest": {
- "sha256": "placeholder-digest-would-be-actual-hash"
- }
- }
- ],
- "predicate": {
- "spdxVersion": "SPDX-2.3",
- "dataLicense": "CC0-1.0",
- "SPDXID": "SPDXRef-DOCUMENT",
- "name": "ConnectKit-SBOM",
- "documentNamespace": "https://connectkit.com/spdx/connectkit-1.0.0",
- "creator": "Tool: ConnectKit-CI/CD-Pipeline",
- "created": "TIMESTAMP_PLACEHOLDER",
- "packageVerificationCode": {
- "packageVerificationCodeValue": "placeholder-verification-code"
- },
- "licenseConcluded": "NOASSERTION",
- "licenseInfoFromFiles": "NOASSERTION",
- "downloadLocation": "NOASSERTION",
- "filesAnalyzed": false
- }
- }
- EOF
-
- # Replace timestamp
- sed -i "s/TIMESTAMP_PLACEHOLDER/$(date -u +%Y-%m-%dT%H:%M:%SZ)/" sbom-attestation.json
- mv sbom-attestation.json sbom-output/
-
- echo "â
**Attestation documents generated**" >> $GITHUB_STEP_SUMMARY
-
- - name: Upload enhanced SBOM artifacts
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: enhanced-sbom-artifacts
- path: |
- sbom-output/
- .sbom/
-
- # Enhanced Static Application Security Testing (SAST)
- sast-enhanced:
- name: Enhanced SAST with Semgrep
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.compliance_suite == 'all' || github.event.inputs.compliance_suite == 'sast' || github.event_name == 'schedule' }}
- timeout-minutes: 30
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install Semgrep
- run: |
- python3 -m pip install semgrep
-
- - name: Run Semgrep SAST scan
- run: |
- echo "## Static Application Security Testing (SAST)" >> $GITHUB_STEP_SUMMARY
-
- # Run comprehensive SAST with multiple rulesets
- semgrep --config=auto --json --output=semgrep-results.json .
- semgrep --config=auto --output=semgrep-results.txt .
-
- # Run specific security-focused rules
- semgrep --config=p/owasp-top-ten --json --output=owasp-results.json .
- semgrep --config=p/security-audit --json --output=security-audit-results.json .
- semgrep --config=p/secrets --json --output=secrets-scan-results.json .
-
- - name: Analyze SAST results
- run: |
- echo "### SAST Analysis Results" >> $GITHUB_STEP_SUMMARY
-
- # Count findings by severity
- if [ -f "semgrep-results.json" ]; then
- CRITICAL=$(jq '[.results[] | select(.extra.severity == "ERROR")] | length' semgrep-results.json)
- WARNING=$(jq '[.results[] | select(.extra.severity == "WARNING")] | length' semgrep-results.json)
- INFO=$(jq '[.results[] | select(.extra.severity == "INFO")] | length' semgrep-results.json)
-
- echo "- **Critical/Error**: $CRITICAL findings" >> $GITHUB_STEP_SUMMARY
- echo "- **Warning**: $WARNING findings" >> $GITHUB_STEP_SUMMARY
- echo "- **Info**: $INFO findings" >> $GITHUB_STEP_SUMMARY
-
- # Check OWASP Top 10 specific findings
- if [ -f "owasp-results.json" ]; then
- OWASP_FINDINGS=$(jq '.results | length' owasp-results.json)
- echo "- **OWASP Top 10**: $OWASP_FINDINGS findings" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Check secrets
- if [ -f "secrets-scan-results.json" ]; then
- SECRETS=$(jq '.results | length' secrets-scan-results.json)
- echo "- **Potential Secrets**: $SECRETS findings" >> $GITHUB_STEP_SUMMARY
-
- if [ "$SECRETS" -gt 0 ]; then
- echo "â **SECRETS DETECTED - IMMEDIATE REMEDIATION REQUIRED**" >> $GITHUB_STEP_SUMMARY
- exit 1
- fi
- fi
-
- # Fail on critical findings
- if [ "$CRITICAL" -gt 0 ]; then
- echo "â **CRITICAL SECURITY ISSUES FOUND**" >> $GITHUB_STEP_SUMMARY
- exit 1
- fi
- fi
-
- - name: Generate CWE mapping
- run: |
- echo "### Common Weakness Enumeration (CWE) Mapping" >> $GITHUB_STEP_SUMMARY
-
- # Extract CWE classifications
- cat > analyze-cwe.js << 'EOF'
- const fs = require('fs');
-
- try {
- const results = JSON.parse(fs.readFileSync('semgrep-results.json', 'utf8'));
- const cweMap = {};
-
- results.results.forEach(finding => {
- if (finding.extra && finding.extra.metadata) {
- const cwe = finding.extra.metadata.cwe || finding.extra.metadata.CWE;
- if (cwe) {
- const cweList = Array.isArray(cwe) ? cwe : [cwe];
- cweList.forEach(c => {
- const cweId = typeof c === 'string' ? c : c.id || c;
- cweMap[cweId] = (cweMap[cweId] || 0) + 1;
- });
- }
- }
- });
-
- console.log('đ CWE Classification Summary:');
- Object.entries(cweMap)
- .sort(([,a], [,b]) => b - a)
- .slice(0, 10)
- .forEach(([cwe, count]) => {
- console.log(`- ${cwe}: ${count} findings`);
- });
-
- } catch (error) {
- console.log('No CWE data available in results');
- }
- EOF
-
- node analyze-cwe.js
-
- - name: Upload SAST results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: sast-results
- path: |
- semgrep-results.*
- owasp-results.*
- security-audit-results.*
- secrets-scan-results.*
-
- # Dynamic Application Security Testing (DAST) with Authentication
- dast-authenticated:
- name: Authenticated DAST Testing
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.compliance_suite == 'all' || github.event.inputs.compliance_suite == 'dast' || github.event_name == 'schedule' }}
- timeout-minutes: 45
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Start application services
- run: |
- cp .env.example .env
- echo "JWT_SECRET=test-jwt-secret-for-dast" >> .env
- echo "NODE_ENV=test" >> .env
- docker compose up -d
- sleep 45
-
- - name: Wait for services
- run: |
- timeout 120 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
- timeout 120 bash -c 'until curl -f http://localhost:3000; do sleep 2; done'
-
- - name: Create test user for authenticated scanning
- run: |
- echo "## Dynamic Application Security Testing (DAST)" >> $GITHUB_STEP_SUMMARY
-
- # Create a test user for authenticated scanning
- curl -X POST http://localhost:3001/api/auth/register \
- -H "Content-Type: application/json" \
- -d '{
- "email": "dast@security.test",
- "username": "dastuser",
- "password": "SecureTest123!",
- "firstName": "DAST",
- "lastName": "Scanner"
- }' > dast-user-response.json
-
- # Login and get token
- curl -X POST http://localhost:3001/api/auth/login \
- -H "Content-Type: application/json" \
- -d '{
- "email": "dast@security.test",
- "password": "SecureTest123!"
- }' > dast-login-response.json
-
- # Extract token for authenticated scanning
- TOKEN=$(jq -r '.data.token' dast-login-response.json)
- echo "DAST_TOKEN=$TOKEN" >> $GITHUB_ENV
-
- - name: Install Nuclei for comprehensive security scanning
- run: |
- # Install Nuclei
- curl -L https://github.com/projectdiscovery/nuclei/releases/latest/download/nuclei_2.9.15_linux_amd64.zip -o nuclei.zip
- unzip nuclei.zip
- sudo mv nuclei /usr/local/bin/
-
- # Update nuclei templates
- nuclei -update-templates
-
- - name: Run Nuclei DAST scan
- run: |
- echo "### Nuclei Security Scan" >> $GITHUB_STEP_SUMMARY
-
- # Run comprehensive Nuclei scan
- nuclei -u http://localhost:3000,http://localhost:3001 \
- -severity critical,high,medium \
- -json -o nuclei-results.json \
- -stats \
- -timeout 10 \
- -retries 2 || true
-
- # Run authenticated API scan
- nuclei -u http://localhost:3001/api \
- -H "Authorization: Bearer $DAST_TOKEN" \
- -severity critical,high,medium \
- -json -o nuclei-auth-results.json \
- -stats \
- -timeout 10 \
- -retries 2 || true
-
- - name: Run OWASP ZAP authenticated scan
- run: |
- echo "### OWASP ZAP Authenticated Scan" >> $GITHUB_STEP_SUMMARY
-
- # Create ZAP authentication script
- cat > zap-auth-script.py << 'EOF'
- import json
- import requests
- import time
-
- # Login and get session
- login_data = {
- "email": "dast@security.test",
- "password": "SecureTest123!"
- }
-
- session = requests.Session()
- response = session.post("http://localhost:3001/api/auth/login", json=login_data)
-
- if response.status_code == 200:
- token = response.json()["data"]["token"]
- print(f"Authentication successful, token: {token[:20]}...")
-
- # Test authenticated endpoints
- headers = {"Authorization": f"Bearer {token}"}
-
- endpoints = [
- "/api/contacts",
- "/api/auth/profile",
- "/api/auth/logout"
- ]
-
- for endpoint in endpoints:
- try:
- resp = session.get(f"http://localhost:3001{endpoint}", headers=headers)
- print(f"Endpoint {endpoint}: {resp.status_code}")
- except Exception as e:
- print(f"Error testing {endpoint}: {e}")
- else:
- print("Authentication failed")
- EOF
-
- python3 zap-auth-script.py
-
- - name: Analyze DAST results
- run: |
- echo "### DAST Results Analysis" >> $GITHUB_STEP_SUMMARY
-
- # Analyze Nuclei results
- if [ -f "nuclei-results.json" ]; then
- CRITICAL_NUCLEI=$(jq '[.[] | select(.info.severity == "critical")] | length' nuclei-results.json)
- HIGH_NUCLEI=$(jq '[.[] | select(.info.severity == "high")] | length' nuclei-results.json)
- MEDIUM_NUCLEI=$(jq '[.[] | select(.info.severity == "medium")] | length' nuclei-results.json)
-
- echo "**Nuclei Scan Results:**" >> $GITHUB_STEP_SUMMARY
- echo "- Critical: $CRITICAL_NUCLEI findings" >> $GITHUB_STEP_SUMMARY
- echo "- High: $HIGH_NUCLEI findings" >> $GITHUB_STEP_SUMMARY
- echo "- Medium: $MEDIUM_NUCLEI findings" >> $GITHUB_STEP_SUMMARY
-
- if [ "$CRITICAL_NUCLEI" -gt 0 ]; then
- echo "â **CRITICAL VULNERABILITIES DETECTED**" >> $GITHUB_STEP_SUMMARY
- exit 1
- fi
- fi
-
- - name: Upload DAST results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: dast-results
- path: |
- nuclei-*.json
- zap-*.json
- dast-*.json
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # PII Detection and Data Privacy Testing
- pii-detection:
- name: PII Detection & Data Privacy
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.compliance_suite == 'all' || github.event.inputs.compliance_suite == 'pii' || github.event_name == 'schedule' }}
- timeout-minutes: 20
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install PII detection tools
- run: |
- # Install detect-secrets for PII/secrets detection
- pip3 install detect-secrets
-
- # Install additional PII detection dependencies
- npm install -g @microsoft/detect-secrets
-
- - name: Scan for PII in codebase
- run: |
- echo "## PII Detection & Data Privacy Assessment" >> $GITHUB_STEP_SUMMARY
-
- # Create comprehensive PII detection script
- cat > pii-detector.js << 'EOF'
- const fs = require('fs');
- const path = require('path');
-
- // PII detection patterns
- const piiPatterns = {
- 'SSN': /\b\d{3}-?\d{2}-?\d{4}\b/g,
- 'Credit Card': /\b(?:\d{4}[-\s]?){3}\d{4}\b/g,
- 'Email': /\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b/g,
- 'Phone': /\b(?:\+?1[-.\s]?)?\(?([0-9]{3})\)?[-.\s]?([0-9]{3})[-.\s]?([0-9]{4})\b/g,
- 'IP Address': /\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b/g,
- 'API Key Pattern': /(?:api[_-]?key|token|secret)[_-]?[:=]\s*['"]?([a-zA-Z0-9]{20,})['"]?/gi,
- 'UUID': /\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/gi
- };
-
- const sensitiveFilePatterns = {
- 'Database Connection': /(?:database|db)[_-]?(?:url|host|user|pass|password)/gi,
- 'AWS Credentials': /(?:aws[_-]?(?:access[_-]?key|secret)|AKIA[0-9A-Z]{16})/gi,
- 'Private Keys': /-----BEGIN (?:RSA |EC )?PRIVATE KEY-----/g,
- 'JWT Tokens': /eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*/g
- };
-
- function scanFile(filePath) {
- try {
- const content = fs.readFileSync(filePath, 'utf8');
- const findings = [];
-
- // Scan for PII patterns
- Object.entries(piiPatterns).forEach(([type, pattern]) => {
- const matches = content.match(pattern);
- if (matches && matches.length > 0) {
- findings.push({
- type,
- count: matches.length,
- file: filePath,
- severity: 'HIGH'
- });
- }
- });
-
- // Scan for sensitive file patterns
- Object.entries(sensitiveFilePatterns).forEach(([type, pattern]) => {
- const matches = content.match(pattern);
- if (matches && matches.length > 0) {
- findings.push({
- type,
- count: matches.length,
- file: filePath,
- severity: 'CRITICAL'
- });
- }
- });
-
- return findings;
- } catch (error) {
- return [];
- }
- }
-
- function scanDirectory(dir, excludes = ['node_modules', '.git', 'dist', 'build']) {
- const allFindings = [];
-
- function walk(currentPath) {
- const items = fs.readdirSync(currentPath);
-
- items.forEach(item => {
- const itemPath = path.join(currentPath, item);
- const stat = fs.statSync(itemPath);
-
- if (stat.isDirectory() && !excludes.includes(item)) {
- walk(itemPath);
- } else if (stat.isFile() && !item.startsWith('.') &&
- (item.endsWith('.js') || item.endsWith('.ts') ||
- item.endsWith('.jsx') || item.endsWith('.tsx') ||
- item.endsWith('.json') || item.endsWith('.env') ||
- item.endsWith('.yaml') || item.endsWith('.yml'))) {
- const findings = scanFile(itemPath);
- allFindings.push(...findings);
- }
- });
- }
-
- walk(dir);
- return allFindings;
- }
-
- console.log('đ Scanning for PII and sensitive data...');
-
- const findings = scanDirectory('.');
-
- console.log(`\nđ PII Detection Results: ${findings.length} potential issues found`);
-
- if (findings.length > 0) {
- const criticalFindings = findings.filter(f => f.severity === 'CRITICAL');
- const highFindings = findings.filter(f => f.severity === 'HIGH');
-
- console.log(`- Critical: ${criticalFindings.length}`);
- console.log(`- High: ${highFindings.length}`);
-
- findings.forEach(finding => {
- console.log(`${finding.severity === 'CRITICAL' ? 'đ¨' : 'â ī¸'} ${finding.type}: ${finding.count} occurrences in ${finding.file}`);
- });
-
- // Save detailed results
- fs.writeFileSync('pii-findings.json', JSON.stringify(findings, null, 2));
-
- if (criticalFindings.length > 0) {
- console.log('\nâ CRITICAL PII/SECRET EXPOSURES DETECTED');
- process.exit(1);
- }
- } else {
- console.log('â
No PII or sensitive data patterns detected');
- }
- EOF
-
- node pii-detector.js
-
- - name: Check data retention compliance
- run: |
- echo "### Data Retention Compliance" >> $GITHUB_STEP_SUMMARY
-
- # Check for data retention policies in code
- if grep -r "retention\|expire\|delete.*after\|purge" --include="*.js" --include="*.ts" .; then
- echo "â
**Data retention logic found in codebase**" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ **No explicit data retention logic found**" >> $GITHUB_STEP_SUMMARY
- echo "- Consider implementing automated data purging" >> $GITHUB_STEP_SUMMARY
- echo "- Add retention policies to comply with GDPR/CCPA" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Check encryption at rest
- run: |
- echo "### Encryption at Rest Assessment" >> $GITHUB_STEP_SUMMARY
-
- # Look for encryption implementations
- if grep -r "encrypt\|cipher\|hash.*password" --include="*.js" --include="*.ts" backend/; then
- echo "â
**Encryption mechanisms found in backend**" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ **Limited encryption mechanisms detected**" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Upload PII detection results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: pii-detection-results
- path: |
- pii-findings.json
- detect-secrets-*.json
-
- # Compliance Report Generation
- compliance-report:
- name: Federal Compliance Report
- runs-on: ubuntu-latest
- needs:
- [
- fips-validation,
- sbom-generation,
- sast-enhanced,
- dast-authenticated,
- pii-detection,
- ]
- if: always()
- timeout-minutes: 15
-
- steps:
- - name: Download all compliance artifacts
- uses: actions/download-artifact@v4
- with:
- path: compliance-results/
-
- - name: Generate comprehensive compliance report
- run: |
- echo "# Federal Compliance Assessment Report" > compliance-report.md
- echo "**Generated:** $(date -u +"%Y-%m-%d %H:%M:%S UTC")" >> compliance-report.md
- echo "" >> compliance-report.md
-
- echo "## Executive Summary" >> compliance-report.md
- echo "" >> compliance-report.md
-
- # Check results from each job
- echo "### Compliance Test Results" >> compliance-report.md
- echo "" >> compliance-report.md
-
- # FIPS Compliance
- if [ "${{ needs.fips-validation.result }}" == "success" ]; then
- echo "- â
**FIPS 140-2 Cryptography**: COMPLIANT" >> compliance-report.md
- else
- echo "- â **FIPS 140-2 Cryptography**: NON-COMPLIANT" >> compliance-report.md
- fi
-
- # SBOM Generation
- if [ "${{ needs.sbom-generation.result }}" == "success" ]; then
- echo "- â
**Software Bill of Materials**: COMPLIANT" >> compliance-report.md
- else
- echo "- â **Software Bill of Materials**: NON-COMPLIANT" >> compliance-report.md
- fi
-
- # SAST
- if [ "${{ needs.sast-enhanced.result }}" == "success" ]; then
- echo "- â
**Static Application Security Testing**: PASSED" >> compliance-report.md
- else
- echo "- â **Static Application Security Testing**: FAILED" >> compliance-report.md
- fi
-
- # DAST
- if [ "${{ needs.dast-authenticated.result }}" == "success" ]; then
- echo "- â
**Dynamic Application Security Testing**: PASSED" >> compliance-report.md
- else
- echo "- â **Dynamic Application Security Testing**: FAILED" >> compliance-report.md
- fi
-
- # PII Detection
- if [ "${{ needs.pii-detection.result }}" == "success" ]; then
- echo "- â
**PII Detection & Data Privacy**: COMPLIANT" >> compliance-report.md
- else
- echo "- â **PII Detection & Data Privacy**: NON-COMPLIANT" >> compliance-report.md
- fi
-
- echo "" >> compliance-report.md
- echo "## Compliance Framework Alignment" >> compliance-report.md
- echo "" >> compliance-report.md
- echo "### FedRAMP Controls Assessed" >> compliance-report.md
- echo "- **AC-2**: Account Management" >> compliance-report.md
- echo "- **AC-3**: Access Enforcement" >> compliance-report.md
- echo "- **AU-2**: Event Logging" >> compliance-report.md
- echo "- **CM-2**: Baseline Configuration" >> compliance-report.md
- echo "- **IA-5**: Authenticator Management" >> compliance-report.md
- echo "- **RA-5**: Vulnerability Scanning" >> compliance-report.md
- echo "- **SA-10**: Developer Configuration Management" >> compliance-report.md
- echo "- **SA-11**: Developer Security Testing" >> compliance-report.md
- echo "- **SC-7**: Boundary Protection" >> compliance-report.md
- echo "- **SC-13**: Cryptographic Protection" >> compliance-report.md
- echo "" >> compliance-report.md
-
- echo "### NIST 800-53 Controls" >> compliance-report.md
- echo "- **SI-2**: Flaw Remediation (Vulnerability Management)" >> compliance-report.md
- echo "- **SI-3**: Malicious Code Protection" >> compliance-report.md
- echo "- **SI-10**: Information Input Validation" >> compliance-report.md
- echo "- **SC-28**: Protection of Information at Rest" >> compliance-report.md
- echo "" >> compliance-report.md
-
- echo "## Recommendations" >> compliance-report.md
- echo "" >> compliance-report.md
- echo "1. **Immediate Actions Required:**" >> compliance-report.md
- echo " - Review and remediate any CRITICAL findings" >> compliance-report.md
- echo " - Implement missing security controls" >> compliance-report.md
- echo " - Update SBOM documentation" >> compliance-report.md
- echo "" >> compliance-report.md
- echo "2. **Ongoing Compliance:**" >> compliance-report.md
- echo " - Run daily compliance scans" >> compliance-report.md
- echo " - Maintain current SBOM" >> compliance-report.md
- echo " - Regular security training for developers" >> compliance-report.md
- echo "" >> compliance-report.md
-
- # Set overall compliance status
- FAILED_JOBS=0
- if [ "${{ needs.fips-validation.result }}" != "success" ]; then
- FAILED_JOBS=$((FAILED_JOBS + 1))
- fi
- if [ "${{ needs.sbom-generation.result }}" != "success" ]; then
- FAILED_JOBS=$((FAILED_JOBS + 1))
- fi
- if [ "${{ needs.sast-enhanced.result }}" != "success" ]; then
- FAILED_JOBS=$((FAILED_JOBS + 1))
- fi
- if [ "${{ needs.dast-authenticated.result }}" != "success" ]; then
- FAILED_JOBS=$((FAILED_JOBS + 1))
- fi
- if [ "${{ needs.pii-detection.result }}" != "success" ]; then
- FAILED_JOBS=$((FAILED_JOBS + 1))
- fi
-
- if [ $FAILED_JOBS -eq 0 ]; then
- echo "## â
OVERALL STATUS: COMPLIANT" >> compliance-report.md
- echo "All federal compliance requirements have been met." >> compliance-report.md
- else
- echo "## â OVERALL STATUS: NON-COMPLIANT" >> compliance-report.md
- echo "$FAILED_JOBS out of 5 compliance tests failed. Immediate remediation required." >> compliance-report.md
- fi
-
- - name: Upload compliance report
- uses: actions/upload-artifact@v4
- with:
- name: federal-compliance-report
- path: |
- compliance-report.md
- compliance-results/
diff --git a/.github/workflows/archived/nightly.yml b/.github/workflows/archived/nightly.yml
deleted file mode 100644
index fdc7d57..0000000
--- a/.github/workflows/archived/nightly.yml
+++ /dev/null
@@ -1,1313 +0,0 @@
-name: Nightly Extended Tests
-
-on:
- schedule:
- # Run every night at 2 AM UTC
- - cron: "0 2 * * *"
- workflow_dispatch:
- inputs:
- test_suite:
- description: "Test suite to run"
- required: true
- default: "all"
- type: choice
- options:
- - all
- - performance
- - security
- - chaos
- - data-integrity
- - backup-restore
- duration:
- description: "Test duration (for performance tests)"
- default: "30m"
- type: string
-
-env:
- NODE_VERSION: "18"
- EXTENDED_TEST_MODE: "true"
-
-jobs:
- # Extended performance testing with longer duration
- extended-performance:
- name: Extended Performance Testing
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'performance' || github.event_name == 'schedule' }}
- timeout-minutes: 120
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup k6
- run: |
- wget https://github.com/grafana/k6/releases/download/v0.47.0/k6-v0.47.0-linux-amd64.tar.gz
- tar -xzf k6-v0.47.0-linux-amd64.tar.gz
- sudo cp k6-v0.47.0-linux-amd64/k6 /usr/local/bin/
-
- - name: Start services with production-like config
- run: |
- cp .env.example .env
- # Add production-like settings
- echo "NODE_ENV=production" >> .env
- echo "LOG_LEVEL=warn" >> .env
- docker compose up -d
- sleep 60
-
- - name: Wait for services to be ready
- run: |
- timeout 120 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 5; done'
-
- - name: Create extended performance test data
- run: |
- # Seed database with substantial test data for realistic testing
- cat > seed-large-dataset.js << 'EOF'
- const http = require('http');
- const https = require('https');
-
- async function seedLargeDataset() {
- const baseUrl = 'http://localhost:3001';
-
- // Register admin user
- const adminData = {
- email: 'admin@nightlytest.com',
- username: 'nightlyadmin',
- password: 'NightlyTest123!',
- firstName: 'Nightly',
- lastName: 'Admin'
- };
-
- console.log('Creating admin user...');
-
- const registerResponse = await fetch(`${baseUrl}/api/auth/register`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify(adminData)
- });
-
- if (registerResponse.ok) {
- const { data } = await registerResponse.json();
- const token = data.token;
-
- console.log('Creating test contacts...');
-
- // Create 1000+ contacts for realistic load testing
- const promises = [];
- for (let i = 0; i < 1000; i++) {
- const contact = {
- firstName: `TestContact${i}`,
- lastName: `Nightly${Math.floor(i / 100)}`,
- email: `nightly-${i}@test.example.com`,
- company: `TestCorp${Math.floor(i / 50)}`,
- jobTitle: `Position ${i % 20}`,
- phone: `+1${String(2000000000 + i).substring(0, 10)}`,
- notes: `Generated for nightly testing - batch ${Math.floor(i / 100)}`,
- tags: ['nightly-test', `batch-${Math.floor(i / 100)}`, `wave-${Math.floor(i / 200)}`]
- };
-
- promises.push(
- fetch(`${baseUrl}/api/contacts`, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'Authorization': `Bearer ${token}`
- },
- body: JSON.stringify(contact)
- })
- );
-
- // Batch requests to avoid overwhelming the server
- if (promises.length >= 50) {
- await Promise.all(promises);
- promises.length = 0;
- console.log(`Created ${i + 1} contacts...`);
- }
- }
-
- // Handle remaining promises
- if (promises.length > 0) {
- await Promise.all(promises);
- }
-
- console.log('Large dataset seeding completed!');
- } else {
- console.error('Failed to create admin user');
- }
- }
-
- seedLargeDataset().catch(console.error);
- EOF
-
- node seed-large-dataset.js
-
- - name: Run extended load test
- run: |
- DURATION="${{ github.event.inputs.duration || '30m' }}"
- echo "Running extended performance test for duration: $DURATION"
-
- # Create extended load test configuration
- cat > extended-load-test.js << 'EOF'
- import http from 'k6/http';
- import { check, sleep } from 'k6';
- import { Rate, Trend, Counter } from 'k6/metrics';
-
- // Extended test metrics
- const errorRate = new Rate('errors');
- const longRunningQueries = new Trend('long_running_queries');
- const memoryLeakIndicator = new Counter('potential_memory_leaks');
-
- export const options = {
- stages: [
- { duration: '5m', target: 50 }, // Warm up
- { duration: '10m', target: 100 }, // Normal load
- { duration: '10m', target: 200 }, // High load
- { duration: '5m', target: 500 }, // Spike test
- { duration: '15m', target: 200 }, // Sustained load
- { duration: '10m', target: 100 }, // Scale down
- { duration: '5m', target: 0 }, // Cool down
- ],
- thresholds: {
- http_req_duration: ['p(95)<1000'], // 95% under 1s for extended test
- errors: ['rate<0.1'], // 10% error rate acceptable for nightly
- long_running_queries: ['p(90)<2000'], // 90% of complex queries under 2s
- },
- };
-
- const BASE_URL = __ENV.BASE_URL || 'http://localhost:3001';
-
- export default function() {
- const operations = [
- searchLargeDataset,
- complexContactQueries,
- bulkContactOperations,
- paginationStressTest,
- concurrentUserSimulation
- ];
-
- const operation = operations[Math.floor(Math.random() * operations.length)];
- operation();
-
- sleep(Math.random() * 2 + 0.5); // Variable sleep 0.5-2.5s
- }
-
- function searchLargeDataset() {
- // Login first
- const loginResponse = http.post(`${BASE_URL}/api/auth/login`, JSON.stringify({
- email: 'admin@nightlytest.com',
- password: 'NightlyTest123!'
- }), {
- headers: { 'Content-Type': 'application/json' },
- });
-
- if (loginResponse.status !== 200) return;
-
- const token = JSON.parse(loginResponse.body).data.token;
- const headers = { 'Authorization': `Bearer ${token}` };
-
- // Complex search queries on large dataset
- const searchTerms = ['TestContact', 'Nightly', 'TestCorp', 'Position', 'batch'];
- const searchTerm = searchTerms[Math.floor(Math.random() * searchTerms.length)];
-
- const startTime = Date.now();
- const response = http.get(`${BASE_URL}/api/contacts?search=${searchTerm}&limit=100`, { headers });
- const duration = Date.now() - startTime;
-
- longRunningQueries.add(duration);
-
- check(response, {
- 'large dataset search successful': (r) => r.status === 200,
- 'search returns results': (r) => {
- try {
- const data = JSON.parse(r.body);
- return data.data && data.data.contacts && data.data.contacts.length > 0;
- } catch (e) {
- return false;
- }
- },
- });
-
- errorRate.add(response.status >= 400);
- }
-
- function complexContactQueries() {
- const loginResponse = http.post(`${BASE_URL}/api/auth/login`, JSON.stringify({
- email: 'admin@nightlytest.com',
- password: 'NightlyTest123!'
- }), {
- headers: { 'Content-Type': 'application/json' },
- });
-
- if (loginResponse.status !== 200) return;
-
- const token = JSON.parse(loginResponse.body).data.token;
- const headers = { 'Authorization': `Bearer ${token}` };
-
- // Multiple complex queries
- const queries = [
- 'company=TestCorp1&status=active&limit=50',
- 'tags=nightly-test&limit=75',
- 'search=batch&company=TestCorp&limit=25',
- 'sort=firstName&order=desc&limit=100'
- ];
-
- queries.forEach(query => {
- const response = http.get(`${BASE_URL}/api/contacts?${query}`, { headers });
- check(response, {
- 'complex query successful': (r) => r.status === 200,
- });
- errorRate.add(response.status >= 400);
- });
- }
-
- function bulkContactOperations() {
- const loginResponse = http.post(`${BASE_URL}/api/auth/login`, JSON.stringify({
- email: 'admin@nightlytest.com',
- password: 'NightlyTest123!'
- }), {
- headers: { 'Content-Type': 'application/json' },
- });
-
- if (loginResponse.status !== 200) return;
-
- const token = JSON.parse(loginResponse.body).data.token;
- const headers = {
- 'Authorization': `Bearer ${token}`,
- 'Content-Type': 'application/json'
- };
-
- // Create multiple contacts rapidly
- for (let i = 0; i < 5; i++) {
- const contact = {
- firstName: `NightlyBulk${Date.now()}${i}`,
- lastName: `Extended${i}`,
- email: `bulk-${Date.now()}-${i}@nightly.test`,
- company: `BulkTestCorp${i}`,
- notes: `Created during extended nightly testing - iteration ${i}`
- };
-
- const response = http.post(`${BASE_URL}/api/contacts`, JSON.stringify(contact), { headers });
- check(response, {
- 'bulk create successful': (r) => r.status === 201,
- });
- errorRate.add(response.status >= 400);
- }
- }
-
- function paginationStressTest() {
- const loginResponse = http.post(`${BASE_URL}/api/auth/login`, JSON.stringify({
- email: 'admin@nightlytest.com',
- password: 'NightlyTest123!'
- }), {
- headers: { 'Content-Type': 'application/json' },
- });
-
- if (loginResponse.status !== 200) return;
-
- const token = JSON.parse(loginResponse.body).data.token;
- const headers = { 'Authorization': `Bearer ${token}` };
-
- // Test various pagination scenarios
- const pageTests = [
- { page: 1, limit: 10 },
- { page: 5, limit: 20 },
- { page: 10, limit: 50 },
- { page: 20, limit: 25 },
- { page: 1, limit: 100 }
- ];
-
- pageTests.forEach(({ page, limit }) => {
- const response = http.get(`${BASE_URL}/api/contacts?page=${page}&limit=${limit}`, { headers });
- check(response, {
- 'pagination successful': (r) => r.status === 200,
- 'pagination has metadata': (r) => {
- try {
- const data = JSON.parse(r.body);
- return data.data && data.data.pagination;
- } catch (e) {
- return false;
- }
- },
- });
- errorRate.add(response.status >= 400);
- });
- }
-
- function concurrentUserSimulation() {
- // Simulate different user creating account and immediately using system
- const timestamp = Date.now();
- const userId = Math.random().toString(36).substring(7);
-
- const newUser = {
- email: `concurrent-${timestamp}-${userId}@nightly.test`,
- username: `concurrent${timestamp}${userId}`,
- password: 'ConcurrentTest123!',
- firstName: `Concurrent${userId}`,
- lastName: 'User'
- };
-
- // Register
- const registerResponse = http.post(`${BASE_URL}/api/auth/register`, JSON.stringify(newUser), {
- headers: { 'Content-Type': 'application/json' },
- });
-
- if (registerResponse.status === 201) {
- const token = JSON.parse(registerResponse.body).data.token;
- const headers = {
- 'Authorization': `Bearer ${token}`,
- 'Content-Type': 'application/json'
- };
-
- // Immediately start using system
- http.get(`${BASE_URL}/api/contacts`, { headers });
-
- const contact = {
- firstName: 'Quick',
- lastName: 'Contact',
- email: `quick-${timestamp}@example.com`
- };
-
- http.post(`${BASE_URL}/api/contacts`, JSON.stringify(contact), { headers });
- }
-
- errorRate.add(registerResponse.status >= 400);
- }
- EOF
-
- k6 run extended-load-test.js --env BASE_URL=http://localhost:3001
-
- - name: Run memory leak detection
- run: |
- echo "## Extended Performance Test Results" >> $GITHUB_STEP_SUMMARY
-
- # Monitor memory usage during the test
- docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}" > performance-stats.txt
-
- echo "### System Resource Usage" >> $GITHUB_STEP_SUMMARY
- cat performance-stats.txt >> $GITHUB_STEP_SUMMARY
-
- # Check for potential memory leaks
- BACKEND_MEMORY=$(docker stats connectkit-backend --no-stream --format "{{.MemUsage}}")
- echo "**Backend Memory Usage:** $BACKEND_MEMORY" >> $GITHUB_STEP_SUMMARY
-
- # Performance test completed
- echo "â
Extended performance testing completed" >> $GITHUB_STEP_SUMMARY
-
- - name: Upload extended performance results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: extended-performance-results
- path: |
- performance-stats.txt
- *.html
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # Chaos engineering tests
- chaos-testing:
- name: Chaos Engineering Tests
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'chaos' || github.event_name == 'schedule' }}
- timeout-minutes: 60
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Start services
- run: |
- cp .env.example .env
- docker compose up -d
- sleep 30
-
- - name: Wait for services
- run: |
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
-
- - name: Install chaos testing tools
- run: |
- # Install toxiproxy for network chaos
- wget https://github.com/Shopify/toxiproxy/releases/download/v2.5.0/toxiproxy-server-linux-amd64
- chmod +x toxiproxy-server-linux-amd64
- ./toxiproxy-server-linux-amd64 &
-
- wget https://github.com/Shopify/toxiproxy/releases/download/v2.5.0/toxiproxy-cli-linux-amd64
- chmod +x toxiproxy-cli-linux-amd64
- alias toxiproxy-cli='./toxiproxy-cli-linux-amd64'
-
- - name: Test database connection failures
- run: |
- echo "## Chaos Engineering Results" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
-
- # Test 1: Database connection interruption
- echo "### Database Connection Chaos Test" >> $GITHUB_STEP_SUMMARY
-
- # Stop database container
- docker stop connectkit-db
-
- # Test how backend handles database unavailability
- RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3001/api/health || echo "000")
-
- if [ "$RESPONSE" = "503" ] || [ "$RESPONSE" = "500" ]; then
- echo "â
Backend gracefully handles database disconnection (HTTP $RESPONSE)" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ Backend response to database failure: HTTP $RESPONSE" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Restart database
- docker start connectkit-db
- sleep 30
-
- # Test recovery
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
- echo "â
Service recovered after database restart" >> $GITHUB_STEP_SUMMARY
-
- - name: Test Redis connection failures
- run: |
- echo "### Redis Connection Chaos Test" >> $GITHUB_STEP_SUMMARY
-
- # Stop Redis container
- docker stop connectkit-redis
-
- # Test how backend handles Redis unavailability
- RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3001/api/health || echo "000")
-
- if [ "$RESPONSE" = "200" ]; then
- echo "â
Backend continues functioning without Redis (HTTP $RESPONSE)" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ Backend affected by Redis failure: HTTP $RESPONSE" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Restart Redis
- docker start connectkit-redis
- sleep 15
-
- - name: Test high CPU load
- run: |
- echo "### CPU Stress Test" >> $GITHUB_STEP_SUMMARY
-
- # Install stress testing tools
- sudo apt-get update
- sudo apt-get install -y stress-ng
-
- # Apply CPU stress for 60 seconds
- stress-ng --cpu 2 --timeout 60s &
- STRESS_PID=$!
-
- sleep 10
-
- # Test API responsiveness under CPU stress
- RESPONSE_TIME=$(curl -o /dev/null -s -w "%{time_total}" http://localhost:3001/api/health)
-
- if (( $(echo "$RESPONSE_TIME < 2.0" | bc -l) )); then
- echo "â
API responsive under CPU stress (${RESPONSE_TIME}s response time)" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ API slow under CPU stress (${RESPONSE_TIME}s response time)" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Wait for stress test to complete
- wait $STRESS_PID
-
- - name: Test memory pressure
- run: |
- echo "### Memory Pressure Test" >> $GITHUB_STEP_SUMMARY
-
- # Apply memory pressure
- stress-ng --vm 1 --vm-bytes 1G --timeout 30s &
- STRESS_PID=$!
-
- sleep 10
-
- # Test API under memory pressure
- RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3001/api/health)
-
- if [ "$RESPONSE" = "200" ]; then
- echo "â
API stable under memory pressure" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ API affected by memory pressure (HTTP $RESPONSE)" >> $GITHUB_STEP_SUMMARY
- fi
-
- wait $STRESS_PID
-
- - name: Test network latency simulation
- run: |
- echo "### Network Latency Simulation" >> $GITHUB_STEP_SUMMARY
-
- # Simulate network delays using tc (traffic control)
- sudo tc qdisc add dev lo root handle 1: prio
- sudo tc qdisc add dev lo parent 1:1 handle 10: netem delay 100ms
-
- # Test API with network delay
- START_TIME=$(date +%s.%N)
- curl -s http://localhost:3001/api/health > /dev/null
- END_TIME=$(date +%s.%N)
- RESPONSE_TIME=$(echo "$END_TIME - $START_TIME" | bc)
-
- if (( $(echo "$RESPONSE_TIME > 0.1" | bc -l) )); then
- echo "â
Network latency simulation working (${RESPONSE_TIME}s response)" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ Network latency simulation may not be working" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Remove network delay
- sudo tc qdisc del dev lo root
-
- - name: Test cascading failure recovery
- run: |
- echo "### Cascading Failure Recovery Test" >> $GITHUB_STEP_SUMMARY
-
- # Stop all dependencies simultaneously
- docker stop connectkit-db connectkit-redis
-
- # Wait for potential cascading effects
- sleep 20
-
- # Check if backend is still running (should be degraded but not crashed)
- if docker ps --format "table {{.Names}}" | grep -q connectkit-backend; then
- echo "â
Backend container survived dependency failures" >> $GITHUB_STEP_SUMMARY
- else
- echo "â Backend container crashed due to dependency failures" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Restart dependencies in wrong order (Redis first, then DB)
- docker start connectkit-redis
- sleep 10
- docker start connectkit-db
- sleep 30
-
- # Test full recovery
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
- echo "â
System recovered from cascading failure" >> $GITHUB_STEP_SUMMARY
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # Data integrity testing
- data-integrity:
- name: Data Integrity Tests
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'data-integrity' || github.event_name == 'schedule' }}
- services:
- postgres:
- image: postgres:15-alpine
- env:
- POSTGRES_USER: integrity_test
- POSTGRES_PASSWORD: integrity_test
- POSTGRES_DB: connectkit_integrity
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install backend dependencies
- working-directory: ./backend
- run: npm ci
-
- - name: Run database migrations
- working-directory: ./backend
- env:
- NODE_ENV: test
- DB_HOST: localhost
- DB_PORT: 5432
- DB_USER: integrity_test
- DB_PASSWORD: integrity_test
- DB_NAME: connectkit_integrity
- run: npm run db:migrate
-
- - name: Create data integrity test suite
- working-directory: ./backend
- run: |
- cat > data-integrity-test.js << 'EOF'
- const { Pool } = require('pg');
- const crypto = require('crypto');
-
- const pool = new Pool({
- host: 'localhost',
- port: 5432,
- user: 'integrity_test',
- password: 'integrity_test',
- database: 'connectkit_integrity'
- });
-
- async function runDataIntegrityTests() {
- console.log('đ Starting data integrity tests...');
-
- try {
- // Test 1: Foreign key constraints
- console.log('\n1. Testing foreign key constraints...');
- await testForeignKeyConstraints();
-
- // Test 2: Data validation constraints
- console.log('\n2. Testing data validation constraints...');
- await testDataValidationConstraints();
-
- // Test 3: Concurrent transaction handling
- console.log('\n3. Testing concurrent transactions...');
- await testConcurrentTransactions();
-
- // Test 4: Data consistency under load
- console.log('\n4. Testing data consistency under load...');
- await testDataConsistencyUnderLoad();
-
- // Test 5: Audit trail integrity
- console.log('\n5. Testing audit trail integrity...');
- await testAuditTrailIntegrity();
-
- console.log('\nâ
All data integrity tests completed');
-
- } catch (error) {
- console.error('â Data integrity test failed:', error);
- process.exit(1);
- } finally {
- await pool.end();
- }
- }
-
- async function testForeignKeyConstraints() {
- const client = await pool.connect();
-
- try {
- // Try to insert contact with invalid user_id
- await client.query('BEGIN');
-
- try {
- await client.query(`
- INSERT INTO contacts (user_id, first_name, last_name, email)
- VALUES ('00000000-0000-0000-0000-000000000000', 'Test', 'User', 'test@example.com')
- `);
- throw new Error('Foreign key constraint should have been violated');
- } catch (error) {
- if (error.message.includes('foreign key constraint')) {
- console.log('â
Foreign key constraint working correctly');
- } else {
- throw error;
- }
- }
-
- await client.query('ROLLBACK');
-
- } finally {
- client.release();
- }
- }
-
- async function testDataValidationConstraints() {
- const client = await pool.connect();
-
- try {
- // Create a test user first
- await client.query('BEGIN');
-
- const userResult = await client.query(`
- INSERT INTO users (email, username, password_hash, first_name, last_name)
- VALUES ('test@example.com', 'testuser', 'hashedpassword', 'Test', 'User')
- RETURNING id
- `);
-
- const userId = userResult.rows[0].id;
-
- // Test email validation
- try {
- await client.query(`
- INSERT INTO contacts (user_id, first_name, last_name, email)
- VALUES ($1, 'Test', 'Contact', 'invalid-email')
- `, [userId]);
- throw new Error('Email validation should have failed');
- } catch (error) {
- if (error.message.includes('check constraint') || error.message.includes('email_format')) {
- console.log('â
Email validation constraint working');
- } else {
- throw error;
- }
- }
-
- await client.query('ROLLBACK');
-
- } finally {
- client.release();
- }
- }
-
- async function testConcurrentTransactions() {
- // Simulate concurrent updates to the same record
- const promises = [];
-
- // Create test data first
- const client = await pool.connect();
- await client.query('BEGIN');
-
- const userResult = await client.query(`
- INSERT INTO users (email, username, password_hash, first_name, last_name)
- VALUES ('concurrent@example.com', 'concurrent', 'password', 'Concurrent', 'User')
- RETURNING id
- `);
- const userId = userResult.rows[0].id;
-
- const contactResult = await client.query(`
- INSERT INTO contacts (user_id, first_name, last_name, email)
- VALUES ($1, 'Concurrent', 'Contact', 'concurrent.contact@example.com')
- RETURNING id
- `, [userId]);
- const contactId = contactResult.rows[0].id;
-
- await client.query('COMMIT');
- client.release();
-
- // Now test concurrent updates
- for (let i = 0; i < 10; i++) {
- promises.push(updateContactConcurrently(contactId, i));
- }
-
- const results = await Promise.allSettled(promises);
- const successful = results.filter(r => r.status === 'fulfilled').length;
- const failed = results.filter(r => r.status === 'rejected').length;
-
- console.log(`â
Concurrent transactions: ${successful} successful, ${failed} failed (expected some failures)`);
- }
-
- async function updateContactConcurrently(contactId, iteration) {
- const client = await pool.connect();
-
- try {
- await client.query('BEGIN');
-
- // Simulate processing time
- await new Promise(resolve => setTimeout(resolve, Math.random() * 100));
-
- await client.query(`
- UPDATE contacts
- SET notes = 'Updated by iteration ' || $1 || ' at ' || NOW()
- WHERE id = $2
- `, [iteration, contactId]);
-
- await client.query('COMMIT');
- return iteration;
-
- } catch (error) {
- await client.query('ROLLBACK');
- throw error;
- } finally {
- client.release();
- }
- }
-
- async function testDataConsistencyUnderLoad() {
- const promises = [];
- const startTime = Date.now();
-
- // Create multiple users and contacts simultaneously
- for (let i = 0; i < 50; i++) {
- promises.push(createUserWithContacts(i));
- }
-
- const results = await Promise.allSettled(promises);
- const successful = results.filter(r => r.status === 'fulfilled').length;
-
- console.log(`â
Created ${successful}/50 users with contacts under load`);
-
- // Verify data consistency
- const client = await pool.connect();
- const userCount = await client.query('SELECT COUNT(*) FROM users');
- const contactCount = await client.query('SELECT COUNT(*) FROM contacts');
-
- console.log(`Final counts: ${userCount.rows[0].count} users, ${contactCount.rows[0].count} contacts`);
- client.release();
- }
-
- async function createUserWithContacts(index) {
- const client = await pool.connect();
-
- try {
- await client.query('BEGIN');
-
- const userResult = await client.query(`
- INSERT INTO users (email, username, password_hash, first_name, last_name)
- VALUES ($1, $2, 'password', $3, $4)
- RETURNING id
- `, [
- `load${index}@example.com`,
- `loaduser${index}`,
- `Load${index}`,
- 'User'
- ]);
-
- const userId = userResult.rows[0].id;
-
- // Create 3 contacts for each user
- for (let j = 0; j < 3; j++) {
- await client.query(`
- INSERT INTO contacts (user_id, first_name, last_name, email)
- VALUES ($1, $2, $3, $4)
- `, [
- userId,
- `Contact${j}`,
- `ForUser${index}`,
- `contact${index}-${j}@example.com`
- ]);
- }
-
- await client.query('COMMIT');
- return { userId, contactsCreated: 3 };
-
- } catch (error) {
- await client.query('ROLLBACK');
- throw error;
- } finally {
- client.release();
- }
- }
-
- async function testAuditTrailIntegrity() {
- const client = await pool.connect();
-
- try {
- // Check if audit triggers are working
- await client.query('BEGIN');
-
- const userResult = await client.query(`
- INSERT INTO users (email, username, password_hash, first_name, last_name)
- VALUES ('audit@example.com', 'audituser', 'password', 'Audit', 'User')
- RETURNING id
- `);
- const userId = userResult.rows[0].id;
-
- // Insert should trigger audit log
- const auditResult = await client.query(`
- SELECT COUNT(*) FROM audit.audit_logs
- WHERE entity_type = 'users' AND entity_id = $1 AND action = 'create'
- `, [userId]);
-
- if (parseInt(auditResult.rows[0].count) > 0) {
- console.log('â
Audit trail working for INSERT operations');
- } else {
- console.log('â ī¸ Audit trail not working for INSERT operations');
- }
-
- // Test UPDATE audit
- await client.query(`
- UPDATE users SET first_name = 'Updated' WHERE id = $1
- `, [userId]);
-
- const updateAuditResult = await client.query(`
- SELECT COUNT(*) FROM audit.audit_logs
- WHERE entity_type = 'users' AND entity_id = $1 AND action = 'update'
- `, [userId]);
-
- if (parseInt(updateAuditResult.rows[0].count) > 0) {
- console.log('â
Audit trail working for UPDATE operations');
- } else {
- console.log('â ī¸ Audit trail not working for UPDATE operations');
- }
-
- await client.query('COMMIT');
-
- } finally {
- client.release();
- }
- }
-
- runDataIntegrityTests();
- EOF
-
- node data-integrity-test.js
-
- - name: Generate data integrity report
- run: |
- echo "## Data Integrity Test Results" >> $GITHUB_STEP_SUMMARY
- echo "â
All data integrity tests completed successfully" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
- echo "### Tests Performed" >> $GITHUB_STEP_SUMMARY
- echo "- Foreign key constraint validation" >> $GITHUB_STEP_SUMMARY
- echo "- Data validation constraints (email format, etc.)" >> $GITHUB_STEP_SUMMARY
- echo "- Concurrent transaction handling" >> $GITHUB_STEP_SUMMARY
- echo "- Data consistency under load" >> $GITHUB_STEP_SUMMARY
- echo "- Audit trail integrity verification" >> $GITHUB_STEP_SUMMARY
-
- # Backup and restore testing
- backup-restore:
- name: Backup & Restore Tests
- runs-on: ubuntu-latest
- if: ${{ github.event.inputs.test_suite == 'all' || github.event.inputs.test_suite == 'backup-restore' || github.event_name == 'schedule' }}
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Start services
- run: |
- cp .env.example .env
- docker compose up -d
- sleep 30
-
- - name: Wait for services and seed data
- run: |
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
-
- # Create test data for backup
- cat > create-backup-data.js << 'EOF'
- const http = require('http');
-
- async function createBackupData() {
- const baseUrl = 'http://localhost:3001';
-
- // Create test user
- const registerResponse = await fetch(`${baseUrl}/api/auth/register`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- email: 'backup@test.com',
- username: 'backupuser',
- password: 'BackupTest123!',
- firstName: 'Backup',
- lastName: 'User'
- })
- });
-
- if (registerResponse.ok) {
- const { data } = await registerResponse.json();
- const token = data.token;
-
- // Create test contacts
- for (let i = 0; i < 10; i++) {
- await fetch(`${baseUrl}/api/contacts`, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'Authorization': `Bearer ${token}`
- },
- body: JSON.stringify({
- firstName: `BackupContact${i}`,
- lastName: 'Test',
- email: `backup${i}@example.com`,
- company: 'BackupTestCorp',
- notes: `Test data for backup validation - contact ${i}`
- })
- });
- }
-
- console.log('â
Backup test data created');
- }
- }
-
- createBackupData().catch(console.error);
- EOF
-
- node create-backup-data.js
-
- - name: Create database backup
- run: |
- echo "## Backup & Restore Test Results" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
-
- # Create database dump
- docker exec connectkit-db pg_dump -U admin connectkit > backup-test.sql
-
- if [ -f "backup-test.sql" ] && [ -s "backup-test.sql" ]; then
- BACKUP_SIZE=$(wc -l < backup-test.sql)
- echo "â
**Database backup created**: ${BACKUP_SIZE} lines" >> $GITHUB_STEP_SUMMARY
- else
- echo "â **Database backup failed**" >> $GITHUB_STEP_SUMMARY
- exit 1
- fi
-
- - name: Test backup restoration
- run: |
- # Stop services
- docker compose down -v
-
- # Start fresh database
- docker compose up -d db
- sleep 30
-
- # Restore from backup
- docker exec -i connectkit-db psql -U admin connectkit < backup-test.sql
-
- # Start other services
- docker compose up -d
- sleep 30
-
- # Verify restoration
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
-
- - name: Verify backup data integrity
- run: |
- # Login and check if data was restored correctly
- cat > verify-restore.js << 'EOF'
- async function verifyRestore() {
- const baseUrl = 'http://localhost:3001';
-
- try {
- // Login with restored user
- const loginResponse = await fetch(`${baseUrl}/api/auth/login`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- email: 'backup@test.com',
- password: 'BackupTest123!'
- })
- });
-
- if (loginResponse.ok) {
- const { data } = await loginResponse.json();
- const token = data.token;
-
- // Check contacts
- const contactsResponse = await fetch(`${baseUrl}/api/contacts`, {
- headers: { 'Authorization': `Bearer ${token}` }
- });
-
- if (contactsResponse.ok) {
- const contactsData = await contactsResponse.json();
- const contactCount = contactsData.data.contacts.length;
-
- console.log(`â
Restored ${contactCount} contacts successfully`);
-
- // Verify specific contact
- const testContact = contactsData.data.contacts.find(c =>
- c.firstName === 'BackupContact0' && c.company === 'BackupTestCorp'
- );
-
- if (testContact) {
- console.log('â
Specific test contact data verified');
- } else {
- console.log('â ī¸ Test contact data may be incomplete');
- }
- } else {
- console.log('â Failed to retrieve contacts after restore');
- }
- } else {
- console.log('â Failed to login with restored user data');
- }
- } catch (error) {
- console.error('Error verifying restore:', error.message);
- }
- }
-
- verifyRestore();
- EOF
-
- node verify-restore.js
-
- echo "â
**Backup restoration verified**" >> $GITHUB_STEP_SUMMARY
- echo "- User data successfully restored" >> $GITHUB_STEP_SUMMARY
- echo "- Contact data integrity confirmed" >> $GITHUB_STEP_SUMMARY
- echo "- Authentication system working post-restore" >> $GITHUB_STEP_SUMMARY
-
- - name: Test incremental backup scenario
- run: |
- echo "### Incremental Backup Test" >> $GITHUB_STEP_SUMMARY
-
- # Create additional data after restore
- cat > create-incremental-data.js << 'EOF'
- async function createIncrementalData() {
- const baseUrl = 'http://localhost:3001';
-
- const loginResponse = await fetch(`${baseUrl}/api/auth/login`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- email: 'backup@test.com',
- password: 'BackupTest123!'
- })
- });
-
- if (loginResponse.ok) {
- const { data } = await loginResponse.json();
- const token = data.token;
-
- // Create incremental data
- for (let i = 10; i < 15; i++) {
- await fetch(`${baseUrl}/api/contacts`, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'Authorization': `Bearer ${token}`
- },
- body: JSON.stringify({
- firstName: `IncrementalContact${i}`,
- lastName: 'Test',
- email: `incremental${i}@example.com`,
- company: 'IncrementalCorp',
- notes: `Incremental data created after restore - contact ${i}`
- })
- });
- }
-
- console.log('â
Incremental data created');
- }
- }
-
- createIncrementalData().catch(console.error);
- EOF
-
- node create-incremental-data.js
-
- # Create incremental backup
- docker exec connectkit-db pg_dump -U admin connectkit > backup-incremental.sql
-
- INCREMENTAL_SIZE=$(wc -l < backup-incremental.sql)
- ORIGINAL_SIZE=$(wc -l < backup-test.sql)
-
- if [ "$INCREMENTAL_SIZE" -gt "$ORIGINAL_SIZE" ]; then
- echo "â
**Incremental backup successful**: Backup grew from ${ORIGINAL_SIZE} to ${INCREMENTAL_SIZE} lines" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ **Incremental backup**: Size didn't increase as expected" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Upload backup files
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: backup-test-files
- path: |
- backup-test.sql
- backup-incremental.sql
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # Nightly report consolidation
- nightly-report:
- name: Nightly Test Report
- runs-on: ubuntu-latest
- needs: [extended-performance, chaos-testing, data-integrity, backup-restore]
- if: always()
-
- steps:
- - name: Download all nightly test artifacts
- uses: actions/download-artifact@v4
- with:
- path: nightly-results/
-
- - name: Generate comprehensive nightly report
- run: |
- echo "# đ Nightly Extended Testing Report" > nightly-report.md
- echo "" >> nightly-report.md
- echo "**Test Date:** $(date -u)" >> nightly-report.md
- echo "**Test Duration:** Extended (~2 hours)" >> nightly-report.md
- echo "" >> nightly-report.md
-
- echo "## đ Test Suite Results" >> nightly-report.md
- echo "" >> nightly-report.md
-
- # Check job results
- if [ "${{ needs.extended-performance.result }}" == "success" ]; then
- echo "â
**Extended Performance Testing**: All thresholds met" >> nightly-report.md
- elif [ "${{ needs.extended-performance.result }}" == "skipped" ]; then
- echo "âī¸ **Extended Performance Testing**: Skipped" >> nightly-report.md
- else
- echo "â **Extended Performance Testing**: Failed or timeout" >> nightly-report.md
- fi
-
- if [ "${{ needs.chaos-testing.result }}" == "success" ]; then
- echo "â
**Chaos Engineering**: System resilient to failures" >> nightly-report.md
- elif [ "${{ needs.chaos-testing.result }}" == "skipped" ]; then
- echo "âī¸ **Chaos Engineering**: Skipped" >> nightly-report.md
- else
- echo "â **Chaos Engineering**: System instability detected" >> nightly-report.md
- fi
-
- if [ "${{ needs.data-integrity.result }}" == "success" ]; then
- echo "â
**Data Integrity**: All constraints and consistency checks passed" >> nightly-report.md
- elif [ "${{ needs.data-integrity.result }}" == "skipped" ]; then
- echo "âī¸ **Data Integrity**: Skipped" >> nightly-report.md
- else
- echo "â **Data Integrity**: Data consistency issues found" >> nightly-report.md
- fi
-
- if [ "${{ needs.backup-restore.result }}" == "success" ]; then
- echo "â
**Backup & Restore**: Backup and recovery procedures working" >> nightly-report.md
- elif [ "${{ needs.backup-restore.result }}" == "skipped" ]; then
- echo "âī¸ **Backup & Restore**: Skipped" >> nightly-report.md
- else
- echo "â **Backup & Restore**: Backup or restore process failed" >> nightly-report.md
- fi
-
- echo "" >> nightly-report.md
- echo "## đ¯ System Health Assessment" >> nightly-report.md
- echo "" >> nightly-report.md
-
- # Overall assessment
- SUCCESS_COUNT=$(echo "${{ needs.extended-performance.result == 'success' }} ${{ needs.chaos-testing.result == 'success' }} ${{ needs.data-integrity.result == 'success' }} ${{ needs.backup-restore.result == 'success' }}" | tr ' ' '\n' | grep -c "true")
- TOTAL_TESTS=4
-
- if [ $SUCCESS_COUNT -eq $TOTAL_TESTS ]; then
- echo "đĸ **Overall System Health: EXCELLENT** ($SUCCESS_COUNT/$TOTAL_TESTS tests passed)" >> nightly-report.md
- echo "The system demonstrated excellent stability, performance, and resilience during extended testing." >> nightly-report.md
- elif [ $SUCCESS_COUNT -ge 3 ]; then
- echo "đĄ **Overall System Health: GOOD** ($SUCCESS_COUNT/$TOTAL_TESTS tests passed)" >> nightly-report.md
- echo "The system is performing well with minor issues in some areas." >> nightly-report.md
- elif [ $SUCCESS_COUNT -ge 2 ]; then
- echo "đ **Overall System Health: FAIR** ($SUCCESS_COUNT/$TOTAL_TESTS tests passed)" >> nightly-report.md
- echo "The system has some stability or performance concerns that should be addressed." >> nightly-report.md
- else
- echo "đ´ **Overall System Health: POOR** ($SUCCESS_COUNT/$TOTAL_TESTS tests passed)" >> nightly-report.md
- echo "The system has significant issues that require immediate attention." >> nightly-report.md
- fi
-
- echo "" >> nightly-report.md
- echo "## đ Performance Insights" >> nightly-report.md
- echo "" >> nightly-report.md
- echo "- Extended load testing with realistic user patterns" >> nightly-report.md
- echo "- Memory leak detection and resource monitoring" >> nightly-report.md
- echo "- Database performance under sustained load" >> nightly-report.md
- echo "- API response time degradation analysis" >> nightly-report.md
-
- echo "" >> nightly-report.md
- echo "## đĄī¸ Resilience Findings" >> nightly-report.md
- echo "" >> nightly-report.md
- echo "- Chaos engineering simulated real-world failures" >> nightly-report.md
- echo "- Database and Redis connection failure recovery" >> nightly-report.md
- echo "- System behavior under resource pressure" >> nightly-report.md
- echo "- Cascading failure prevention mechanisms" >> nightly-report.md
-
- echo "" >> nightly-report.md
- echo "## đ Recommendations" >> nightly-report.md
- echo "" >> nightly-report.md
-
- if [ $SUCCESS_COUNT -lt $TOTAL_TESTS ]; then
- echo "### Immediate Actions Needed" >> nightly-report.md
- echo "- Investigate failed test results in detail" >> nightly-report.md
- echo "- Review system logs for error patterns" >> nightly-report.md
- echo "- Consider scaling infrastructure if performance issues detected" >> nightly-report.md
- echo "" >> nightly-report.md
- fi
-
- echo "### Ongoing Maintenance" >> nightly-report.md
- echo "- Monitor trends in nightly test results" >> nightly-report.md
- echo "- Regular backup procedure validation" >> nightly-report.md
- echo "- Performance baseline updates as system evolves" >> nightly-report.md
- echo "- Chaos engineering scenarios expansion" >> nightly-report.md
-
- cat nightly-report.md >> $GITHUB_STEP_SUMMARY
-
- - name: Upload consolidated nightly report
- uses: actions/upload-artifact@v4
- with:
- name: nightly-comprehensive-report
- path: |
- nightly-report.md
- nightly-results/
-
- - name: Send notification on failures
- if: ${{ (needs.extended-performance.result == 'failure') || (needs.chaos-testing.result == 'failure') || (needs.data-integrity.result == 'failure') || (needs.backup-restore.result == 'failure') }}
- run: |
- echo "đ¨ NIGHTLY TEST FAILURES DETECTED"
- echo "One or more nightly tests have failed. Please check the detailed results."
- echo "Failed tests should be investigated and resolved promptly."
-
- # In a real environment, you might send Slack/email notifications here
- # curl -X POST -H 'Content-type: application/json' --data '{"text":"Nightly tests failed"}' $SLACK_WEBHOOK_URL
diff --git a/.github/workflows/archived/performance.yml b/.github/workflows/archived/performance.yml
deleted file mode 100644
index dd74a89..0000000
--- a/.github/workflows/archived/performance.yml
+++ /dev/null
@@ -1,368 +0,0 @@
-name: Performance Testing (Disabled)
-
-# Temporarily disabled - using minimal CI first
-on:
- workflow_dispatch:
- inputs:
- test_duration:
- description: "Load test duration (default: 5m)"
- default: "5m"
- type: string
- max_users:
- description: "Maximum concurrent users (default: 100)"
- default: "100"
- type: string
- # pull_request:
- # branches: [main, develop]
- # push:
- # branches: [main]
- schedule:
- # Run performance tests daily at 2 AM UTC
- - cron: "0 2 * * *"
-
-env:
- NODE_VERSION: "18"
- K6_VERSION: "0.47.0"
-
-jobs:
- # Frontend Performance Testing with Lighthouse CI
- lighthouse-ci:
- name: Lighthouse CI
- runs-on: ubuntu-latest
- if: github.event_name != 'schedule' # Skip on scheduled runs
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 1
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install dependencies
- run: |
- npm install
- npm install --workspace=frontend
-
- - name: Build frontend
- working-directory: ./frontend
- run: npm run build
-
- - name: Start services with Docker Compose
- run: |
- cp .env.example .env
- echo "VITE_API_URL=http://localhost:3001/api" >> .env
- docker compose up -d
- sleep 30
-
- - name: Wait for services to be ready
- run: |
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
- timeout 60 bash -c 'until curl -f http://localhost:3000; do sleep 2; done'
-
- - name: Install Lighthouse CI
- run: npm install -g @lhci/cli@0.12.x
-
- - name: Run Lighthouse CI
- run: |
- lhci collect \
- --url=http://localhost:3000 \
- --url=http://localhost:3000/login \
- --url=http://localhost:3000/register \
- --numberOfRuns=3
- env:
- LHCI_GITHUB_APP_TOKEN: ${{ secrets.LHCI_GITHUB_APP_TOKEN }}
-
- - name: Upload Lighthouse results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: lighthouse-results
- path: .lighthouseci/
-
- - name: Performance Budget Check
- run: |
- lhci assert \
- --preset=lighthouse:recommended \
- --assertions.categories:performance=0.9 \
- --assertions.categories:accessibility=0.9 \
- --assertions.categories:best-practices=0.9 \
- --assertions.categories:seo=0.9
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # Backend Load Testing with k6
- k6-load-test:
- name: k6 Load Testing
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup k6
- run: |
- wget https://github.com/grafana/k6/releases/download/v${{ env.K6_VERSION }}/k6-v${{ env.K6_VERSION }}-linux-amd64.tar.gz
- tar -xzf k6-v${{ env.K6_VERSION }}-linux-amd64.tar.gz
- sudo cp k6-v${{ env.K6_VERSION }}-linux-amd64/k6 /usr/local/bin/
-
- - name: Start services with Docker Compose
- run: |
- cp .env.example .env
- docker compose up -d
- sleep 30
-
- - name: Wait for services to be ready
- run: |
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
-
- - name: Run k6 smoke test
- run: k6 run tests/performance/k6/smoke-test.js --env BASE_URL=http://localhost:3001
-
- - name: Run k6 load test
- run: |
- DURATION=${{ github.event.inputs.test_duration || '5m' }}
- MAX_USERS=${{ github.event.inputs.max_users || '100' }}
- k6 run tests/performance/k6/load-test.js \
- --env BASE_URL=http://localhost:3001 \
- --env DURATION=$DURATION \
- --env MAX_USERS=$MAX_USERS \
- --out json=load-test-results.json
-
- - name: Run k6 stress test (main branch only)
- if: github.ref == 'refs/heads/main'
- run: k6 run tests/performance/k6/stress-test.js --env BASE_URL=http://localhost:3001
-
- - name: Parse k6 results
- if: always()
- run: |
- if [ -f "load-test-results.json" ]; then
- echo "## Load Test Results" >> $GITHUB_STEP_SUMMARY
- echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY
- echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
-
- # Extract key metrics from k6 JSON output
- AVG_RESPONSE_TIME=$(jq -r '.metrics.http_req_duration.values.avg' load-test-results.json)
- P95_RESPONSE_TIME=$(jq -r '.metrics.http_req_duration.values["p(95)"]' load-test-results.json)
- REQUEST_RATE=$(jq -r '.metrics.http_reqs.values.rate' load-test-results.json)
- ERROR_RATE=$(jq -r '.metrics.http_req_failed.values.rate' load-test-results.json)
-
- echo "| Average Response Time | ${AVG_RESPONSE_TIME}ms |" >> $GITHUB_STEP_SUMMARY
- echo "| 95th Percentile | ${P95_RESPONSE_TIME}ms |" >> $GITHUB_STEP_SUMMARY
- echo "| Requests/sec | ${REQUEST_RATE} |" >> $GITHUB_STEP_SUMMARY
- echo "| Error Rate | ${ERROR_RATE}% |" >> $GITHUB_STEP_SUMMARY
-
- # Fail if performance thresholds are not met
- if (( $(echo "$P95_RESPONSE_TIME > 500" | bc -l) )); then
- echo "â Performance threshold exceeded: P95 response time is ${P95_RESPONSE_TIME}ms (threshold: 500ms)"
- exit 1
- fi
-
- if (( $(echo "$ERROR_RATE > 1" | bc -l) )); then
- echo "â Error rate too high: ${ERROR_RATE}% (threshold: 1%)"
- exit 1
- fi
-
- echo "â
Performance thresholds passed"
- fi
-
- - name: Upload k6 results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: k6-results
- path: |
- load-test-results.json
- k6-*.html
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # Database Performance Testing
- database-performance:
- name: Database Performance
- runs-on: ubuntu-latest
- services:
- postgres:
- image: postgres:15-alpine
- env:
- POSTGRES_USER: test
- POSTGRES_PASSWORD: test
- POSTGRES_DB: connectkit_perf_test
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install dependencies
- run: |
- npm install
- npm install --workspace=backend
-
- - name: Run database migrations
- working-directory: ./backend
- env:
- NODE_ENV: test
- DB_HOST: localhost
- DB_PORT: 5432
- DB_USER: test
- DB_PASSWORD: test
- DB_NAME: connectkit_perf_test
- run: npm run db:migrate
-
- - name: Seed performance test data
- working-directory: ./backend
- env:
- NODE_ENV: test
- DB_HOST: localhost
- DB_PORT: 5432
- DB_USER: test
- DB_PASSWORD: test
- DB_NAME: connectkit_perf_test
- run: npm run db:seed:performance
-
- - name: Run database performance tests
- working-directory: ./backend
- env:
- NODE_ENV: test
- DB_HOST: localhost
- DB_PORT: 5432
- DB_USER: test
- DB_PASSWORD: test
- DB_NAME: connectkit_perf_test
- run: npm run test:performance:db
-
- - name: Analyze query performance
- run: |
- echo "## Database Performance Results" >> $GITHUB_STEP_SUMMARY
- echo "Query performance analysis completed" >> $GITHUB_STEP_SUMMARY
- # Add specific metrics parsing here
-
- # Bundle Size Analysis
- bundle-analysis:
- name: Bundle Size Analysis
- runs-on: ubuntu-latest
- if: github.event_name == 'pull_request'
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install dependencies
- run: |
- npm install
- npm install --workspace=frontend
-
- - name: Build frontend
- working-directory: ./frontend
- run: npm run build
-
- - name: Analyze bundle size
- uses: preactjs/compressed-size-action@v2
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- build-script: |
- cd frontend
- npm run build
- pattern: "frontend/dist/**/*.{js,css}"
- exclude: "{**/*.map,**/node_modules/**}"
- strip-hash: '\\.[a-f\\d]{8}\\.'
-
- - name: Check bundle size limits
- working-directory: ./frontend
- run: |
- BUNDLE_SIZE=$(du -sk dist/ | cut -f1)
- THRESHOLD_KB=512
-
- echo "Bundle size: ${BUNDLE_SIZE}KB (threshold: ${THRESHOLD_KB}KB)"
-
- if [ $BUNDLE_SIZE -gt $THRESHOLD_KB ]; then
- echo "â Bundle size exceeds threshold: ${BUNDLE_SIZE}KB > ${THRESHOLD_KB}KB"
- exit 1
- else
- echo "â
Bundle size within threshold: ${BUNDLE_SIZE}KB <= ${THRESHOLD_KB}KB"
- fi
-
- # Memory and CPU Profiling
- profiling:
- name: Performance Profiling
- runs-on: ubuntu-latest
- if: github.ref == 'refs/heads/main' && github.event_name != 'schedule'
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Start services with profiling
- run: |
- cp .env.example .env
- echo "NODE_OPTIONS=--inspect=0.0.0.0:9229 --max-old-space-size=512" >> .env
- docker compose up -d
- sleep 30
-
- - name: Run memory profiling
- run: |
- # Install profiling tools
- npm install -g clinic autocannon
-
- # Profile the application
- timeout 60s autocannon -c 10 -d 30 http://localhost:3001/api/health || true
-
- echo "## Memory Profiling Results" >> $GITHUB_STEP_SUMMARY
- echo "Memory profiling completed - check artifacts for detailed reports" >> $GITHUB_STEP_SUMMARY
-
- - name: Upload profiling results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: profiling-results
- path: |
- .clinic/
- *.html
-
- - name: Stop services
- if: always()
- run: docker compose down -v
-
- # Performance Regression Detection
- performance-comparison:
- name: Performance Comparison
- runs-on: ubuntu-latest
- if: github.event_name == 'pull_request'
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 2
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Compare performance with base branch
- run: |
- echo "## Performance Comparison" >> $GITHUB_STEP_SUMMARY
- echo "Comparing performance metrics with base branch..." >> $GITHUB_STEP_SUMMARY
-
- # This would typically compare metrics from current PR vs main branch
- # Implementation depends on where metrics are stored (e.g., database, files)
- echo "Performance comparison analysis would be implemented here" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/archived/security.yml b/.github/workflows/archived/security.yml
deleted file mode 100644
index c5269ca..0000000
--- a/.github/workflows/archived/security.yml
+++ /dev/null
@@ -1,647 +0,0 @@
-name: Security Testing
-
-# PORT ALLOCATION STRATEGY (to prevent conflicts when jobs run in parallel):
-# âââ OWASP ZAP Job: Frontend: 3000, Backend: 3001, PostgreSQL: 5433, Redis: 6380
-# âââ Security Headers: Frontend: 3100, Backend: 3101, PostgreSQL: 5434, Redis: 6381
-# âââ Backend Security: PostgreSQL: 5432 (isolated service container)
-# âââ Other jobs: Use service containers or no ports
-
-on:
- workflow_dispatch:
- push:
- branches: [main, develop]
- pull_request:
- branches: [main, develop]
- schedule:
- # Run security scans daily at 1 AM UTC
- - cron: "0 1 * * *"
-
-env:
- NODE_VERSION: "18"
-
-jobs:
- # Static Application Security Testing (SAST)
- # NOTE: CodeQL requires GitHub Advanced Security - commented out for public/free repositories
- # codeql-analysis:
- # name: CodeQL Analysis
- # runs-on: ubuntu-latest
- # permissions:
- # actions: read
- # contents: read
- # security-events: write
- # strategy:
- # fail-fast: false
- # matrix:
- # language: ['javascript', 'typescript']
- #
- # steps:
- # - name: Checkout repository
- # uses: actions/checkout@v4
- #
- # - name: Initialize CodeQL
- # uses: github/codeql-action/init@v3
- # with:
- # languages: ${{ matrix.language }}
- # queries: security-extended,security-and-quality
- #
- # - name: Setup Node.js
- # uses: actions/setup-node@v4
- # with:
- # node-version: ${{ env.NODE_VERSION }}
- # cache: 'npm'
- #
- # - name: Install dependencies
- # run: |
- # cd frontend && npm install
- # cd ../backend && npm install
- #
- # - name: Build applications
- # run: |
- # cd frontend && npm run build
- # cd ../backend && npm run build
- #
- # - name: Perform CodeQL Analysis
- # uses: github/codeql-action/analyze@v3
- # with:
- # category: "/language:${{matrix.language}}"
-
- # Dependency vulnerability scanning
- dependency-scan:
- name: Dependency Security Scan
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Run npm audit (Frontend)
- run: |
- npm install
- npm install --workspace=frontend
- npm audit --workspace=frontend --audit-level=moderate --production || echo "Found vulnerabilities - check report"
- npm audit --workspace=frontend --json --production > npm-audit-frontend.json || true
-
- - name: Run npm audit (Backend)
- run: |
- npm install --workspace=backend
- npm audit --workspace=backend --audit-level=moderate --production || echo "Found vulnerabilities - check report"
- npm audit --workspace=backend --json --production > npm-audit-backend.json || true
-
- - name: Upload dependency scan results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: dependency-scan-results
- path: |
- frontend/npm-audit-frontend.json
- backend/npm-audit-backend.json
-
- - name: Check for critical vulnerabilities
- run: |
- echo "## Security Vulnerability Summary" >> $GITHUB_STEP_SUMMARY
-
- # Check frontend vulnerabilities
- if [ -f "frontend/npm-audit-frontend.json" ]; then
- FRONTEND_CRITICAL=$(jq '.vulnerabilities | to_entries | map(select(.value.severity == "critical")) | length' frontend/npm-audit-frontend.json)
- FRONTEND_HIGH=$(jq '.vulnerabilities | to_entries | map(select(.value.severity == "high")) | length' frontend/npm-audit-frontend.json)
- echo "**Frontend:** $FRONTEND_CRITICAL critical, $FRONTEND_HIGH high severity vulnerabilities" >> $GITHUB_STEP_SUMMARY
-
- if [ "$FRONTEND_CRITICAL" != "0" ]; then
- echo "â Critical vulnerabilities found in frontend dependencies!"
- exit 1
- fi
- fi
-
- # Check backend vulnerabilities
- if [ -f "backend/npm-audit-backend.json" ]; then
- BACKEND_CRITICAL=$(jq '.vulnerabilities | to_entries | map(select(.value.severity == "critical")) | length' backend/npm-audit-backend.json)
- BACKEND_HIGH=$(jq '.vulnerabilities | to_entries | map(select(.value.severity == "high")) | length' backend/npm-audit-backend.json)
- echo "**Backend:** $BACKEND_CRITICAL critical, $BACKEND_HIGH high severity vulnerabilities" >> $GITHUB_STEP_SUMMARY
-
- if [ "$BACKEND_CRITICAL" != "0" ]; then
- echo "â Critical vulnerabilities found in backend dependencies!"
- exit 1
- fi
- fi
-
- echo "â
No critical vulnerabilities found in dependencies" >> $GITHUB_STEP_SUMMARY
-
- # Container security scanning
- container-security:
- name: Container Security Scan
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Build Docker images
- run: |
- # Build backend targeting dependencies stage (skip TypeScript compilation for security scan)
- docker build -t connectkit-backend:security-test --target dependencies -f docker/backend/Dockerfile .
- # Build frontend targeting dependencies stage (skip TypeScript compilation for security scan)
- docker build -t connectkit-frontend:security-test --target dependencies -f docker/frontend/Dockerfile .
-
- - name: Run Trivy vulnerability scanner (Backend)
- uses: aquasecurity/trivy-action@master
- with:
- image-ref: "connectkit-backend:security-test"
- format: "sarif"
- output: "trivy-backend-results.sarif"
-
- - name: Run Trivy vulnerability scanner (Frontend)
- uses: aquasecurity/trivy-action@master
- with:
- image-ref: "connectkit-frontend:security-test"
- format: "sarif"
- output: "trivy-frontend-results.sarif"
-
- # NOTE: SARIF upload requires GitHub Advanced Security - commented out for public/free repositories
- # - name: Upload Trivy scan results to GitHub Security tab
- # uses: github/codeql-action/upload-sarif@v3
- # if: always()
- # with:
- # sarif_file: '.'
-
- - name: Run Trivy vulnerability scanner (Table format)
- uses: aquasecurity/trivy-action@master
- with:
- image-ref: "connectkit-backend:security-test"
- format: "table"
- exit-code: "1"
- ignore-unfixed: true
- vuln-type: "os,library"
- severity: "CRITICAL,HIGH"
-
- # NOTE: Secret detection tools disabled - GitLeaks requires organization license
- # Re-enable when proper licensing is available
- # secret-scan:
- # name: Secret Detection
- # runs-on: ubuntu-latest
- # steps:
- # - uses: actions/checkout@v4
- # with:
- # fetch-depth: 0
- #
- # - name: Run GitLeaks
- # uses: gitleaks/gitleaks-action@v2
- # env:
- # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- # GITLEAKS_LICENSE: ${{ secrets.GITLEAKS_LICENSE}}
- #
- # - name: Run TruffleHog
- # uses: trufflesecurity/trufflehog@main
- # with:
- # path: ./
- # base: main
- # head: HEAD
- # extra_args: --debug --only-verified
-
- # Frontend security testing
- frontend-security:
- name: Frontend Security Tests
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install dependencies
- run: |
- npm install
- npm install --workspace=frontend
-
- - name: Build frontend
- working-directory: ./frontend
- run: npm run build
-
- - name: Install security ESLint plugin
- working-directory: ./frontend
- run: npm install --save-dev eslint-plugin-security
-
- - name: Run ESLint security plugin
- working-directory: ./frontend
- run: |
- npm run lint -- --format=json --output-file=eslint-security-results.json || true
-
- - name: Analyze bundle for security issues
- working-directory: ./frontend
- run: |
- # Install and run webpack-bundle-analyzer for security analysis
- npx webpack-bundle-analyzer dist/assets/*.js --report --format json --out bundle-analysis.json || true
-
- - name: Check for sensitive data in build
- working-directory: ./frontend/dist
- run: |
- echo "## Frontend Security Scan Results" >> $GITHUB_STEP_SUMMARY
-
- # Check for potential secrets in build files
- if grep -r -i "password\|secret\|key\|token" . --include="*.js" --include="*.css" | grep -v "node_modules"; then
- echo "â ī¸ Potential sensitive data found in build files:" >> $GITHUB_STEP_SUMMARY
- grep -r -i "password\|secret\|key\|token" . --include="*.js" --include="*.css" | head -5 >> $GITHUB_STEP_SUMMARY
- else
- echo "â
No sensitive data found in build files" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Check file permissions
- find . -type f -perm /077 | head -10 || echo "â
File permissions look secure"
-
- - name: Upload frontend security results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: frontend-security-results
- path: |
- frontend/eslint-security-results.json
- frontend/bundle-analysis.json
-
- # Backend security testing
- backend-security:
- name: Backend Security Tests
- runs-on: ubuntu-latest
- services:
- postgres:
- image: postgres:15-alpine
- env:
- POSTGRES_USER: test_security
- POSTGRES_PASSWORD: test_security
- POSTGRES_DB: connectkit_security_test
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: ${{ env.NODE_VERSION }}
-
- - name: Install dependencies
- run: |
- npm install
- npm install --workspace=backend
-
- - name: Run ESLint security plugin
- working-directory: ./backend
- run: |
- npm run lint -- --format=json --output-file=eslint-security-results.json || true
-
- - name: Run custom security tests
- working-directory: ./backend
- env:
- NODE_ENV: test
- DB_HOST: localhost
- DB_PORT: 5432
- DB_USER: test_security
- DB_PASSWORD: test_security
- DB_NAME: connectkit_security_test
- JWT_SECRET: test-security-secret-key
- JWT_REFRESH_SECRET: test-security-refresh-key
- ENCRYPTION_KEY: test-security-encryption-key-32ch
- run: |
- # Run database migrations
- npm run db:migrate || echo "Migration failed, continuing..."
-
- # Run security-focused tests if they exist
- if [ -f "src/tests/security" ]; then
- npm run test:security || echo "No security tests found"
- fi
-
- # Test for SQL injection vulnerabilities
- npm run test -- --testPathPattern=security || echo "No security test pattern found"
-
- - name: Check for hardcoded secrets
- working-directory: ./backend
- run: |
- echo "## Backend Security Scan Results" >> $GITHUB_STEP_SUMMARY
-
- # Check for hardcoded secrets (excluding test files)
- if grep -r -E "(password|secret|key|token)\s*[:=]\s*['\"][^'\"]*['\"]" src/ --include="*.ts" --include="*.js" --exclude-dir="tests" --exclude-dir="__tests__" | grep -v -E "(test|mock|example|placeholder)"; then
- echo "â ī¸ Potential hardcoded secrets found:" >> $GITHUB_STEP_SUMMARY
- echo "Please review and move to environment variables" >> $GITHUB_STEP_SUMMARY
- else
- echo "â
No hardcoded secrets found" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Check for console.log statements in production code
- if find src/ -name "*.ts" -not -path "*/tests/*" -not -path "*/__tests__/*" -exec grep -l "console\." {} \;; then
- echo "â ī¸ Console statements found - should be removed for production" >> $GITHUB_STEP_SUMMARY
- else
- echo "â
No console statements found in production code" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Upload backend security results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: backend-security-results
- path: |
- backend/eslint-security-results.json
-
- # OWASP ZAP security testing
- owasp-zap:
- name: OWASP ZAP Security Test
- runs-on: ubuntu-latest
- if: github.event_name == 'push' && github.ref == 'refs/heads/main'
- steps:
- - uses: actions/checkout@v4
-
- - name: Start application services
- run: |
- # Create environment file with unique ports for OWASP ZAP job
- cat > .env << EOF
- NODE_ENV=test
- PORT=3001
- FRONTEND_PORT=3000
- DB_HOST=localhost
- DB_PORT=5433
- DB_USER=postgres
- DB_PASSWORD=postgres
- DB_NAME=connectkit_test
- REDIS_URL=redis://localhost:6380
- JWT_SECRET=test-jwt-secret-for-security-testing-very-long-key
- JWT_REFRESH_SECRET=test-refresh-secret-for-security-testing-very-long-key
- ENCRYPTION_KEY=test-encryption-key-32-characters
- CORS_ORIGIN=http://localhost:3000
- EOF
-
- # Set unique port overrides to prevent conflicts with other security jobs
- export DB_PORT=5433
- export REDIS_PORT=6380
- export BACKEND_PORT=3001
- export FRONTEND_PORT=3000
-
- # Check port availability before starting
- if lsof -i:3000 -i:3001 -i:5433 -i:6380 >/dev/null 2>&1; then
- echo "Warning: Some ports already in use, attempting cleanup..."
- docker compose down -v || true
- sleep 5
- fi
-
- docker compose up -d
- sleep 60
-
- - name: Wait for services
- run: |
- timeout 60 bash -c 'until curl -f http://localhost:3001/api/health; do sleep 2; done'
- timeout 60 bash -c 'until curl -f http://localhost:3000; do sleep 2; done'
-
- - name: Run OWASP ZAP Baseline Scan
- uses: zaproxy/action-baseline@v0.10.0
- with:
- target: "http://localhost:3000"
- rules_file_name: ".zap/rules.tsv"
- cmd_options: "-a -d -T 15 -m 5"
-
- - name: Run OWASP ZAP API Scan
- uses: zaproxy/action-api-scan@v0.6.0
- with:
- target: "http://localhost:3001/api"
- format: openapi
- cmd_options: "-a -d -T 10 -l INFO"
-
- - name: Upload ZAP results
- if: always()
- uses: actions/upload-artifact@v4
- with:
- name: owasp-zap-results
- path: |
- report_html.html
- report_json.json
-
- - name: Stop services
- if: always()
- run: |
- # Comprehensive cleanup for OWASP ZAP job (ports: 3000, 3001, 5433, 6380)
- docker compose down -v || true
-
- # Ensure ports are freed
- docker container prune -f || true
- sleep 5
-
- # Kill any processes still using our ports
- for port in 3000 3001 5433 6380; do
- if lsof -ti:$port >/dev/null 2>&1; then
- echo "Cleaning up processes on port $port"
- lsof -ti:$port | xargs kill -9 || true
- fi
- done
-
- # Security headers and configuration testing
- security-headers:
- name: Security Headers Test
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - name: Start application services
- run: |
- # Create environment file with unique ports for Security Headers job
- cat > .env << EOF
- NODE_ENV=test
- PORT=3101
- FRONTEND_PORT=3100
- DB_HOST=localhost
- DB_PORT=5434
- DB_USER=postgres
- DB_PASSWORD=postgres
- DB_NAME=connectkit_test
- REDIS_URL=redis://localhost:6381
- JWT_SECRET=test-jwt-secret-for-security-testing-very-long-key
- JWT_REFRESH_SECRET=test-refresh-secret-for-security-testing-very-long-key
- ENCRYPTION_KEY=test-encryption-key-32-characters
- CORS_ORIGIN=http://localhost:3100
- EOF
-
- # Set unique port overrides to prevent conflicts with other security jobs
- export DB_PORT=5434
- export REDIS_PORT=6381
- export BACKEND_PORT=3101
- export FRONTEND_PORT=3100
-
- # Check port availability before starting
- if lsof -i:3100 -i:3101 -i:5434 -i:6381 >/dev/null 2>&1; then
- echo "Warning: Some ports already in use, attempting cleanup..."
- docker compose down -v || true
- sleep 5
- fi
-
- docker compose up -d
- sleep 45
-
- - name: Wait for services
- run: |
- timeout 60 bash -c 'until curl -f http://localhost:3101/api/health; do sleep 2; done'
- timeout 60 bash -c 'until curl -f http://localhost:3100; do sleep 2; done'
-
- - name: Test security headers
- run: |
- echo "## Security Headers Analysis" >> $GITHUB_STEP_SUMMARY
-
- # Test backend security headers
- echo "### Backend API Security Headers" >> $GITHUB_STEP_SUMMARY
- BACKEND_HEADERS=$(curl -I http://localhost:3101/api/health 2>/dev/null)
-
- # Check for required security headers
- if echo "$BACKEND_HEADERS" | grep -i "x-content-type-options: nosniff"; then
- echo "â
X-Content-Type-Options: nosniff" >> $GITHUB_STEP_SUMMARY
- else
- echo "â Missing X-Content-Type-Options header" >> $GITHUB_STEP_SUMMARY
- fi
-
- if echo "$BACKEND_HEADERS" | grep -i "x-frame-options"; then
- echo "â
X-Frame-Options present" >> $GITHUB_STEP_SUMMARY
- else
- echo "â Missing X-Frame-Options header" >> $GITHUB_STEP_SUMMARY
- fi
-
- if echo "$BACKEND_HEADERS" | grep -i "strict-transport-security"; then
- echo "â
Strict-Transport-Security present" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ Missing HSTS header (expected in production)" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Test frontend security headers
- echo "### Frontend Security Headers" >> $GITHUB_STEP_SUMMARY
- FRONTEND_HEADERS=$(curl -I http://localhost:3100 2>/dev/null)
-
- if echo "$FRONTEND_HEADERS" | grep -i "content-security-policy"; then
- echo "â
Content-Security-Policy present" >> $GITHUB_STEP_SUMMARY
- else
- echo "â Missing Content-Security-Policy header" >> $GITHUB_STEP_SUMMARY
- fi
-
- if echo "$FRONTEND_HEADERS" | grep -i "permissions-policy"; then
- echo "â
Permissions-Policy present" >> $GITHUB_STEP_SUMMARY
- else
- echo "â Missing Permissions-Policy header" >> $GITHUB_STEP_SUMMARY
- fi
-
- if echo "$FRONTEND_HEADERS" | grep -i "cross-origin-embedder-policy"; then
- echo "â
Cross-Origin-Embedder-Policy present" >> $GITHUB_STEP_SUMMARY
- else
- echo "â Missing Cross-Origin-Embedder-Policy header" >> $GITHUB_STEP_SUMMARY
- fi
-
- # Check for information disclosure
- echo "### Information Disclosure Check" >> $GITHUB_STEP_SUMMARY
- if echo "$BACKEND_HEADERS$FRONTEND_HEADERS" | grep -i "server:"; then
- echo "â ī¸ Server header present - consider removing" >> $GITHUB_STEP_SUMMARY
- else
- echo "â
No server information disclosed" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Test CORS configuration
- run: |
- echo "### CORS Configuration Test" >> $GITHUB_STEP_SUMMARY
-
- # Test CORS preflight
- CORS_RESPONSE=$(curl -H "Origin: http://malicious.example.com" \
- -H "Access-Control-Request-Method: POST" \
- -H "Access-Control-Request-Headers: X-Requested-With" \
- -X OPTIONS http://localhost:3101/api/contacts 2>/dev/null)
-
- if echo "$CORS_RESPONSE" | grep -i "access-control-allow-origin: \*"; then
- echo "â CORS allows all origins - security risk!" >> $GITHUB_STEP_SUMMARY
- else
- echo "â
CORS properly configured" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Test rate limiting
- run: |
- echo "### Rate Limiting Test" >> $GITHUB_STEP_SUMMARY
-
- # Test rate limiting with rapid requests
- for i in {1..15}; do
- curl -s -o /dev/null -w "%{http_code}\n" http://localhost:3101/api/health
- done | tail -5 > rate_limit_test.txt
-
- if grep -q "429" rate_limit_test.txt; then
- echo "â
Rate limiting is active" >> $GITHUB_STEP_SUMMARY
- else
- echo "â ī¸ Rate limiting not detected" >> $GITHUB_STEP_SUMMARY
- fi
-
- - name: Stop services
- if: always()
- run: |
- # Comprehensive cleanup for Security Headers job (ports: 3100, 3101, 5434, 6381)
- docker compose down -v || true
-
- # Ensure ports are freed
- docker container prune -f || true
- sleep 5
-
- # Kill any processes still using our ports
- for port in 3100 3101 5434 6381; do
- if lsof -ti:$port >/dev/null 2>&1; then
- echo "Cleaning up processes on port $port"
- lsof -ti:$port | xargs kill -9 || true
- fi
- done
-
- # Security test report consolidation
- security-report:
- name: Security Report
- runs-on: ubuntu-latest
- needs:
- [dependency-scan, container-security, frontend-security, backend-security]
- if: always()
- steps:
- - name: Download all security artifacts
- uses: actions/download-artifact@v4
- with:
- path: security-results/
-
- - name: Generate security summary report
- run: |
- echo "# đ Security Testing Report" > security-summary.md
- echo "" >> security-summary.md
- echo "## Test Results Summary" >> security-summary.md
- echo "" >> security-summary.md
-
- # Check job results
- # CodeQL Analysis commented out - requires GitHub Advanced Security
- echo "âšī¸ **CodeQL Analysis**: Skipped (requires GitHub Advanced Security)" >> security-summary.md
-
- if [ "${{ needs.dependency-scan.result }}" == "success" ]; then
- echo "â
**Dependency Scan**: No critical vulnerabilities" >> security-summary.md
- else
- echo "â **Dependency Scan**: Critical vulnerabilities found" >> security-summary.md
- fi
-
- if [ "${{ needs.container-security.result }}" == "success" ]; then
- echo "â
**Container Security**: Passed" >> security-summary.md
- else
- echo "â **Container Security**: Vulnerabilities found" >> security-summary.md
- fi
-
- echo "â ī¸ **Secret Detection**: Disabled (requires organization license)" >> security-summary.md
-
- echo "" >> security-summary.md
- echo "## Recommendations" >> security-summary.md
- echo "" >> security-summary.md
- echo "- Regular security updates for all dependencies" >> security-summary.md
- echo "- Implement proper secret management" >> security-summary.md
- echo "- Regular security header audits" >> security-summary.md
- echo "- Continuous monitoring for vulnerabilities" >> security-summary.md
-
- cat security-summary.md >> $GITHUB_STEP_SUMMARY
-
- - name: Upload consolidated security report
- uses: actions/upload-artifact@v4
- with:
- name: security-report
- path: |
- security-summary.md
- security-results/
diff --git a/.github/workflows/security-containers.yml b/.github/workflows/security-containers.yml
index 274f5e6..4e37502 100644
--- a/.github/workflows/security-containers.yml
+++ b/.github/workflows/security-containers.yml
@@ -25,26 +25,26 @@ jobs:
container-security:
name: Container Security Scan
runs-on: ubuntu-latest
-
+
# Skip any PR created by dependabot to avoid permission issues
if: (github.actor != 'dependabot[bot]')
-
+
strategy:
fail-fast: false
matrix:
service: [backend, frontend]
-
+
steps:
- name: Checkout repository
uses: actions/checkout@v4
-
+
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
+
- name: Build Docker image (${{ matrix.service }})
run: |
echo "Building ${{ matrix.service }} Docker image for security scanning..."
-
+
# Build targeting dependencies stage to speed up security scan
docker build \
-t connectkit-${{ matrix.service }}:security-test \
@@ -62,7 +62,7 @@ jobs:
docker build -t connectkit-${{ matrix.service }}:security-test -f Dockerfile.minimal .
}
continue-on-error: true
-
+
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
@@ -72,7 +72,7 @@ jobs:
severity: "CRITICAL,HIGH,MEDIUM"
timeout: "10m"
continue-on-error: true
-
+
- name: Run Trivy scanner (Table format)
uses: aquasecurity/trivy-action@master
with:
@@ -84,25 +84,71 @@ jobs:
severity: "CRITICAL,HIGH"
timeout: "10m"
continue-on-error: true
-
- - name: Run Grype vulnerability scanner
+
+ - name: Install Syft and Grype
run: |
+ echo "Installing Syft for SBOM generation..."
+ curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /tmp
+
echo "Installing Grype scanner..."
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /tmp
-
+
+ /tmp/syft --version
+ /tmp/grype --version
+ continue-on-error: true
+
+ - name: Generate container SBOM
+ run: |
+ echo "Generating SBOM for ${{ matrix.service }} container..."
+
+ # Generate SBOM in multiple formats
+ /tmp/syft connectkit-${{ matrix.service }}:security-test \
+ -o spdx-json=sbom-${{ matrix.service }}-spdx.json \
+ -o cyclonedx-json=sbom-${{ matrix.service }}-cyclonedx.json \
+ -o json=sbom-${{ matrix.service }}-syft.json
+
+ echo "SBOM generated successfully"
+
+ # Display SBOM summary
+ echo "### SBOM Summary for ${{ matrix.service }}:" >> $GITHUB_STEP_SUMMARY
+ if [ -f "sbom-${{ matrix.service }}-syft.json" ]; then
+ COMPONENTS=$(jq '.artifacts | length' sbom-${{ matrix.service }}-syft.json || echo "0")
+ echo "- Total Components: $COMPONENTS" >> $GITHUB_STEP_SUMMARY
+
+ # Count by type
+ echo "- Component Types:" >> $GITHUB_STEP_SUMMARY
+ jq -r '.artifacts[].type' sbom-${{ matrix.service }}-syft.json | sort | uniq -c | while read count type; do
+ echo " - $type: $count" >> $GITHUB_STEP_SUMMARY
+ done
+ fi
+ continue-on-error: true
+
+ - name: Run Grype vulnerability scanner
+ run: |
echo "Scanning ${{ matrix.service }} with Grype..."
- /tmp/grype connectkit-${{ matrix.service }}:security-test \
- --output json \
- --file grype-${{ matrix.service }}-results.json \
- --fail-on critical || echo "Grype scan completed with findings"
+
+ # Scan using SBOM if it exists, otherwise scan image
+ if [ -f "sbom-${{ matrix.service }}-syft.json" ]; then
+ echo "Scanning via SBOM..."
+ /tmp/grype sbom:sbom-${{ matrix.service }}-syft.json \
+ --output json \
+ --file grype-${{ matrix.service }}-results.json \
+ --fail-on critical || echo "Grype scan completed with findings"
+ else
+ echo "Scanning container directly..."
+ /tmp/grype connectkit-${{ matrix.service }}:security-test \
+ --output json \
+ --file grype-${{ matrix.service }}-results.json \
+ --fail-on critical || echo "Grype scan completed with findings"
+ fi
continue-on-error: true
-
+
- name: Analyze Docker configuration
run: |
echo "## Docker Security Analysis - ${{ matrix.service }}" >> $GITHUB_STEP_SUMMARY
-
+
DOCKERFILE="docker/${{ matrix.service }}/Dockerfile"
-
+
if [ -f "$DOCKERFILE" ]; then
echo "### Dockerfile Security Checks:" >> $GITHUB_STEP_SUMMARY
@@ -142,11 +188,11 @@ jobs:
fi
fi
continue-on-error: true
-
+
- name: Check base image security
run: |
echo "### Base Image Security Check:" >> $GITHUB_STEP_SUMMARY
-
+
# Extract base image from Dockerfile
DOCKERFILE="docker/${{ matrix.service }}/Dockerfile"
if [ -f "$DOCKERFILE" ]; then
@@ -161,11 +207,11 @@ jobs:
fi
fi
continue-on-error: true
-
+
- name: Generate vulnerability summary
run: |
echo "### Vulnerability Summary:" >> $GITHUB_STEP_SUMMARY
-
+
if [ -f "grype-${{ matrix.service }}-results.json" ]; then
CRITICAL=$(jq '[.matches[] | select(.vulnerability.severity == "Critical")] | length' grype-${{ matrix.service }}-results.json || echo "0")
HIGH=$(jq '[.matches[] | select(.vulnerability.severity == "High")] | length' grype-${{ matrix.service }}-results.json || echo "0")
@@ -182,8 +228,8 @@ jobs:
fi
fi
continue-on-error: true
-
- - name: Upload scan results
+
+ - name: Upload scan results and SBOMs
if: always()
uses: actions/upload-artifact@v4
with:
@@ -191,4 +237,5 @@ jobs:
path: |
trivy-${{ matrix.service }}-results.sarif
grype-${{ matrix.service }}-results.json
- retention-days: 30
\ No newline at end of file
+ sbom-${{ matrix.service }}-*.json
+ retention-days: 30
diff --git a/.github/workflows/security-dependencies.yml b/.github/workflows/security-dependencies.yml
index 1be2f7c..f73a33f 100644
--- a/.github/workflows/security-dependencies.yml
+++ b/.github/workflows/security-dependencies.yml
@@ -18,34 +18,34 @@ jobs:
dependency-scan:
name: Dependency Security Scan
runs-on: ubuntu-latest
-
+
# Skip any PR created by dependabot to avoid permission issues
if: (github.actor != 'dependabot[bot]')
-
+
steps:
- name: Checkout repository
uses: actions/checkout@v4
-
+
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 18
- cache: 'npm'
-
+ cache: "npm"
+
- name: Install workspace dependencies
run: |
echo "Installing workspace dependencies..."
npm install
continue-on-error: true
-
+
- name: Run npm audit (Frontend)
run: |
echo "## Frontend Dependency Security Scan" >> $GITHUB_STEP_SUMMARY
echo "Running npm audit for frontend dependencies..."
-
+
npm audit --workspace=frontend --audit-level=moderate --production || echo "Found vulnerabilities - check report"
npm audit --workspace=frontend --json --production > frontend-audit.json || true
-
+
# Extract vulnerability counts
if [ -f "frontend-audit.json" ]; then
CRITICAL=$(jq '.metadata.vulnerabilities.critical // 0' frontend-audit.json)
@@ -67,15 +67,15 @@ jobs:
fi
fi
continue-on-error: true
-
+
- name: Run npm audit (Backend)
run: |
echo "## Backend Dependency Security Scan" >> $GITHUB_STEP_SUMMARY
echo "Running npm audit for backend dependencies..."
-
+
npm audit --workspace=backend --audit-level=moderate --production || echo "Found vulnerabilities - check report"
npm audit --workspace=backend --json --production > backend-audit.json || true
-
+
# Extract vulnerability counts
if [ -f "backend-audit.json" ]; then
CRITICAL=$(jq '.metadata.vulnerabilities.critical // 0' backend-audit.json)
@@ -97,22 +97,57 @@ jobs:
fi
fi
continue-on-error: true
-
+
+ - name: Generate SBOM for dependencies
+ run: |
+ echo "## SBOM Generation" >> $GITHUB_STEP_SUMMARY
+ echo "Generating Software Bill of Materials..." >> $GITHUB_STEP_SUMMARY
+
+ # Install SBOM generation tools
+ echo "Installing SBOM tools..."
+ npm install -g @cyclonedx/cyclonedx-npm
+
+ # Generate CycloneDX SBOM for frontend
+ cd frontend
+ npx @cyclonedx/cyclonedx-npm --output-format json --output-file sbom-deps-frontend.json || true
+ npx @cyclonedx/cyclonedx-npm --output-format xml --output-file sbom-deps-frontend.xml || true
+
+ # Generate CycloneDX SBOM for backend
+ cd ../backend
+ npx @cyclonedx/cyclonedx-npm --output-format json --output-file sbom-deps-backend.json || true
+ npx @cyclonedx/cyclonedx-npm --output-format xml --output-file sbom-deps-backend.xml || true
+
+ cd ..
+
+ # Count components
+ if [ -f "frontend/sbom-deps-frontend.json" ]; then
+ FRONTEND_DEPS=$(jq '.components | length' frontend/sbom-deps-frontend.json || echo "0")
+ echo "### Frontend SBOM: $FRONTEND_DEPS components" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ if [ -f "backend/sbom-deps-backend.json" ]; then
+ BACKEND_DEPS=$(jq '.components | length' backend/sbom-deps-backend.json || echo "0")
+ echo "### Backend SBOM: $BACKEND_DEPS components" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "" >> $GITHUB_STEP_SUMMARY
+ continue-on-error: true
+
- name: Check for outdated packages
run: |
echo "## Outdated Package Check" >> $GITHUB_STEP_SUMMARY
echo "Checking for outdated packages..."
-
+
npm outdated --workspace=frontend > frontend-outdated.txt || true
npm outdated --workspace=backend > backend-outdated.txt || true
-
+
if [ -s "frontend-outdated.txt" ]; then
echo "### Frontend Outdated Packages:" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
head -20 frontend-outdated.txt >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
fi
-
+
if [ -s "backend-outdated.txt" ]; then
echo "### Backend Outdated Packages:" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
@@ -120,8 +155,8 @@ jobs:
echo '```' >> $GITHUB_STEP_SUMMARY
fi
continue-on-error: true
-
- - name: Upload dependency scan results
+
+ - name: Upload dependency scan results and SBOMs
if: always()
uses: actions/upload-artifact@v4
with:
@@ -131,28 +166,32 @@ jobs:
backend-audit.json
frontend-outdated.txt
backend-outdated.txt
+ frontend/sbom-deps-*.json
+ frontend/sbom-deps-*.xml
+ backend/sbom-deps-*.json
+ backend/sbom-deps-*.xml
retention-days: 30
-
+
- name: Enforce security policy
run: |
echo "Checking security policy compliance..."
-
+
FRONTEND_CRITICAL=0
BACKEND_CRITICAL=0
-
+
if [ -f "frontend-audit.json" ]; then
FRONTEND_CRITICAL=$(jq '.metadata.vulnerabilities.critical // 0' frontend-audit.json)
fi
-
+
if [ -f "backend-audit.json" ]; then
BACKEND_CRITICAL=$(jq '.metadata.vulnerabilities.critical // 0' backend-audit.json)
fi
-
+
if [ "$FRONTEND_CRITICAL" != "0" ] || [ "$BACKEND_CRITICAL" != "0" ]; then
echo "â Build failed due to critical security vulnerabilities!"
echo "Please run 'npm audit fix' or update vulnerable dependencies."
exit 1
fi
-
+
echo "â
Security policy check passed - no critical vulnerabilities"
- continue-on-error: true
\ No newline at end of file
+ continue-on-error: true
diff --git a/.github/workflows/security-report.yml b/.github/workflows/security-report.yml
index 0a49d77..c31d349 100644
--- a/.github/workflows/security-report.yml
+++ b/.github/workflows/security-report.yml
@@ -2,7 +2,7 @@ name: Security - Consolidated Report
on:
workflow_run:
- workflows:
+ workflows:
- "Security - Dependency Scanning"
- "Security - Container Scanning"
- "Security - Frontend Analysis"
@@ -24,20 +24,20 @@ jobs:
security-report:
name: Security Report Consolidation
runs-on: ubuntu-latest
-
+
# Skip any PR created by dependabot to avoid permission issues
if: (github.actor != 'dependabot[bot]')
-
+
steps:
- name: Checkout repository
uses: actions/checkout@v4
-
+
- name: Setup report environment
run: |
echo "Setting up security report environment..."
mkdir -p security-reports
echo "Report directory created"
-
+
- name: Download recent artifacts
uses: dawidd6/action-download-artifact@v3
with:
@@ -48,17 +48,17 @@ jobs:
if_no_artifact_found: warn
search_artifacts: true
continue-on-error: true
-
+
- name: Analyze dependency scan results
run: |
echo "# đ ConnectKit Security Report" > security-summary.md
echo "" >> security-summary.md
echo "**Generated**: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "## đĻ Dependency Security" >> security-summary.md
echo "" >> security-summary.md
-
+
# Check for dependency scan results
if find security-reports -name "*audit*.json" -type f | head -1; then
TOTAL_VULNS=0
@@ -99,12 +99,12 @@ jobs:
fi
echo "" >> security-summary.md
continue-on-error: true
-
+
- name: Analyze container security results
run: |
echo "## đŗ Container Security" >> security-summary.md
echo "" >> security-summary.md
-
+
# Check for Trivy/Grype results
if find security-reports -name "*trivy*.sarif" -o -name "*grype*.json" -type f | head -1; then
echo "### Container Vulnerability Summary" >> security-summary.md
@@ -139,12 +139,12 @@ jobs:
fi
echo "" >> security-summary.md
continue-on-error: true
-
+
- name: Analyze application security results
run: |
echo "## đĄī¸ Application Security" >> security-summary.md
echo "" >> security-summary.md
-
+
# Frontend security
echo "### Frontend Security" >> security-summary.md
if find security-reports -path "*frontend*" -name "*eslint*.json" -type f | head -1; then
@@ -165,7 +165,7 @@ jobs:
echo "âšī¸ No frontend security scan results" >> security-summary.md
fi
echo "" >> security-summary.md
-
+
# Backend security
echo "### Backend Security" >> security-summary.md
if find security-reports -path "*backend*" -name "*eslint*.json" -type f | head -1; then
@@ -187,37 +187,85 @@ jobs:
fi
echo "" >> security-summary.md
continue-on-error: true
-
+
+ - name: Analyze SBOM results
+ run: |
+ echo "## đĻ Software Bill of Materials (SBOM)" >> security-summary.md
+ echo "" >> security-summary.md
+
+ # Check for SBOM files
+ SBOM_COUNT=$(find security-reports -name "sbom-*.json" 2>/dev/null | wc -l || echo "0")
+
+ if [ "$SBOM_COUNT" -gt 0 ]; then
+ echo "### SBOM Generation Status: â
Active" >> security-summary.md
+ echo "- Total SBOMs generated: $SBOM_COUNT" >> security-summary.md
+ echo "" >> security-summary.md
+
+ # Count total components across all SBOMs
+ TOTAL_COMPONENTS=0
+ for sbom in security-reports/**/sbom-*-syft.json security-reports/**/sbom-*-cdxgen.json; do
+ if [ -f "$sbom" ]; then
+ if echo "$sbom" | grep -q "syft"; then
+ COMPONENTS=$(jq '.artifacts | length' "$sbom" 2>/dev/null || echo "0")
+ elif echo "$sbom" | grep -q "cdxgen\|cyclonedx"; then
+ COMPONENTS=$(jq '.components | length' "$sbom" 2>/dev/null || echo "0")
+ else
+ COMPONENTS=0
+ fi
+ TOTAL_COMPONENTS=$((TOTAL_COMPONENTS + COMPONENTS))
+ fi
+ done
+
+ echo "### Supply Chain Summary:" >> security-summary.md
+ echo "- Total components tracked: $TOTAL_COMPONENTS" >> security-summary.md
+ echo "- Formats available: SPDX, CycloneDX, Syft native" >> security-summary.md
+ echo "" >> security-summary.md
+
+ # Check for license information
+ LICENSE_FILES=$(find security-reports -name "licenses-*.json" 2>/dev/null | wc -l || echo "0")
+ if [ "$LICENSE_FILES" -gt 0 ]; then
+ echo "### License Compliance: â
Tracked" >> security-summary.md
+ else
+ echo "### License Compliance: â ī¸ No license data available" >> security-summary.md
+ fi
+ else
+ echo "### SBOM Generation Status: â ī¸ No SBOMs found" >> security-summary.md
+ echo "Consider running the SBOM generation workflow" >> security-summary.md
+ fi
+ echo "" >> security-summary.md
+ continue-on-error: true
+
- name: Check existing SAST results
run: |
echo "## đ Static Application Security Testing (SAST)" >> security-summary.md
echo "" >> security-summary.md
-
+
# Check for CodeQL
echo "### SAST Tools Status:" >> security-summary.md
echo "- **CodeQL**: â
Configured (workflow: sast-codeql.yml)" >> security-summary.md
echo "- **Semgrep**: â
Configured (workflow: sast-semgrep.yml)" >> security-summary.md
echo "- **Node.js Security**: â
Configured (workflow: sast-nodejs.yml)" >> security-summary.md
echo "- **TruffleHog Secrets**: â
Configured (workflow: sast-trufflehog.yml)" >> security-summary.md
+ echo "- **SBOM Generation**: â
Configured (workflow: security-sbom.yml)" >> security-summary.md
echo "" >> security-summary.md
continue-on-error: true
-
+
- name: Generate security scorecard
run: |
echo "## đ Security Scorecard" >> security-summary.md
echo "" >> security-summary.md
-
+
SCORE=100
CRITICAL_ISSUES=0
HIGH_ISSUES=0
MEDIUM_ISSUES=0
-
+
# Count all issues from various sources
# This is a simplified scoring system
-
+
echo "### Overall Security Score: $SCORE/100" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "| Category | Status | Score Impact |" >> security-summary.md
echo "|----------|--------|--------------|" >> security-summary.md
echo "| Dependency Security | â
Scanning Active | 0 |" >> security-summary.md
@@ -226,38 +274,40 @@ jobs:
echo "| Secret Detection | â
TruffleHog Active | 0 |" >> security-summary.md
echo "| Security Headers | â ī¸ Needs Review | -10 |" >> security-summary.md
echo "| OWASP Testing | â
ZAP Configured | 0 |" >> security-summary.md
+ echo "| SBOM Generation | â
Multi-format | 0 |" >> security-summary.md
+ echo "| Supply Chain | â
Components Tracked | 0 |" >> security-summary.md
echo "" >> security-summary.md
continue-on-error: true
-
+
- name: Generate recommendations
run: |
echo "## đ¯ Security Recommendations" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "### Immediate Actions (Priority 1)" >> security-summary.md
echo "1. **Update Critical Dependencies**: Run \`npm audit fix\` for automatic fixes" >> security-summary.md
echo "2. **Security Headers**: Implement CSP, HSTS, and X-Frame-Options headers" >> security-summary.md
echo "3. **Secrets Management**: Rotate any detected secrets immediately" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "### Short-term Improvements (Priority 2)" >> security-summary.md
echo "1. **Container Hardening**: Update base images to latest secure versions" >> security-summary.md
echo "2. **Rate Limiting**: Implement rate limiting on all API endpoints" >> security-summary.md
echo "3. **Input Validation**: Strengthen input validation and sanitization" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "### Long-term Enhancements (Priority 3)" >> security-summary.md
echo "1. **Security Testing**: Add security-focused unit and integration tests" >> security-summary.md
echo "2. **Threat Modeling**: Conduct threat modeling sessions" >> security-summary.md
echo "3. **Security Training**: Regular security awareness for development team" >> security-summary.md
echo "" >> security-summary.md
continue-on-error: true
-
+
- name: Generate compliance checklist
run: |
echo "## â
Compliance & Best Practices Checklist" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "### OWASP Top 10 Coverage" >> security-summary.md
echo "- [x] A01:2021 â Broken Access Control (JWT auth implemented)" >> security-summary.md
echo "- [x] A02:2021 â Cryptographic Failures (Encryption configured)" >> security-summary.md
@@ -270,7 +320,7 @@ jobs:
echo "- [x] A09:2021 â Logging Failures (Logging configured)" >> security-summary.md
echo "- [ ] A10:2021 â SSRF (Needs validation)" >> security-summary.md
echo "" >> security-summary.md
-
+
echo "### Security Controls" >> security-summary.md
echo "- [x] Automated security scanning in CI/CD" >> security-summary.md
echo "- [x] Dependency vulnerability scanning" >> security-summary.md
@@ -282,12 +332,12 @@ jobs:
echo "- [ ] Web Application Firewall (WAF)" >> security-summary.md
echo "" >> security-summary.md
continue-on-error: true
-
+
- name: Create summary for GitHub
run: |
# Copy summary to GitHub step summary
cat security-summary.md >> $GITHUB_STEP_SUMMARY
-
+
# Create a brief summary for PR comments
echo "## đ Security Report Summary" > security-brief.md
echo "" >> security-brief.md
@@ -302,7 +352,7 @@ jobs:
echo "" >> security-brief.md
echo "Full report available in workflow artifacts." >> security-brief.md
continue-on-error: true
-
+
- name: Upload security report
if: always()
uses: actions/upload-artifact@v4
@@ -313,16 +363,16 @@ jobs:
security-brief.md
security-reports/
retention-days: 90
-
+
- name: Create security issues if critical vulnerabilities found
run: |
# This would create GitHub issues for critical findings
# Placeholder for issue creation logic
echo "Security report generation completed"
-
+
# Check if we should create issues
if grep -q "â" security-summary.md; then
echo "Critical security issues detected - manual review required"
# In a real implementation, this would create GitHub issues
fi
- continue-on-error: true
\ No newline at end of file
+ continue-on-error: true
diff --git a/.github/workflows/security-sbom.yml b/.github/workflows/security-sbom.yml
new file mode 100644
index 0000000..38443b4
--- /dev/null
+++ b/.github/workflows/security-sbom.yml
@@ -0,0 +1,482 @@
+name: Security - SBOM Generation
+
+on:
+ pull_request:
+ branches: [main, develop]
+ push:
+ branches: [main]
+ release:
+ types: [published]
+ schedule:
+ - cron: "0 2 * * 1" # Weekly on Monday at 2 AM UTC
+ workflow_dispatch:
+
+permissions:
+ contents: write
+ security-events: write
+ actions: read
+ id-token: write # For SBOM attestation
+
+jobs:
+ sbom-source:
+ name: Generate Source Code SBOM
+ runs-on: ubuntu-latest
+
+ # Skip any PR created by dependabot to avoid permission issues
+ if: (github.actor != 'dependabot[bot]')
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: 18
+ cache: "npm"
+
+ - name: Install Syft
+ run: |
+ echo "Installing Syft for SBOM generation..."
+ curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
+ syft --version
+
+ - name: Check Node version for cdxgen
+ run: |
+ echo "Checking Node.js version for cdxgen compatibility..."
+ NODE_VERSION=$(node --version | cut -d. -f1 | sed 's/v//')
+ echo "Node.js major version: $NODE_VERSION"
+ if [ "$NODE_VERSION" -lt 20 ]; then
+ echo "â ī¸ cdxgen requires Node.js 20+, skipping cdxgen installation"
+ echo "SKIP_CDXGEN=true" >> $GITHUB_ENV
+ else
+ echo "â
Node.js version compatible with cdxgen"
+ echo "SKIP_CDXGEN=false" >> $GITHUB_ENV
+ fi
+
+ - name: Generate Frontend SBOM with Syft
+ run: |
+ echo "Generating Frontend SBOM with Syft..."
+ cd frontend
+
+ # Generate SBOM in multiple formats
+ syft . -o spdx-json=sbom-frontend-spdx.json
+ syft . -o cyclonedx-json=sbom-frontend-cyclonedx.json
+ syft . -o json=sbom-frontend-syft.json
+
+ echo "Frontend SBOM generated successfully"
+ ls -la sbom-*.json
+ continue-on-error: true
+
+ - name: Generate Backend SBOM with Syft
+ run: |
+ echo "Generating Backend SBOM with Syft..."
+ cd backend
+
+ # Generate SBOM in multiple formats
+ syft . -o spdx-json=sbom-backend-spdx.json
+ syft . -o cyclonedx-json=sbom-backend-cyclonedx.json
+ syft . -o json=sbom-backend-syft.json
+
+ echo "Backend SBOM generated successfully"
+ ls -la sbom-*.json
+ continue-on-error: true
+
+ - name: Install cdxgen (if compatible)
+ if: env.SKIP_CDXGEN == 'false'
+ run: |
+ echo "Installing cdxgen for CycloneDX generation..."
+ npm install -g @cyclonedx/cdxgen
+ cdxgen --version || echo "cdxgen installation completed"
+ continue-on-error: true
+
+ - name: Generate CycloneDX SBOM with cdxgen
+ if: env.SKIP_CDXGEN == 'false'
+ run: |
+ echo "Generating comprehensive CycloneDX SBOM..."
+
+ # Frontend CycloneDX
+ cd frontend
+ cdxgen -o sbom-frontend-cdxgen.json -t js --spec-version 1.5 || echo "Frontend SBOM generation failed"
+
+ # Backend CycloneDX
+ cd ../backend
+ cdxgen -o sbom-backend-cdxgen.json -t js --spec-version 1.5 || echo "Backend SBOM generation failed"
+
+ # Full project CycloneDX
+ cd ..
+ cdxgen -o sbom-project-cdxgen.json -t js --spec-version 1.5 || echo "Project SBOM generation failed"
+
+ echo "CycloneDX SBOMs generation attempted"
+ continue-on-error: true
+
+ - name: Alternative CycloneDX generation with npm
+ if: env.SKIP_CDXGEN == 'true'
+ run: |
+ echo "Using npm-based CycloneDX generation as fallback..."
+ npm install -g @cyclonedx/cyclonedx-npm
+
+ # Frontend CycloneDX
+ cd frontend
+ npx @cyclonedx/cyclonedx-npm --output-format json --output-file sbom-frontend-cdxgen.json || true
+
+ # Backend CycloneDX
+ cd ../backend
+ npx @cyclonedx/cyclonedx-npm --output-format json --output-file sbom-backend-cdxgen.json || true
+
+ # Full project
+ cd ..
+ npx @cyclonedx/cyclonedx-npm --output-format json --output-file sbom-project-cdxgen.json || true
+
+ echo "Alternative CycloneDX SBOMs generated"
+ continue-on-error: true
+
+ - name: Generate npm dependency tree
+ run: |
+ echo "Generating npm dependency trees..."
+
+ # Frontend dependencies
+ cd frontend
+ npm list --json > npm-dependencies-frontend.json || true
+ npm list --production --json > npm-dependencies-frontend-prod.json || true
+
+ # Backend dependencies
+ cd ../backend
+ npm list --json > npm-dependencies-backend.json || true
+ npm list --production --json > npm-dependencies-backend-prod.json || true
+
+ echo "Dependency trees generated"
+ continue-on-error: true
+
+ - name: Analyze licenses
+ run: |
+ echo "## License Analysis" >> $GITHUB_STEP_SUMMARY
+
+ # Install license checker
+ npm install -g license-checker
+
+ # Frontend licenses
+ cd frontend
+ license-checker --json > licenses-frontend.json || true
+ license-checker --summary >> $GITHUB_STEP_SUMMARY || true
+
+ # Backend licenses
+ cd ../backend
+ echo "### Backend Licenses:" >> $GITHUB_STEP_SUMMARY
+ license-checker --summary >> $GITHUB_STEP_SUMMARY || true
+ license-checker --json > licenses-backend.json || true
+
+ echo "License analysis completed"
+ continue-on-error: true
+
+ - name: Generate SBOM summary
+ run: |
+ echo "## SBOM Generation Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Count components in SBOMs
+ if [ -f "frontend/sbom-frontend-syft.json" ]; then
+ FRONTEND_COMPONENTS=$(jq '.artifacts | length' frontend/sbom-frontend-syft.json || echo "0")
+ echo "### Frontend Components: $FRONTEND_COMPONENTS" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ if [ -f "backend/sbom-backend-syft.json" ]; then
+ BACKEND_COMPONENTS=$(jq '.artifacts | length' backend/sbom-backend-syft.json || echo "0")
+ echo "### Backend Components: $BACKEND_COMPONENTS" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### Generated Formats:" >> $GITHUB_STEP_SUMMARY
+ echo "- SPDX 2.3 JSON" >> $GITHUB_STEP_SUMMARY
+ echo "- CycloneDX 1.5 JSON" >> $GITHUB_STEP_SUMMARY
+ echo "- Syft native JSON" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # List generated files
+ echo "### Generated SBOM Files:" >> $GITHUB_STEP_SUMMARY
+ find . -name "sbom-*.json" -o -name "licenses-*.json" -o -name "npm-dependencies-*.json" | while read file; do
+ SIZE=$(du -h "$file" | cut -f1)
+ echo "- $(basename $file) ($SIZE)" >> $GITHUB_STEP_SUMMARY
+ done
+ continue-on-error: true
+
+ - name: Upload source SBOMs
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: sbom-source-${{ github.run_number }}
+ path: |
+ frontend/sbom-*.json
+ backend/sbom-*.json
+ sbom-project-cdxgen.json
+ frontend/licenses-*.json
+ backend/licenses-*.json
+ frontend/npm-dependencies-*.json
+ backend/npm-dependencies-*.json
+ retention-days: 90
+
+ sbom-containers:
+ name: Generate Container SBOM
+ runs-on: ubuntu-latest
+
+ # Skip any PR created by dependabot to avoid permission issues
+ if: (github.actor != 'dependabot[bot]')
+
+ strategy:
+ fail-fast: false
+ matrix:
+ service: [backend, frontend]
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Build Docker image (${{ matrix.service }})
+ run: |
+ echo "Building ${{ matrix.service }} Docker image for SBOM generation..."
+
+ docker build \
+ -t connectkit-${{ matrix.service }}:sbom \
+ -f docker/${{ matrix.service }}/Dockerfile \
+ . || {
+ echo "Build failed, creating minimal image..."
+ cat > Dockerfile.minimal << EOF
+ FROM node:18-alpine
+ WORKDIR /app
+ COPY ${{ matrix.service }}/package*.json ./
+ RUN npm ci --only=production || npm install --production || echo "Install failed"
+ EOF
+ docker build -t connectkit-${{ matrix.service }}:sbom -f Dockerfile.minimal .
+ }
+ continue-on-error: true
+
+ - name: Install Syft
+ run: |
+ echo "Installing Syft..."
+ curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b /usr/local/bin
+ syft --version
+
+ - name: Generate container SBOM with Syft
+ run: |
+ echo "Generating container SBOM for ${{ matrix.service }}..."
+
+ # Generate SBOM in multiple formats
+ syft connectkit-${{ matrix.service }}:sbom -o spdx-json=sbom-container-${{ matrix.service }}-spdx.json
+ syft connectkit-${{ matrix.service }}:sbom -o cyclonedx-json=sbom-container-${{ matrix.service }}-cyclonedx.json
+ syft connectkit-${{ matrix.service }}:sbom -o json=sbom-container-${{ matrix.service }}-syft.json
+
+ echo "Container SBOM generated successfully"
+ ls -la sbom-container-*.json
+ continue-on-error: true
+
+ - name: Generate container layer analysis
+ run: |
+ echo "## Container Analysis - ${{ matrix.service }}" >> $GITHUB_STEP_SUMMARY
+
+ # Analyze image layers
+ docker history connectkit-${{ matrix.service }}:sbom --no-trunc > layer-history-${{ matrix.service }}.txt || true
+
+ # Get image size
+ IMAGE_SIZE=$(docker images connectkit-${{ matrix.service }}:sbom --format "{{.Size}}")
+ echo "### Image Size: $IMAGE_SIZE" >> $GITHUB_STEP_SUMMARY
+
+ # Count packages if SBOM exists
+ if [ -f "sbom-container-${{ matrix.service }}-syft.json" ]; then
+ PACKAGE_COUNT=$(jq '.artifacts | length' sbom-container-${{ matrix.service }}-syft.json || echo "0")
+ echo "### Total Packages: $PACKAGE_COUNT" >> $GITHUB_STEP_SUMMARY
+
+ # Count by type
+ echo "### Package Types:" >> $GITHUB_STEP_SUMMARY
+ jq -r '.artifacts[].type' sbom-container-${{ matrix.service }}-syft.json | sort | uniq -c | while read count type; do
+ echo "- $type: $count" >> $GITHUB_STEP_SUMMARY
+ done
+ fi
+ continue-on-error: true
+
+ - name: Upload container SBOMs
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: sbom-container-${{ matrix.service }}-${{ github.run_number }}
+ path: |
+ sbom-container-*.json
+ layer-history-*.txt
+ retention-days: 90
+
+ sbom-vulnerability-scan:
+ name: Scan SBOMs for Vulnerabilities
+ runs-on: ubuntu-latest
+ needs: [sbom-source, sbom-containers]
+
+ # Skip any PR created by dependabot to avoid permission issues
+ if: (github.actor != 'dependabot[bot]')
+
+ steps:
+ - name: Download all SBOMs
+ uses: actions/download-artifact@v4
+ with:
+ pattern: sbom-*
+ path: sboms/
+
+ - name: Install Grype
+ run: |
+ echo "Installing Grype vulnerability scanner..."
+ curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
+ grype --version
+
+ - name: Scan source SBOMs with Grype
+ run: |
+ echo "## Vulnerability Scan Results" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Find and scan all Syft-format SBOMs
+ find sboms -name "*-syft.json" | while read sbom; do
+ SBOM_NAME=$(basename $sbom .json)
+ echo "Scanning $SBOM_NAME..."
+
+ # Scan and output to JSON
+ grype sbom:$sbom -o json > vuln-$SBOM_NAME.json || true
+
+ # Scan and output to table for summary
+ echo "### $SBOM_NAME Vulnerabilities:" >> $GITHUB_STEP_SUMMARY
+ grype sbom:$sbom -q || echo "No vulnerabilities found" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ done
+ continue-on-error: true
+
+ - name: Generate vulnerability summary
+ run: |
+ echo "## Vulnerability Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ TOTAL_CRITICAL=0
+ TOTAL_HIGH=0
+ TOTAL_MEDIUM=0
+ TOTAL_LOW=0
+
+ # Count vulnerabilities from all scan results
+ find . -name "vuln-*.json" | while read vuln_file; do
+ if [ -f "$vuln_file" ]; then
+ CRITICAL=$(jq '[.matches[] | select(.vulnerability.severity == "Critical")] | length' $vuln_file 2>/dev/null || echo "0")
+ HIGH=$(jq '[.matches[] | select(.vulnerability.severity == "High")] | length' $vuln_file 2>/dev/null || echo "0")
+ MEDIUM=$(jq '[.matches[] | select(.vulnerability.severity == "Medium")] | length' $vuln_file 2>/dev/null || echo "0")
+ LOW=$(jq '[.matches[] | select(.vulnerability.severity == "Low")] | length' $vuln_file 2>/dev/null || echo "0")
+
+ TOTAL_CRITICAL=$((TOTAL_CRITICAL + CRITICAL))
+ TOTAL_HIGH=$((TOTAL_HIGH + HIGH))
+ TOTAL_MEDIUM=$((TOTAL_MEDIUM + MEDIUM))
+ TOTAL_LOW=$((TOTAL_LOW + LOW))
+ fi
+ done
+
+ echo "### Total Vulnerabilities Found:" >> $GITHUB_STEP_SUMMARY
+ echo "- Critical: $TOTAL_CRITICAL" >> $GITHUB_STEP_SUMMARY
+ echo "- High: $TOTAL_HIGH" >> $GITHUB_STEP_SUMMARY
+ echo "- Medium: $TOTAL_MEDIUM" >> $GITHUB_STEP_SUMMARY
+ echo "- Low: $TOTAL_LOW" >> $GITHUB_STEP_SUMMARY
+
+ if [ "$TOTAL_CRITICAL" -gt 0 ]; then
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "â **Critical vulnerabilities detected!**" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "â
**No critical vulnerabilities found**" >> $GITHUB_STEP_SUMMARY
+ fi
+ continue-on-error: true
+
+ - name: Upload vulnerability scan results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: sbom-vulnerabilities-${{ github.run_number }}
+ path: |
+ vuln-*.json
+ retention-days: 90
+
+ sbom-attestation:
+ name: Generate SBOM Attestation
+ runs-on: ubuntu-latest
+ needs: [sbom-source]
+ if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+
+ permissions:
+ contents: write
+ id-token: write
+ attestations: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Download source SBOMs
+ uses: actions/download-artifact@v4
+ with:
+ name: sbom-source-${{ github.run_number }}
+ path: sboms/
+
+ - name: Generate SBOM attestation
+ uses: actions/attest-sbom@v1
+ with:
+ subject-path: "./sboms"
+ sbom-path: "sboms/sbom-project-cdxgen.json"
+ push-to-registry: false
+ continue-on-error: true
+
+ - name: Upload attestation
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: sbom-attestation-${{ github.run_number }}
+ path: |
+ *.sigstore
+ retention-days: 90
+
+ sbom-publish:
+ name: Publish SBOMs
+ runs-on: ubuntu-latest
+ needs: [sbom-source, sbom-containers, sbom-vulnerability-scan]
+ if: github.event_name == 'release'
+
+ permissions:
+ contents: write
+
+ steps:
+ - name: Download all SBOMs
+ uses: actions/download-artifact@v4
+ with:
+ pattern: sbom-*
+ path: sboms/
+
+ - name: Create SBOM archive
+ run: |
+ echo "Creating SBOM archive for release..."
+
+ # Create organized directory structure
+ mkdir -p release-sboms/source
+ mkdir -p release-sboms/containers
+ mkdir -p release-sboms/vulnerabilities
+
+ # Copy SBOMs to organized structure
+ find sboms -name "*frontend*" -name "*.json" -exec cp {} release-sboms/source/ \;
+ find sboms -name "*backend*" -name "*.json" -exec cp {} release-sboms/source/ \;
+ find sboms -name "*container*" -name "*.json" -exec cp {} release-sboms/containers/ \;
+ find sboms -name "vuln-*" -name "*.json" -exec cp {} release-sboms/vulnerabilities/ \;
+
+ # Create archive
+ tar -czf sbom-${{ github.event.release.tag_name }}.tar.gz release-sboms/
+
+ echo "SBOM archive created successfully"
+ continue-on-error: true
+
+ - name: Upload SBOMs to release
+ uses: softprops/action-gh-release@v1
+ with:
+ files: |
+ sbom-*.tar.gz
+ release-sboms/source/sbom-project-cdxgen.json
+ release-sboms/source/sbom-frontend-spdx.json
+ release-sboms/source/sbom-backend-spdx.json
+ continue-on-error: true