name: Performance Optimization - SLO Achievement
on:
push:
branches: [ main, develop ]
pull_request:
branches: [ main, develop ]
workflow_dispatch:
inputs:
optimization_target:
        description: 'Optimization target (cpu, latency, throughput, all)'
required: false
default: 'cpu'
type: choice
options:
- cpu
- latency
- throughput
- all
env:
NODE_VERSION: '18'
PYTHON_VERSION: '3.11'
GO_VERSION: '1.21'
RUST_VERSION: '1.75'
OPTIMIZATION_TARGET: ${{ github.event.inputs.optimization_target || 'all' }}
jobs:
performance-baseline:
name: Performance Baseline
runs-on: ubuntu-latest
services:
postgres:
image: postgres:15-alpine
env:
POSTGRES_PASSWORD: postgres
POSTGRES_DB: pf_testbed_perf
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
submodules: recursive
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip'
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
cache: true
      - name: Setup Rust
        # actions-rs/toolchain is archived; dtolnay/rust-toolchain is the
        # maintained replacement
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_VERSION }}
- name: Install dependencies
run: |
npm ci --prefer-offline
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r testbed/tools/requirements.txt
go mod download
cargo --version
- name: Build and start services
run: |
          # Rebuild service images from a clean slate
          docker-compose build --no-cache --parallel
# Start services
docker-compose up -d
# Wait for services to be healthy
echo "Waiting for services to be ready..."
# Wait for Gateway
timeout 120 bash -c 'until curl -f http://localhost:3003/health; do sleep 2; done'
# Wait for Ingress
timeout 120 bash -c 'until curl -f http://localhost:3001/health; do sleep 2; done'
# Wait for Ledger
timeout 120 bash -c 'until curl -f http://localhost:3002/health; do sleep 2; done'
echo "All services are healthy"
      - name: Run baseline performance tests
        run: |
          # Install k6 for load testing
          curl -L https://github.com/grafana/k6/releases/download/v0.47.0/k6-v0.47.0-linux-amd64.tar.gz | tar xz
          sudo cp k6-v0.47.0-linux-amd64/k6 /usr/local/bin/
          # Run baseline performance test. The stages encode the full 5m
          # profile (ramp up, hold, ramp down), so --duration/--vus are
          # omitted: k6 rejects combining --duration with --stage.
          # --summary-export produces the aggregated JSON the next step
          # parses, and p(99) is added to the trend stats (not a default).
          k6 run testbed/tests/performance/baseline-test.js \
            --summary-export=baseline-performance.json \
            --summary-trend-stats="avg,min,med,max,p(95),p(99)" \
            --stage 30s:20 \
            --stage 4m:20 \
            --stage 30s:0
          echo "Baseline performance test completed"
      - name: Collect baseline metrics
        id: baseline-collection
        run: |
          # Extract baseline metrics from the k6 summary export
          python - << 'EOF'
          import json

          with open('baseline-performance.json', 'r') as f:
              metrics = json.load(f)

          http_metrics = metrics.get('metrics', {})
          baseline_metrics = {
              # k6 reports http_req_duration percentiles in milliseconds
              'p95_latency': http_metrics.get('http_req_duration', {}).get('p(95)', 0),
              'p99_latency': http_metrics.get('http_req_duration', {}).get('p(99)', 0),
              'throughput': http_metrics.get('http_reqs', {}).get('rate', 0),
              # http_req_failed is a Rate metric; the summary export stores it under 'value'
              'error_rate': http_metrics.get('http_req_failed', {}).get('value', 0),
              'cpu_usage': 0,    # measured separately
              'memory_usage': 0  # measured separately
          }

          with open('baseline-metrics.json', 'w') as f:
              json.dump(baseline_metrics, f, indent=2)

          print('Baseline metrics collected:', baseline_metrics)
          EOF
          # Expose the metrics file name as a step output for downstream jobs
          echo "metrics=baseline-metrics.json" >> "$GITHUB_OUTPUT"
      - name: Upload baseline metrics
        uses: actions/upload-artifact@v4
        with:
          name: baseline-metrics
          path: baseline-metrics.json
    outputs:
      baseline-metrics: ${{ steps.baseline-collection.outputs.metrics }}
implement-optimizations:
name: Implement Performance Optimizations
runs-on: ubuntu-latest
needs: performance-baseline
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip'
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
cache: true
      - name: Setup Rust
        # actions-rs/toolchain is archived; dtolnay/rust-toolchain is the
        # maintained replacement
        uses: dtolnay/rust-toolchain@master
        with:
          toolchain: ${{ env.RUST_VERSION }}
      - name: Install optimization dependencies
        run: |
          # Install base dependencies first (needed for the later build),
          # then the performance libraries; grpc-tools provides the
          # grpc_tools_node_protoc codegen binary used below
          npm ci --prefer-offline
          npm install @grpc/grpc-js @grpc/proto-loader grpc-tools blake3 @noble/ed25519
          # Install Python optimization libraries
          pip install grpcio grpcio-tools blake3 cryptography
          # Install Go optimization libraries (curve25519-voi covers Ed25519;
          # BLAKE3 comes from lukechampine.com/blake3)
          go get google.golang.org/grpc
          go get github.com/oasisprotocol/curve25519-voi/primitives/ed25519
          go get lukechampine.com/blake3
- name: Implement gRPC optimizations
run: |
# Create gRPC service definitions
mkdir -p testbed/runtime/gateway/proto
cat > testbed/runtime/gateway/proto/gateway.proto << 'EOF'
syntax = "proto3";
package gateway;
service GatewayService {
rpc ProcessRequest (Request) returns (Response);
rpc BatchProcess (BatchRequest) returns (BatchResponse);
rpc StreamProcess (stream Request) returns (stream Response);
}
message Request {
string tenant_id = 1;
string request_id = 2;
bytes payload = 3;
int64 timestamp = 4;
}
message Response {
string request_id = 1;
bytes result = 2;
int64 processing_time_ms = 3;
string status = 4;
}
message BatchRequest {
repeated Request requests = 1;
}
message BatchResponse {
repeated Response responses = 1;
int64 total_processing_time_ms = 2;
}
EOF
# Generate gRPC code
npx grpc_tools_node_protoc \
--js_out=import_style=commonjs,binary:testbed/runtime/gateway/src \
--grpc_out=grpc_js:testbed/runtime/gateway/src \
testbed/runtime/gateway/proto/gateway.proto
echo "gRPC optimizations implemented"
- name: Implement BLAKE3 hashing
run: |
# Create BLAKE3 optimization module
cat > testbed/runtime/gateway/src/optimizations/blake3.ts << 'EOF'
          // The blake3 npm package exports hash/createHash/createKeyed
          // rather than a single `blake3` function
          import { hash, createHash, createKeyed } from 'blake3';

          export class Blake3Optimizer {
            private static instance: Blake3Optimizer;

            private constructor() {}

            static getInstance(): Blake3Optimizer {
              if (!Blake3Optimizer.instance) {
                Blake3Optimizer.instance = new Blake3Optimizer();
              }
              return Blake3Optimizer.instance;
            }

            // Fast one-shot hashing with BLAKE3 (accepts strings or bytes)
            hash(data: string | Uint8Array): string {
              return hash(data).toString('hex');
            }

            // Batch hashing for multiple inputs
            batchHash(inputs: (string | Uint8Array)[]): string[] {
              return inputs.map(input => this.hash(input));
            }

            // Keyed hashing for deterministic results; BLAKE3 keys must be
            // exactly 32 bytes, so the string key is first hashed down
            keyedHash(key: string, data: string | Uint8Array): string {
              const keyBytes = hash(key); // 32-byte digest used as the key
              return createKeyed(keyBytes).update(data).digest('hex');
            }

            // Streaming hasher for large data
            createStreamingHasher() {
              return createHash();
            }
          }

          export const blake3Optimizer = Blake3Optimizer.getInstance();
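
          // Usage sketch (illustrative): derive a stable cache key from a
          // request body.
          //   const digest = blake3Optimizer.hash(JSON.stringify(requestBody));
          //   semanticCache.set(digest, response);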
EOF
echo "BLAKE3 hashing optimizations implemented"
- name: Implement batch Ed25519 verification
run: |
# Create batch Ed25519 verification module
cat > testbed/runtime/gateway/src/optimizations/ed25519-batch.ts << 'EOF'
import { verify } from '@noble/ed25519';
export interface Ed25519Signature {
message: Uint8Array;
signature: Uint8Array;
publicKey: Uint8Array;
}
export class Ed25519BatchVerifier {
private static instance: Ed25519BatchVerifier;
private verificationQueue: Ed25519Signature[] = [];
private batchSize: number = 100;
private constructor() {}
static getInstance(): Ed25519BatchVerifier {
if (!Ed25519BatchVerifier.instance) {
Ed25519BatchVerifier.instance = new Ed25519BatchVerifier();
}
return Ed25519BatchVerifier.instance;
}
            // Add signature to verification queue; processing here is
            // fire-and-forget, so call flush() to obtain results
            addSignature(signature: Ed25519Signature): void {
              this.verificationQueue.push(signature);
              // Process batch if queue is full
              if (this.verificationQueue.length >= this.batchSize) {
                void this.processBatch();
              }
            }

            // Process the current batch; signatures are verified
            // concurrently (@noble/ed25519 verifies one signature at a
            // time, so this parallelizes rather than truly batching)
            private async processBatch(): Promise<boolean[]> {
              if (this.verificationQueue.length === 0) {
                return [];
              }
              const batch = this.verificationQueue.splice(0, this.batchSize);
              return Promise.all(batch.map(sig => this.verifySingle(sig)));
            }
// Verify single signature
private async verifySingle(signature: Ed25519Signature): Promise<boolean> {
try {
return await verify(signature.signature, signature.message, signature.publicKey);
} catch (error) {
console.error('Ed25519 verification failed:', error);
return false;
}
}
// Force process remaining signatures
async flush(): Promise<boolean[]> {
return await this.processBatch();
}
// Set batch size
setBatchSize(size: number): void {
this.batchSize = size;
}
}
export const ed25519BatchVerifier = Ed25519BatchVerifier.getInstance();
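
          // Usage sketch (illustrative): enqueue signatures as they arrive,
          // then flush at a batch boundary.
          //   ed25519BatchVerifier.addSignature({ message, signature, publicKey });
          //   const results = await ed25519BatchVerifier.flush();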
EOF
echo "Batch Ed25519 verification implemented"
- name: Implement streaming detectors
run: |
# Create streaming detector with early exit
cat > testbed/runtime/gateway/src/optimizations/streaming-detector.ts << 'EOF'
export interface DetectionRule {
id: string;
pattern: RegExp | string;
threshold: number;
earlyExit: boolean;
severity: 'low' | 'medium' | 'high' | 'critical';
}
export class StreamingDetector {
private rules: DetectionRule[] = [];
            // Cache stores the matches themselves so repeated chunks still
            // report their detections
            private detectionCache: Map<string, string[]> = new Map();
private cacheSize: number = 1000;
constructor(rules: DetectionRule[] = []) {
this.rules = rules;
}
// Add detection rule
addRule(rule: DetectionRule): void {
this.rules.push(rule);
}
// Process streaming data with early exit
async processStream(
dataStream: AsyncIterable<string>,
onDetection?: (detection: any) => void
): Promise<{ detections: any[]; earlyExit: boolean }> {
const detections: any[] = [];
let earlyExit = false;
for await (const chunk of dataStream) {
if (earlyExit) break;
const chunkDetections = await this.processChunk(chunk);
detections.push(...chunkDetections);
// Check for early exit conditions
for (const detection of chunkDetections) {
if (detection.rule.earlyExit && detection.severity === 'critical') {
earlyExit = true;
break;
}
}
// Call detection callback
if (onDetection) {
for (const detection of chunkDetections) {
onDetection(detection);
}
}
}
return { detections, earlyExit };
}
// Process individual chunk
private async processChunk(chunk: string): Promise<any[]> {
const detections: any[] = [];
for (const rule of this.rules) {
const matches = this.findMatches(chunk, rule);
if (matches.length > 0) {
const detection = {
rule,
matches,
count: matches.length,
severity: rule.severity,
timestamp: new Date().toISOString()
};
detections.push(detection);
// Early exit for critical rules
if (rule.earlyExit && rule.severity === 'critical') {
break;
}
}
}
return detections;
}
            // Find matches in chunk (memoized per rule + chunk)
            private findMatches(chunk: string, rule: DetectionRule): string[] {
              const cacheKey = `${rule.id}:${chunk}`;
              // Return cached matches instead of silently dropping repeats
              const cached = this.detectionCache.get(cacheKey);
              if (cached) {
                return cached;
              }
              const matches: string[] = [];
              // Always scan with a fresh regex so a shared RegExp's
              // lastIndex state cannot leak between chunks
              const pattern = typeof rule.pattern === 'string'
                ? new RegExp(rule.pattern, 'g')
                : new RegExp(rule.pattern.source, rule.pattern.flags.includes('g') ? rule.pattern.flags : rule.pattern.flags + 'g');
              let match;
              while ((match = pattern.exec(chunk)) !== null) {
                matches.push(match[0]);
                // Guard against zero-width matches looping forever
                if (match.index === pattern.lastIndex) {
                  pattern.lastIndex++;
                }
                // Check threshold
                if (matches.length >= rule.threshold) {
                  break;
                }
              }
              // Cache result
              this.detectionCache.set(cacheKey, matches);
              this.manageCacheSize();
              return matches;
            }
// Manage cache size
private manageCacheSize(): void {
if (this.detectionCache.size > this.cacheSize) {
const entries = Array.from(this.detectionCache.entries());
const toRemove = entries.slice(0, this.detectionCache.size - this.cacheSize);
for (const [key] of toRemove) {
this.detectionCache.delete(key);
}
}
}
}
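
          // Usage sketch (illustrative): scan a streamed body, stopping at
          // the first critical detection.
          //   const detector = new StreamingDetector([{
          //     id: 'ssn', pattern: '\\d{3}-\\d{2}-\\d{4}',
          //     threshold: 1, earlyExit: true, severity: 'critical'
          //   }]);
          //   const { detections, earlyExit } = await detector.processStream(chunks);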
EOF
echo "Streaming detectors implemented"
- name: Implement WASM pool pre-warming
run: |
# Create WASM pool pre-warming module
cat > testbed/runtime/gateway/src/optimizations/wasm-pool.ts << 'EOF'
export interface WasmModule {
id: string;
instance: WebAssembly.Instance;
lastUsed: number;
useCount: number;
}
export class WasmPool {
private modules: Map<string, WasmModule> = new Map();
private maxPoolSize: number = 10;
private preWarmCount: number = 3;
            private moduleFactory: () => Promise<WebAssembly.Instance>;
            // Resolves once pre-warming completes; getModule() awaits it
            private ready: Promise<void>;
            constructor(moduleFactory: () => Promise<WebAssembly.Instance>) {
              this.moduleFactory = moduleFactory;
              this.ready = this.preWarm();
            }
// Pre-warm the pool
private async preWarm(): Promise<void> {
const promises: Promise<void>[] = [];
for (let i = 0; i < this.preWarmCount; i++) {
promises.push(this.createModule());
}
await Promise.all(promises);
console.log(`WASM pool pre-warmed with ${this.preWarmCount} modules`);
}
            // Create new module (no-op once the pool is at capacity)
            private async createModule(): Promise<void> {
              if (this.modules.size >= this.maxPoolSize) {
                return;
              }
              try {
                const instance = await this.moduleFactory();
                const module: WasmModule = {
                  id: `module_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`,
                  instance,
                  lastUsed: Date.now(),
                  useCount: 0
                };
                this.modules.set(module.id, module);
              } catch (error) {
                console.error('Failed to create WASM module:', error);
              }
            }
            // Get module from pool (least recently used first)
            async getModule(): Promise<WebAssembly.Instance> {
              await this.ready;
              let lruModule: WasmModule | undefined;
              let oldestTime = Date.now();
              for (const module of this.modules.values()) {
                if (module.lastUsed <= oldestTime) {
                  oldestTime = module.lastUsed;
                  lruModule = module;
                }
              }
              if (lruModule) {
                lruModule.lastUsed = Date.now();
                lruModule.useCount++;
                return lruModule.instance;
              }
              // Pool is empty (e.g. after cleanup); create a fresh module
              await this.createModule();
              const newModule = Array.from(this.modules.values())[0];
              if (!newModule) {
                throw new Error('WASM pool failed to create a module');
              }
              return newModule.instance;
            }
// Return module to pool
returnModule(instance: WebAssembly.Instance): void {
// Find module by instance
for (const [id, module] of this.modules.entries()) {
if (module.instance === instance) {
module.lastUsed = Date.now();
break;
}
}
}
// Clean up old modules
cleanup(maxAge: number = 5 * 60 * 1000): void {
const now = Date.now();
for (const [id, module] of this.modules.entries()) {
if (now - module.lastUsed > maxAge) {
this.modules.delete(id);
}
}
}
// Get pool statistics
getStats(): { size: number; preWarmCount: number; maxSize: number } {
return {
size: this.modules.size,
preWarmCount: this.preWarmCount,
maxSize: this.maxPoolSize
};
}
}
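
          // Usage sketch (illustrative): pool instances of a compiled
          // policy module.
          //   const pool = new WasmPool(async () => {
          //     const { instance } = await WebAssembly.instantiate(wasmBytes, imports);
          //     return instance;
          //   });
          //   const wasm = await pool.getModule();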
EOF
echo "WASM pool pre-warming implemented"
- name: Implement semantic and decision caches
run: |
# Create semantic cache for retrieval
cat > testbed/runtime/gateway/src/optimizations/semantic-cache.ts << 'EOF'
export interface CacheEntry<T> {
key: string;
value: T;
timestamp: number;
ttl: number;
accessCount: number;
lastAccessed: number;
}
export class SemanticCache<T> {
            private cache: Map<string, CacheEntry<T>> = new Map();
            private maxSize: number = 1000;
            // protected so subclasses (e.g. DecisionCache) can tune the TTL
            protected defaultTTL: number = 5 * 60 * 1000; // 5 minutes
// Get value from cache
get(key: string): T | null {
const entry = this.cache.get(key);
if (!entry) {
return null;
}
// Check if expired
if (Date.now() > entry.timestamp + entry.ttl) {
this.cache.delete(key);
return null;
}
// Update access statistics
entry.accessCount++;
entry.lastAccessed = Date.now();
return entry.value;
}
// Set value in cache
set(key: string, value: T, ttl?: number): void {
const entry: CacheEntry<T> = {
key,
value,
timestamp: Date.now(),
ttl: ttl || this.defaultTTL,
accessCount: 0,
lastAccessed: Date.now()
};
// Evict if cache is full
if (this.cache.size >= this.maxSize) {
this.evictLRU();
}
this.cache.set(key, entry);
}
// Evict least recently used entry
private evictLRU(): void {
let lruKey: string | undefined;
let oldestTime = Date.now();
for (const [key, entry] of this.cache.entries()) {
if (entry.lastAccessed < oldestTime) {
oldestTime = entry.lastAccessed;
lruKey = key;
}
}
if (lruKey) {
this.cache.delete(lruKey);
}
}
// Clear expired entries
cleanup(): void {
const now = Date.now();
for (const [key, entry] of this.cache.entries()) {
if (now > entry.timestamp + entry.ttl) {
this.cache.delete(key);
}
}
}
            // Get cache statistics; reports average accesses per entry
            // (a true hit rate would also require counting misses)
            getStats(): { size: number; maxSize: number; avgAccessCount: number } {
              const totalAccesses = Array.from(this.cache.values()).reduce((sum, entry) => sum + entry.accessCount, 0);
              const avgAccessCount = this.cache.size > 0 ? totalAccesses / this.cache.size : 0;
              return {
                size: this.cache.size,
                maxSize: this.maxSize,
                avgAccessCount
              };
            }
}
// Decision cache for kernel decisions
export class DecisionCache extends SemanticCache<any> {
constructor() {
super();
// Set longer TTL for decisions
this.defaultTTL = 30 * 60 * 1000; // 30 minutes
}
// Cache decision with context
cacheDecision(context: string, decision: any, confidence: number): void {
const key = this.generateDecisionKey(context, decision);
const ttl = this.calculateTTL(confidence);
this.set(key, { decision, confidence, context }, ttl);
}
// Generate decision key
private generateDecisionKey(context: string, decision: any): string {
return `decision:${context}:${JSON.stringify(decision)}`;
}
// Calculate TTL based on confidence
private calculateTTL(confidence: number): number {
// Higher confidence = longer TTL
const baseTTL = 5 * 60 * 1000; // 5 minutes
const confidenceMultiplier = Math.max(0.1, Math.min(2.0, confidence));
return baseTTL * confidenceMultiplier;
}
}
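
          // Usage sketch (illustrative): cache a kernel decision keyed by
          // its context; higher confidence yields a longer TTL.
          //   const decisions = new DecisionCache();
          //   decisions.cacheDecision(contextHash, { allow: true }, 0.9);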
EOF
echo "Semantic and decision caches implemented"
- name: Implement performance headers
run: |
# Create performance header middleware
cat > testbed/runtime/gateway/src/optimizations/performance-headers.ts << 'EOF'
import { Request, Response, NextFunction } from 'express';
export interface PerformanceMetrics {
planStart: number;
retrievalStart: number;
kernelStart: number;
egressStart: number;
}
export class PerformanceHeaderMiddleware {
// Add performance timing headers
static addTimingHeaders(req: Request, res: Response, next: NextFunction): void {
const startTime = Date.now();
// Store start time in request
(req as any).performanceMetrics = {
planStart: startTime,
retrievalStart: startTime,
kernelStart: startTime,
egressStart: startTime
};
// Add timing headers to response
res.set({
'X-PF-Plan-ms': '0',
'X-PF-Retrieval-ms': '0',
'X-PF-Kernel-ms': '0',
'X-PF-Egress-ms': '0'
});
next();
}
// Update plan timing
static updatePlanTiming(req: Request, res: Response, planStart: number): void {
const metrics = (req as any).performanceMetrics;
if (metrics) {
metrics.planStart = planStart;
const planTime = Date.now() - planStart;
res.set('X-PF-Plan-ms', planTime.toString());
}
}
// Update retrieval timing
static updateRetrievalTiming(req: Request, res: Response, retrievalStart: number): void {
const metrics = (req as any).performanceMetrics;
if (metrics) {
metrics.retrievalStart = retrievalStart;
const retrievalTime = Date.now() - retrievalStart;
res.set('X-PF-Retrieval-ms', retrievalTime.toString());
}
}
// Update kernel timing
static updateKernelTiming(req: Request, res: Response, kernelStart: number): void {
const metrics = (req as any).performanceMetrics;
if (metrics) {
metrics.kernelStart = kernelStart;
const kernelTime = Date.now() - kernelStart;
res.set('X-PF-Kernel-ms', kernelTime.toString());
}
}
// Update egress timing
static updateEgressTiming(req: Request, res: Response, egressStart: number): void {
const metrics = (req as any).performanceMetrics;
if (metrics) {
metrics.egressStart = egressStart;
const egressTime = Date.now() - egressStart;
res.set('X-PF-Egress-ms', egressTime.toString());
}
}
// Get total processing time
static getTotalTime(req: Request): number {
const metrics = (req as any).performanceMetrics;
if (metrics) {
return Date.now() - metrics.planStart;
}
return 0;
}
}
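
          // Usage sketch (illustrative) for an Express app:
          //   app.use(PerformanceHeaderMiddleware.addTimingHeaders);
          //   // ...after the planning phase inside a handler:
          //   PerformanceHeaderMiddleware.updatePlanTiming(req, res, planStart);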
EOF
echo "Performance headers implemented"
      - name: Build optimized services
        run: |
          # Build services with optimizations
          npm run build
          # Sanity-check that the optimization modules made it into the
          # bundle (case-insensitive: the sources use lower-case identifiers)
          if ! grep -qi "grpc" testbed/runtime/gateway/dist/index.js; then
            echo "ERROR: gRPC optimizations not found in build"
            exit 1
          fi
          if ! grep -qi "blake3" testbed/runtime/gateway/dist/index.js; then
            echo "ERROR: BLAKE3 optimizations not found in build"
            exit 1
          fi
          echo "✅ All optimizations built successfully"
      - name: Upload optimized sources
        # The generated modules exist only on this runner; publish them so
        # the performance-testing job builds the same optimized code
        uses: actions/upload-artifact@v4
        with:
          name: optimized-sources
          path: |
            testbed/runtime/gateway/src/optimizations/
            testbed/runtime/gateway/proto/
performance-testing:
name: Performance Testing
runs-on: ubuntu-latest
needs: [performance-baseline, implement-optimizations]
services:
postgres:
image: postgres:15-alpine
env:
POSTGRES_PASSWORD: postgres
POSTGRES_DB: pf_testbed_perf_test
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: 'pip'
      - name: Download baseline metrics
        uses: actions/download-artifact@v4
        with:
          name: baseline-metrics
          path: ./baseline-metrics
      - name: Download optimized sources
        uses: actions/download-artifact@v4
        with:
          name: optimized-sources
          path: testbed/runtime/gateway
      - name: Build and start optimized services
        run: |
          # Install dependencies and build the optimized services
          npm ci --prefer-offline
          npm run build
# Start services
docker-compose up -d
# Wait for services to be healthy
echo "Waiting for services to be ready..."
# Wait for Gateway
timeout 120 bash -c 'until curl -f http://localhost:3003/health; do sleep 2; done'
# Wait for Ingress
timeout 120 bash -c 'until curl -f http://localhost:3001/health; do sleep 2; done'
# Wait for Ledger
timeout 120 bash -c 'until curl -f http://localhost:3002/health; do sleep 2; done'
echo "All optimized services are healthy"
      - name: Run optimized performance tests
        run: |
          # Install k6
          curl -L https://github.com/grafana/k6/releases/download/v0.47.0/k6-v0.47.0-linux-amd64.tar.gz | tar xz
          sudo cp k6-v0.47.0-linux-amd64/k6 /usr/local/bin/
          # Run the optimized performance test with the same staged 5m
          # profile and summary export as the baseline run
          k6 run testbed/tests/performance/optimized-test.js \
            --summary-export=optimized-performance.json \
            --summary-trend-stats="avg,min,med,max,p(95),p(99)" \
            --stage 30s:20 \
            --stage 4m:20 \
            --stage 30s:0
          echo "Optimized performance test completed"
      - name: Collect optimized metrics
        run: |
          # Extract optimized metrics from the k6 summary export
          python - << 'EOF'
          import json

          with open('optimized-performance.json', 'r') as f:
              metrics = json.load(f)

          http_metrics = metrics.get('metrics', {})
          optimized_metrics = {
              # k6 reports http_req_duration percentiles in milliseconds
              'p95_latency': http_metrics.get('http_req_duration', {}).get('p(95)', 0),
              'p99_latency': http_metrics.get('http_req_duration', {}).get('p(99)', 0),
              'throughput': http_metrics.get('http_reqs', {}).get('rate', 0),
              # http_req_failed is a Rate metric stored under 'value'
              'error_rate': http_metrics.get('http_req_failed', {}).get('value', 0),
              'cpu_usage': 0,    # measured separately
              'memory_usage': 0  # measured separately
          }

          with open('optimized-metrics.json', 'w') as f:
              json.dump(optimized_metrics, f, indent=2)

          print('Optimized metrics collected:', optimized_metrics)
          EOF
      - name: Compare performance metrics
        run: |
          # Compare baseline and optimized metrics
          python - << 'EOF'
          import json

          with open('baseline-metrics/baseline-metrics.json', 'r') as f:
              baseline = json.load(f)
          with open('optimized-metrics.json', 'r') as f:
              optimized = json.load(f)

          # Calculate improvements: latency and error rate improve when
          # they fall, throughput improves when it rises
          improvements = {}
          for key in baseline:
              if key in optimized:
                  baseline_val = baseline[key]
                  optimized_val = optimized[key]
                  if baseline_val > 0:
                      if 'latency' in key or key == 'error_rate':
                          improvement = ((baseline_val - optimized_val) / baseline_val) * 100
                      else:
                          improvement = ((optimized_val - baseline_val) / baseline_val) * 100
                      improvements[key] = improvement

          # Generate performance report
          report = {
              'baseline_metrics': baseline,
              'optimized_metrics': optimized,
              'improvements': improvements,
              'summary': {}
          }

          # Calculate overall improvements
          latency_improvement = (improvements.get('p95_latency', 0) + improvements.get('p99_latency', 0)) / 2
          throughput_improvement = improvements.get('throughput', 0)
          report['summary'] = {
              'latency_improvement_percent': latency_improvement,
              'throughput_improvement_percent': throughput_improvement,
              'overall_improvement': (latency_improvement + throughput_improvement) / 2
          }

          with open('performance-comparison.json', 'w') as f:
              json.dump(report, f, indent=2)

          print('Performance comparison completed:')
          print(f'Latency improvement: {latency_improvement:.2f}%')
          print(f'Throughput improvement: {throughput_improvement:.2f}%')
          print(f'Overall improvement: {report["summary"]["overall_improvement"]:.2f}%')
          EOF
      - name: Validate SLO thresholds
        run: |
          # Validate that the optimized metrics meet the SLO thresholds
          python - << 'EOF'
          import json

          with open('performance-comparison.json', 'r') as f:
              comparison = json.load(f)

          optimized = comparison['optimized_metrics']

          # SLO thresholds; k6 reports http_req_duration in milliseconds
          slo_thresholds = {
              'p95_latency': 2000,   # 2 seconds
              'p99_latency': 5000,   # 5 seconds
              'error_rate': 0.02,    # 2%
              'throughput_min': 100  # 100 req/s
          }

          # Check whether the SLOs are met
          slo_violations = []
          if optimized['p95_latency'] > slo_thresholds['p95_latency']:
              slo_violations.append(f"P95 latency {optimized['p95_latency']}ms exceeds {slo_thresholds['p95_latency']}ms threshold")
          if optimized['p99_latency'] > slo_thresholds['p99_latency']:
              slo_violations.append(f"P99 latency {optimized['p99_latency']}ms exceeds {slo_thresholds['p99_latency']}ms threshold")
          if optimized['error_rate'] > slo_thresholds['error_rate']:
              slo_violations.append(f"Error rate {optimized['error_rate']} exceeds {slo_thresholds['error_rate']} threshold")
          if optimized['throughput'] < slo_thresholds['throughput_min']:
              slo_violations.append(f"Throughput {optimized['throughput']} req/s below {slo_thresholds['throughput_min']} req/s minimum")

          if slo_violations:
              print('❌ SLO violations detected:')
              for violation in slo_violations:
                  print(f'  - {violation}')
              raise SystemExit(1)

          print('✅ All SLO thresholds met')
          EOF
- name: Upload performance artifacts
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: performance-artifacts-${{ github.run_number }}
          path: |
            baseline-metrics/
            optimized-metrics.json
            performance-comparison.json
            optimized-performance.json
          retention-days: 30
performance-summary:
name: Performance Summary
runs-on: ubuntu-latest
needs: [performance-baseline, implement-optimizations, performance-testing]
if: always()
steps:
- name: Generate performance summary
run: |
echo "## Performance Optimization Summary" >> performance-summary.md
echo "" >> performance-summary.md
echo "**Baseline Performance:** ${{ needs.performance-baseline.result }}" >> performance-summary.md
echo "**Optimization Implementation:** ${{ needs.implement-optimizations.result }}" >> performance-summary.md
echo "**Performance Testing:** ${{ needs.performance-testing.result }}" >> performance-summary.md
echo "**Optimization Target:** ${{ env.OPTIMIZATION_TARGET }}" >> performance-summary.md
echo "" >> performance-summary.md
if [ "${{ needs.performance-testing.result }}" == "success" ]; then
echo "✅ **Overall Performance Status: OPTIMIZED**" >> performance-summary.md
echo "" >> performance-summary.md
echo "Performance optimizations implemented and tested successfully:" >> performance-summary.md
echo "- gRPC internal communication" >> performance-summary.md
echo "- BLAKE3 hashing optimization" >> performance-summary.md
echo "- Batch Ed25519 verification" >> performance-summary.md
echo "- Streaming detectors with early exit" >> performance-summary.md
echo "- WASM pool pre-warming" >> performance-summary.md
echo "- Semantic and decision caches" >> performance-summary.md
echo "- Performance timing headers" >> performance-summary.md
echo "" >> performance-summary.md
echo "All SLO thresholds met with optimized performance." >> performance-summary.md
else
echo "❌ **Overall Performance Status: FAILED**" >> performance-summary.md
echo "" >> performance-summary.md
echo "Performance optimization encountered failures. Check the logs for details." >> performance-summary.md
fi
- name: Comment on PR
if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const summary = fs.readFileSync('performance-summary.md', 'utf8');
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: summary
});