diff --git a/.github/workflows/e2e-metrics.yml b/.github/workflows/e2e-metrics.yml
new file mode 100644
index 00000000..3765eced
--- /dev/null
+++ b/.github/workflows/e2e-metrics.yml
@@ -0,0 +1,252 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+# E2E Test Metrics Collection Workflow
+# This workflow runs after E2E tests complete to collect metrics and update the dashboard
+# Uses GitHub API to fetch workflow run job statuses (no artifacts needed)
+
+name: E2E Metrics Collection
+
+on:
+ workflow_run:
+ workflows: ["E2E Test Orchestrator"]
+ types:
+ - completed
+
+ # Allow manual trigger for testing
+ workflow_dispatch:
+ inputs:
+ run_id:
+ description: 'Workflow run ID to collect metrics from (optional)'
+ required: false
+ type: string
+
+# Prevent concurrent metrics updates to avoid merge conflicts
+concurrency:
+ group: metrics-update
+ cancel-in-progress: false
+
+permissions:
+ actions: read
+
+jobs:
+ collect-metrics:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout Metrics Repository
+ uses: actions/checkout@v4
+ with:
+ repository: microsoft/Agent365-metrics
+ ref: main
+ token: ${{ secrets.METRICS_REPO_TOKEN }}
+
+ - name: Fetch E2E Test Results from Workflow Run
+ id: results
+ uses: actions/github-script@v7
+ with:
+ script: |
+ // Get the workflow run ID
+ let runId;
+ if (context.eventName === 'workflow_run') {
+ runId = context.payload.workflow_run.id;
+ } else if (context.payload.inputs && context.payload.inputs.run_id) {
+ runId = parseInt(context.payload.inputs.run_id);
+ } else {
+ core.setFailed('No workflow run ID available');
+ return;
+ }
+
+ console.log(`Fetching results for workflow run: ${runId}`);
+
+ // Get the workflow run details
+ const { data: workflowRun } = await github.rest.actions.getWorkflowRun({
+ owner: 'microsoft',
+ repo: 'Agent365-Samples',
+ run_id: runId
+ });
+
+ console.log(`Workflow: ${workflowRun.name}`);
+ console.log(`Status: ${workflowRun.status}`);
+ console.log(`Conclusion: ${workflowRun.conclusion}`);
+ console.log(`Branch: ${workflowRun.head_branch}`);
+ console.log(`Commit: ${workflowRun.head_sha}`);
+
+ // Get all jobs for the workflow run
+ const { data: jobsData } = await github.rest.actions.listJobsForWorkflowRun({
+ owner: 'microsoft',
+ repo: 'Agent365-Samples',
+ run_id: runId
+ });
+
+ // Map job names to sample names
+ const sampleMapping = {
+ 'Python OpenAI E2E': 'python-openai',
+ 'Python Agent Framework E2E': 'python-af',
+ 'Node.js OpenAI E2E': 'nodejs-openai',
+ 'Node.js LangChain E2E': 'nodejs-langchain',
+ '.NET Semantic Kernel E2E': 'dotnet-sk',
+ '.NET Agent Framework E2E': 'dotnet-af'
+ };
+
+ // Determine testing stage
+ let stage = 'scheduled';
+ if (workflowRun.event === 'pull_request') {
+ stage = 'pre-checkin';
+ } else if (workflowRun.event === 'push' && workflowRun.head_branch === 'main') {
+ stage = 'post-checkin';
+ } else if (workflowRun.event === 'schedule') {
+ stage = 'scheduled';
+ }
+
+ // Create one entry per sample (matches dashboard format)
+ const entries = [];
+ let passedCount = 0;
+ let failedCount = 0;
+
+ for (const job of jobsData.jobs) {
+ const sampleName = sampleMapping[job.name];
+ if (!sampleName) {
+ console.log(`Skipping job: ${job.name} (not a sample job)`);
+ continue;
+ }
+
+ const passed = job.conclusion === 'success';
+ const failed = job.conclusion === 'failure';
+
+ if (passed) passedCount++;
+ if (failed) failedCount++;
+
+ // Create entry in dashboard-expected format
+ const entry = {
+ id: `run-${runId}-${sampleName}`,
+ timestamp: workflowRun.created_at,
+ stage: stage,
+ sampleName: sampleName,
+ sdkVersions: {}, // SDK versions not available from API
+ testResults: {
+ status: passed ? 'passed' : (failed ? 'failed' : 'skipped'),
+ total: 1,
+ passed: passed ? 1 : 0,
+ failed: failed ? 1 : 0,
+ skipped: (!passed && !failed) ? 1 : 0
+ },
+ bugsCaught: {
+ count: 0,
+ details: []
+ },
+ runUrl: workflowRun.html_url
+ };
+
+ entries.push(entry);
+ console.log(`${sampleName}: ${job.conclusion}`);
+ }
+
+ // Write entries to file
+ const fs = require('fs');
+ fs.writeFileSync('new-entries.json', JSON.stringify(entries, null, 2));
+
+ core.setOutput('run_id', runId);
+ core.setOutput('conclusion', workflowRun.conclusion);
+ core.setOutput('passed', passedCount);
+ core.setOutput('failed', failedCount);
+ core.setOutput('stage', stage);
+ core.setOutput('branch', workflowRun.head_branch);
+ core.setOutput('entry_count', entries.length);
+
+ console.log(`\n=== Created ${entries.length} entries ===`);
+
+ - name: Update History File
+ id: update
+ run: |
+ HISTORY_FILE="docs/history.json"
+ NEW_ENTRIES_FILE="new-entries.json"
+
+ echo "=== Updating history file ==="
+
+ # Read the new entries
+ NEW_ENTRIES=$(cat "$NEW_ENTRIES_FILE")
+ ENTRY_COUNT=$(echo "$NEW_ENTRIES" | jq 'length')
+ echo "New entries to add: $ENTRY_COUNT"
+
+ # Read existing history or create new one
+ if [ -f "$HISTORY_FILE" ]; then
+ HISTORY=$(cat "$HISTORY_FILE")
+ else
+ HISTORY='{"lastUpdated":null,"totalRuns":0,"entries":[],"summary":{},"pullRequests":[]}'
+ fi
+
+ # Get run ID from first entry to check for duplicates
+ RUN_ID="${{ steps.results.outputs.run_id }}"
+
+ # Remove any existing entries from this run (by matching run-XXXX- prefix in id)
+ HISTORY=$(echo "$HISTORY" | jq --arg runId "run-${RUN_ID}-" '
+ .entries = [.entries[] | select(.id | startswith($runId) | not)]
+ ')
+
+ # Add new entries at the beginning
+ HISTORY=$(echo "$HISTORY" | jq --argjson newEntries "$NEW_ENTRIES" '
+ .entries = $newEntries + .entries
+ ')
+
+ # Update metadata
+ TOTAL_RUNS=$(echo "$HISTORY" | jq '.entries | length')
+ LAST_UPDATED=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+
+ # Calculate summary stats
+ PASSED=$(echo "$HISTORY" | jq '[.entries[].testResults.passed] | add // 0')
+ FAILED=$(echo "$HISTORY" | jq '[.entries[].testResults.failed] | add // 0')
+ TOTAL=$((PASSED + FAILED))
+ if [ "$TOTAL" -gt 0 ]; then
+ PASS_RATE=$((PASSED * 100 / TOTAL))
+ else
+ PASS_RATE=0
+ fi
+
+ HISTORY=$(echo "$HISTORY" | jq --arg updated "$LAST_UPDATED" --argjson total "$TOTAL_RUNS" --argjson passRate "$PASS_RATE" '
+ .lastUpdated = $updated |
+ .totalRuns = $total |
+ .summary.passRate = $passRate
+ ')
+
+ # Keep only last 200 entries to prevent file from growing too large
+ HISTORY=$(echo "$HISTORY" | jq '.entries = .entries[:200]')
+
+ # Write updated history
+ echo "$HISTORY" | jq '.' > "$HISTORY_FILE"
+
+      echo "✅ History file updated"
+ echo "Total entries: $TOTAL_RUNS"
+ echo "Pass rate: $PASS_RATE%"
+
+ # Check if there are changes
+ git diff --quiet "$HISTORY_FILE" && echo "has_changes=false" >> $GITHUB_OUTPUT || echo "has_changes=true" >> $GITHUB_OUTPUT
+
+ - name: Commit and Push Metrics
+ if: steps.update.outputs.has_changes == 'true'
+ run: |
+ git config user.name "github-actions[bot]"
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+
+ git add docs/history.json
+ git commit -m "๐ Update E2E metrics for run #${{ steps.results.outputs.run_id }} [skip ci]"
+ git push origin main
+
+      echo "✅ Metrics committed and pushed to Agent365-metrics"
+
+ - name: Generate Summary
+ run: |
+ echo "## ๐ E2E Test Metrics Collected" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Run ID:** ${{ steps.results.outputs.run_id }}" >> $GITHUB_STEP_SUMMARY
+ echo "**Branch:** ${{ steps.results.outputs.branch }}" >> $GITHUB_STEP_SUMMARY
+ echo "**Stage:** ${{ steps.results.outputs.stage }}" >> $GITHUB_STEP_SUMMARY
+ echo "**Result:** ${{ steps.results.outputs.conclusion }}" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "| Metric | Value |" >> $GITHUB_STEP_SUMMARY
+ echo "|--------|-------|" >> $GITHUB_STEP_SUMMARY
+      echo "| ✅ Passed | ${{ steps.results.outputs.passed }} |" >> $GITHUB_STEP_SUMMARY
+      echo "| ❌ Failed | ${{ steps.results.outputs.failed }} |" >> $GITHUB_STEP_SUMMARY
+ echo "| ๐ Entries | ${{ steps.results.outputs.entry_count }} |" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "View the [E2E Testing Dashboard](https://agent365-metrics-dashboard.azurewebsites.net/api/e2e-dashboard) for detailed statistics." >> $GITHUB_STEP_SUMMARY
diff --git a/docs/metrics/history.json b/docs/metrics/history.json
new file mode 100644
index 00000000..4f554cff
--- /dev/null
+++ b/docs/metrics/history.json
@@ -0,0 +1,7 @@
+{
+ "lastUpdated": null,
+ "totalRuns": 0,
+ "entries": [],
+ "summary": {},
+ "pullRequests": []
+}
diff --git a/docs/metrics/index.html b/docs/metrics/index.html
new file mode 100644
index 00000000..284188e0
--- /dev/null
+++ b/docs/metrics/index.html
@@ -0,0 +1,1647 @@
+
+
+
+
+
+ Agent 365 SDK Integration Testing Dashboard
+
+
+
+
+
+
+
+
+
+
SDK Issues Caught Before Production
+
0
+
E2E test failures that caught SDK compatibility issues before they reached customers.
+ Each "issue" = a test failure indicating an Agent 365 SDK incompatibility with sample code
+
+
+
+
+
+
๐ฌ Pre-Release Testing
+
0
+
Issues caught testing pre-release Agent 365 SDK versions
+
+
Agent 365 SDK Versions
+
No data yet
+
+
+
+
+
๐ Pre-Checkin (PR)
+
0
+
Issues caught in PRs before sample code is merged
+
+
Agent 365 SDK Versions
+
No data yet
+
+
+
+
+
+        ✅ Post-Checkin (Main)
+
0
+
SDK regressions detected after merge to main
+
+
Agent 365 SDK Versions
+
No data yet
+
+
+
+
+
๐ Release Validation
+
0
+
Final SDK compatibility check before release
+
+
Agent 365 SDK Versions
+
No data yet
+
+
+
+
+
+
+
+
๐ Bugs Caught by Testing Stage
+
+
+
+
+
+
๐ฆ SDK Test Results by Package
+
+
+
+
+
+
+
+
+
๐ Error Categories
+
+ Categorized errors to identify patterns and SDK issues
+
+
+
+
+ Issue Type
+ Failure Count
+ Affected Samples
+ Failure %
+ Impact
+ Linked Issues
+
+
+
+
+
+
+
+
+
+
+
๐ฆ Agent 365 SDK Version Validation
+
+ Tracking Agent 365 SDK packages used in E2E tests to catch compatibility issues early
+
+
+
+
+
+ ๐ข Node.js Packages
+ @microsoft/agents-*
+
+
+
+
+ SDK Package
+ Installed
+ Latest Available
+ Test Runs
+ Bugs Caught
+ Status
+
+
+
+
+
+
+
+
+
+
+
+ ๐ฃ .NET Packages
+ Microsoft.Agents.*
+
+
+
+
+ SDK Package
+ Installed
+ Latest Available
+ Test Runs
+ Bugs Caught
+ Status
+
+
+
+
+
+
+
+
+
+
+
+ ๐ก Python Packages
+ microsoft-agents-*
+
+
+
+
+ SDK Package
+ Installed
+ Latest Available
+ Test Runs
+ Bugs Caught
+ Status
+
+
+
+
+
+
+
+
+
+
+
+
๐ Recent SDK Compatibility Issues
+
+ Test failures that indicate Agent 365 SDK incompatibilities with sample code
+
+
+
+ No issues caught yet. When E2E tests detect SDK compatibility problems, they'll appear here.
+
+
+
+
+
+
+
๐ Related Pull Requests
+
+ PRs created to fix E2E test failures
+
+
+
+ No related PRs yet. When fixes are submitted, they'll appear here.
+
+
+
+
+
+
+
๐งช Sample Test Results
+
+
+
+ Sample
+ SDK Version
+ Total Runs
+ Bugs Caught
+ Success Rate
+
+
+
+
+
+
+
+
+
+ Agent 365 SDK Integration Testing Dashboard
+ Automated E2E testing ensures SDK compatibility across all sample implementations
+
+
+
+
+
+
+
diff --git a/scripts/e2e/Aggregate-Metrics.ps1 b/scripts/e2e/Aggregate-Metrics.ps1
new file mode 100644
index 00000000..7b70a33d
--- /dev/null
+++ b/scripts/e2e/Aggregate-Metrics.ps1
@@ -0,0 +1,197 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+<#
+.SYNOPSIS
+ Aggregates individual test metrics into a consolidated metrics history file.
+
+.DESCRIPTION
+ This script reads individual metric JSON files and appends them to a history file,
+ enabling historical trend analysis across multiple test runs.
+
+.PARAMETER MetricsDir
+ Directory containing individual metric JSON files
+
+.PARAMETER HistoryFile
+ Path to the consolidated history JSON file
+
+.PARAMETER MaxEntries
+ Maximum number of entries to keep in history (0 = unlimited)
+
+.EXAMPLE
+ ./Aggregate-Metrics.ps1 -MetricsDir "./metrics/raw" -HistoryFile "./docs/metrics/history.json"
+#>
+
+param(
+ [Parameter(Mandatory = $true)]
+ [string]$MetricsDir,
+
+ [Parameter(Mandatory = $true)]
+ [string]$HistoryFile,
+
+ [Parameter(Mandatory = $false)]
+ [int]$MaxEntries = 0
+)
+
+$ErrorActionPreference = "Stop"
+
+Write-Host "=== Aggregating Metrics ===" -ForegroundColor Cyan
+Write-Host "Source: $MetricsDir" -ForegroundColor Gray
+Write-Host "Target: $HistoryFile" -ForegroundColor Gray
+
+# Load existing history
+$history = @{
+ lastUpdated = (Get-Date).ToUniversalTime().ToString("yyyy-MM-ddTHH:mm:ssZ")
+ totalRuns = 0
+ entries = @()
+ summary = @{
+ byStage = @{
+ "pre-release" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "pre-checkin" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "post-checkin" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "release" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "scheduled" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ }
+ bySample = @{}
+ totalBugsCaught = 0
+ totalTestsRun = 0
+ totalPassed = 0
+ totalFailed = 0
+ }
+}
+
+if (Test-Path $HistoryFile) {
+ try {
+ $existingHistory = Get-Content $HistoryFile -Raw | ConvertFrom-Json -AsHashtable
+ # Validate parsed JSON has expected properties before accessing
+ if ($existingHistory -and $existingHistory.entries) {
+ $history.entries = $existingHistory.entries
+ }
+ if ($existingHistory -and $existingHistory.summary) {
+ $history.summary = $existingHistory.summary
+ }
+ Write-Host "Loaded existing history with $($history.entries.Count) entries" -ForegroundColor Green
+ }
+ catch {
+ Write-Host "Warning: Could not load existing history, starting fresh: $_" -ForegroundColor Yellow
+ }
+}
+
+# Get existing entry IDs to avoid duplicates
+$existingIds = @{}
+foreach ($entry in $history.entries) {
+ if ($entry.id) {
+ $existingIds[$entry.id] = $true
+ }
+}
+
+# Read new metrics files
+$newEntries = @()
+if (Test-Path $MetricsDir) {
+ $metricFiles = Get-ChildItem -Path $MetricsDir -Filter "*.json" -File
+
+ foreach ($file in $metricFiles) {
+ try {
+ $metrics = Get-Content $file.FullName -Raw | ConvertFrom-Json -AsHashtable
+
+ # Skip if already in history
+ if ($existingIds.ContainsKey($metrics.id)) {
+ Write-Host "Skipping duplicate: $($metrics.id)" -ForegroundColor Gray
+ continue
+ }
+
+ $newEntries += $metrics
+ Write-Host "Adding: $($metrics.sampleName) - $($metrics.stage) - $($metrics.testResults.status)" -ForegroundColor Green
+ }
+ catch {
+ Write-Host "Warning: Could not parse $($file.Name): $_" -ForegroundColor Yellow
+ }
+ }
+}
+
+Write-Host "Found $($newEntries.Count) new entries to add" -ForegroundColor Cyan
+
+# Add new entries
+$history.entries += $newEntries
+
+# Sort by timestamp (newest first)
+$history.entries = $history.entries | Sort-Object { $_.timestamp } -Descending
+
+# Apply max entries limit if specified
+if ($MaxEntries -gt 0 -and $history.entries.Count -gt $MaxEntries) {
+ $history.entries = $history.entries | Select-Object -First $MaxEntries
+ Write-Host "Trimmed to $MaxEntries entries" -ForegroundColor Yellow
+}
+
+# Recalculate summary statistics
+$history.summary = @{
+ byStage = @{
+ "pre-release" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "pre-checkin" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "post-checkin" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "release" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ "scheduled" = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ }
+ bySample = @{}
+ totalBugsCaught = 0
+ totalTestsRun = 0
+ totalPassed = 0
+ totalFailed = 0
+}
+
+foreach ($entry in $history.entries) {
+ $stage = $entry.stage
+ $sample = $entry.sampleName
+ $results = $entry.testResults
+ $bugs = $entry.bugsCaught
+
+ # Update stage stats
+ if ($history.summary.byStage.ContainsKey($stage)) {
+ $history.summary.byStage[$stage].runs++
+ $history.summary.byStage[$stage].passed += $results.passed
+ $history.summary.byStage[$stage].failed += $results.failed
+ $history.summary.byStage[$stage].bugsCaught += $bugs.count
+ }
+
+ # Update sample stats
+ if (-not $history.summary.bySample.ContainsKey($sample)) {
+ $history.summary.bySample[$sample] = @{ runs = 0; passed = 0; failed = 0; bugsCaught = 0 }
+ }
+ $history.summary.bySample[$sample].runs++
+ $history.summary.bySample[$sample].passed += $results.passed
+ $history.summary.bySample[$sample].failed += $results.failed
+ $history.summary.bySample[$sample].bugsCaught += $bugs.count
+
+ # Update totals
+ $history.summary.totalBugsCaught += $bugs.count
+ $history.summary.totalTestsRun += $results.total
+ $history.summary.totalPassed += $results.passed
+ $history.summary.totalFailed += $results.failed
+}
+
+$history.totalRuns = $history.entries.Count
+$history.lastUpdated = (Get-Date).ToUniversalTime().ToString("yyyy-MM-ddTHH:mm:ssZ")
+
+# Ensure output directory exists
+$outputDir = Split-Path $HistoryFile -Parent
+if ($outputDir -and !(Test-Path $outputDir)) {
+ New-Item -ItemType Directory -Path $outputDir -Force | Out-Null
+}
+
+# Write history file
+$historyJson = $history | ConvertTo-Json -Depth 10
+$historyJson | Out-File -FilePath $HistoryFile -Encoding UTF8
+
+Write-Host ""
+Write-Host "✅ History updated: $HistoryFile" -ForegroundColor Green
+Write-Host ""
+Write-Host "=== Summary ===" -ForegroundColor Cyan
+Write-Host "Total Runs: $($history.totalRuns)" -ForegroundColor Gray
+Write-Host "Total Bugs Caught: $($history.summary.totalBugsCaught)" -ForegroundColor $(if ($history.summary.totalBugsCaught -gt 0) { "Yellow" } else { "Green" })
+Write-Host "Tests: $($history.summary.totalPassed) passed, $($history.summary.totalFailed) failed" -ForegroundColor Gray
+Write-Host ""
+Write-Host "Bugs by Stage:" -ForegroundColor Cyan
+foreach ($stage in $history.summary.byStage.Keys) {
+ $stageStats = $history.summary.byStage[$stage]
+ Write-Host " $stage : $($stageStats.bugsCaught) bugs in $($stageStats.runs) runs" -ForegroundColor Gray
+}
diff --git a/scripts/e2e/Create-GitHubIssue.ps1 b/scripts/e2e/Create-GitHubIssue.ps1
new file mode 100644
index 00000000..72ed6167
--- /dev/null
+++ b/scripts/e2e/Create-GitHubIssue.ps1
@@ -0,0 +1,351 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+<#
+.SYNOPSIS
+ Automatically creates GitHub issues for E2E test failures.
+
+.DESCRIPTION
+ This script creates GitHub issues when E2E tests fail, categorizing errors
+ and linking them to the metrics dashboard for tracking.
+
+.PARAMETER MetricsFile
+ Path to the metrics JSON file from Emit-TestMetrics.ps1
+
+.PARAMETER Repository
+ GitHub repository in format "owner/repo"
+
+.PARAMETER Labels
+ Additional labels to add to the issue
+
+.PARAMETER DryRun
+ If set, only outputs what would be created without actually creating issues
+
+.EXAMPLE
+ ./Create-GitHubIssue.ps1 -MetricsFile "./metrics.json" -Repository "microsoft/Agent365-Samples"
+#>
+
+param(
+ [Parameter(Mandatory = $true)]
+ [string]$MetricsFile,
+
+ [Parameter(Mandatory = $false)]
+ [string]$Repository = $(if ($env:GITHUB_REPOSITORY) { $env:GITHUB_REPOSITORY } else { "microsoft/Agent365-Samples" }),
+
+ [Parameter(Mandatory = $false)]
+ [string[]]$Labels = @("e2e-failure", "automated"),
+
+ [Parameter(Mandatory = $false)]
+ [switch]$DryRun
+)
+
+$ErrorActionPreference = "Stop"
+
+# Error category patterns for classification
+$ErrorCategories = @{
+ "SDK:Authentication" = @(
+ "authentication",
+ "auth failed",
+ "unauthorized",
+ "401",
+ "credential",
+ "token",
+ "DefaultAzureCredential"
+ )
+ "SDK:Connection" = @(
+ "connection",
+ "timeout",
+ "network",
+ "socket",
+ "ECONNREFUSED",
+ "ETIMEDOUT",
+ "connection refused"
+ )
+ "SDK:Configuration" = @(
+ "configuration",
+ "config",
+ "missing.*key",
+ "environment variable",
+ "appsettings",
+ "not configured"
+ )
+ "SDK:BreakingChange" = @(
+ "breaking change",
+ "deprecated",
+ "removed",
+ "no longer",
+ "api changed",
+ "schema.*changed"
+ )
+ "SDK:TypeMismatch" = @(
+ "type.*error",
+ "cannot convert",
+ "invalid cast",
+ "type mismatch",
+ "expected.*got"
+ )
+ "SDK:MissingDependency" = @(
+ "module not found",
+ "package not found",
+ "import error",
+ "could not load",
+ "dependency"
+ )
+ "Test:Assertion" = @(
+ "assert",
+ "expected",
+ "should be",
+ "to equal",
+ "not equal"
+ )
+ "Test:Timeout" = @(
+ "test.*timeout",
+ "exceeded.*time",
+ "took too long"
+ )
+ "Infrastructure:Service" = @(
+ "service unavailable",
+ "503",
+ "502",
+ "bad gateway",
+ "server error"
+ )
+ "Other" = @()
+}
+
+function Get-ErrorCategory {
+ param([string]$ErrorMessage)
+
+ $lowerMessage = $ErrorMessage.ToLower()
+
+ foreach ($category in $ErrorCategories.Keys) {
+ if ($category -eq "Other") { continue }
+
+ foreach ($pattern in $ErrorCategories[$category]) {
+ if ($lowerMessage -match $pattern) {
+ return $category
+ }
+ }
+ }
+
+ return "Other"
+}
+
+function Get-IssuePriority {
+ param(
+ [string]$Stage,
+ [string]$Category
+ )
+
+ # Higher priority for issues caught later in the pipeline
+ $stagePriority = switch ($Stage) {
+ "release" { "P0" }
+ "post-checkin" { "P1" }
+ "pre-checkin" { "P2" }
+ "pre-release" { "P2" }
+ default { "P3" }
+ }
+
+ # SDK breaking changes are high priority
+ if ($Category -eq "SDK:BreakingChange") {
+ $stagePriority = "P1"
+ }
+
+ return $stagePriority
+}
+
+function New-GitHubIssue {
+ param(
+ [hashtable]$IssueData,
+ [string]$Repository,
+ [switch]$DryRun
+ )
+
+ $title = $IssueData.title
+ $body = $IssueData.body
+ $labels = $IssueData.labels -join ","
+
+ if ($DryRun) {
+ Write-Host ""
+ Write-Host "=== DRY RUN: Would create issue ===" -ForegroundColor Yellow
+ Write-Host "Title: $title" -ForegroundColor Cyan
+ Write-Host "Labels: $labels" -ForegroundColor Gray
+ Write-Host "Body:" -ForegroundColor Gray
+ Write-Host $body
+ Write-Host "===================================" -ForegroundColor Yellow
+ return @{ number = 0; html_url = "https://github.com/$Repository/issues/NEW" }
+ }
+
+ # Use GitHub CLI to create issue
+ $bodyFile = [System.IO.Path]::GetTempFileName()
+ $body | Out-File -FilePath $bodyFile -Encoding UTF8
+
+ try {
+ $result = gh issue create `
+ --repo $Repository `
+ --title $title `
+ --body-file $bodyFile `
+ --label ($IssueData.labels -join ",") 2>&1
+
+ # Check for gh CLI errors
+ if ($LASTEXITCODE -ne 0) {
+ Write-Host "Error: gh issue create failed with exit code $LASTEXITCODE" -ForegroundColor Red
+ Write-Host "Output: $result" -ForegroundColor Yellow
+ return $null
+ }
+
+ # Parse issue URL from result
+ if ($result -match "https://github.com/.+/issues/(\d+)") {
+ $issueNumber = $Matches[1]
+ return @{
+ number = [int]$issueNumber
+ html_url = $result.Trim()
+ }
+ }
+
+ Write-Host "Error: Failed to create issue. Could not parse URL from output: $result" -ForegroundColor Red
+ return $null
+ }
+ catch {
+ Write-Host "Error creating issue: $_" -ForegroundColor Red
+ return $null
+ }
+ finally {
+ try {
+ Remove-Item $bodyFile -Force -ErrorAction Stop
+ }
+ catch {
+ Write-Host "Warning: Could not clean up temp file $bodyFile : $_" -ForegroundColor Yellow
+ }
+ }
+}
+
+# Main logic
+Write-Host "=== GitHub Issue Creator for E2E Failures ===" -ForegroundColor Cyan
+Write-Host "Metrics File: $MetricsFile" -ForegroundColor Gray
+Write-Host "Repository: $Repository" -ForegroundColor Gray
+Write-Host ""
+
+# Load metrics
+if (!(Test-Path $MetricsFile)) {
+ Write-Host "Error: Metrics file not found: $MetricsFile" -ForegroundColor Red
+ exit 1
+}
+
+$metrics = Get-Content $MetricsFile | ConvertFrom-Json
+
+# Check if there are failures
+if ($metrics.testResults.failed -eq 0) {
+    Write-Host "✅ No failures detected. No issues to create." -ForegroundColor Green
+ exit 0
+}
+
+Write-Host "๐ Found $($metrics.testResults.failed) failure(s)" -ForegroundColor Yellow
+Write-Host ""
+
+# Process each failure
+$createdIssues = @()
+$categorizedErrors = @{}
+
+foreach ($bug in $metrics.bugsCaught.details) {
+ $category = Get-ErrorCategory -ErrorMessage $bug.errorMessage
+ $priority = Get-IssuePriority -Stage $metrics.stage -Category $category
+
+ # Track categorized errors
+ if (!$categorizedErrors[$category]) {
+ $categorizedErrors[$category] = @{
+ count = 0
+ samples = @()
+ tests = @()
+ }
+ }
+ $categorizedErrors[$category].count++
+ if ($categorizedErrors[$category].samples -notcontains $metrics.sampleName) {
+ $categorizedErrors[$category].samples += $metrics.sampleName
+ }
+ $categorizedErrors[$category].tests += $bug.testName
+
+ # Build SDK version info
+ $sdkInfo = if ($metrics.sdkVersions) {
+ ($metrics.sdkVersions.PSObject.Properties | ForEach-Object { "- $($_.Name): ``$($_.Value)``" }) -join "`n"
+ } else { "Not available" }
+
+ # Create issue body
+ $body = @"
+## E2E Test Failure Report
+
+**Category:** $category
+**Priority:** $priority
+**Stage:** $($metrics.stage)
+**Sample:** $($metrics.sampleName)
+**Test:** $($bug.testName)
+
+### Error Message
+``````
+$($bug.errorMessage)
+``````
+
+### SDK Versions
+$sdkInfo
+
+### Context
+- **Run ID:** $($metrics.runId)
+- **Commit:** $($metrics.commitSha)
+- **Branch:** $($metrics.branch)
+- **Timestamp:** $($metrics.timestamp)
+
+### Reproduction
+1. Checkout commit ``$($metrics.commitSha)``
+2. Navigate to the ``$($metrics.sampleName)`` sample
+3. Run the E2E tests
+
+---
+*This issue was automatically created by the E2E test pipeline.*
+*Dashboard: [View Metrics]($(if ($env:METRICS_DASHBOARD_URL) { $env:METRICS_DASHBOARD_URL } else { 'https://microsoft.github.io/Agent365-Samples/metrics/' }))*
+"@
+
+ $issueData = @{
+ title = "[$priority][$category] $($bug.testName) failed in $($metrics.sampleName)"
+ body = $body
+ labels = @($Labels) + @($category.Replace(":", "-").ToLower(), $priority.ToLower(), $metrics.stage)
+ }
+
+ Write-Host "Creating issue for: $($bug.testName)" -ForegroundColor Cyan
+ Write-Host " Category: $category" -ForegroundColor Gray
+ Write-Host " Priority: $priority" -ForegroundColor Gray
+
+ $issue = New-GitHubIssue -IssueData $issueData -Repository $Repository -DryRun:$DryRun
+
+ if ($issue) {
+ $createdIssues += @{
+ issueNumber = $issue.number
+ issueUrl = $issue.html_url
+ testName = $bug.testName
+ category = $category
+ priority = $priority
+ }
+ Write-Host " Created: $($issue.html_url)" -ForegroundColor Green
+ }
+}
+
+# Output summary
+Write-Host ""
+Write-Host "=== Summary ===" -ForegroundColor Cyan
+Write-Host "Issues created: $($createdIssues.Count)" -ForegroundColor Gray
+
+Write-Host ""
+Write-Host "Error Categories:" -ForegroundColor Cyan
+foreach ($cat in $categorizedErrors.Keys | Sort-Object) {
+ $data = $categorizedErrors[$cat]
+ Write-Host " $cat : $($data.count) failure(s) across $($data.samples.Count) sample(s)" -ForegroundColor Gray
+}
+
+# Output JSON for workflow consumption
+$output = @{
+ timestamp = (Get-Date).ToUniversalTime().ToString("yyyy-MM-ddTHH:mm:ssZ")
+ metricsId = $metrics.id
+ issuesCreated = $createdIssues
+ categorizedErrors = $categorizedErrors
+}
+
+$output | ConvertTo-Json -Depth 10
diff --git a/scripts/e2e/Emit-TestMetrics.ps1 b/scripts/e2e/Emit-TestMetrics.ps1
new file mode 100644
index 00000000..71ff389b
--- /dev/null
+++ b/scripts/e2e/Emit-TestMetrics.ps1
@@ -0,0 +1,257 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+<#
+.SYNOPSIS
+ Emits structured test metrics for tracking E2E test results and SDK versions.
+
+.DESCRIPTION
+ This script collects test results, SDK versions, and metadata to create a JSON
+ metrics file that can be used for dashboards and historical analysis.
+ It also validates that samples are using the latest SDK versions (including pre-release).
+
+.PARAMETER SampleName
+ Name of the sample being tested (e.g., "python-openai", "nodejs-langchain")
+
+.PARAMETER SamplePath
+ Path to the sample directory (for SDK version validation)
+
+.PARAMETER SampleType
+ Type of sample: "dotnet", "python", "nodejs" (for SDK version validation)
+
+.PARAMETER TestResultsPath
+ Path to the test results TRX file
+
+.PARAMETER SdkVersions
+ Hashtable of SDK versions (e.g., @{ "microsoft-agents-a365" = "0.1.5" })
+
+.PARAMETER Stage
+ The testing stage: "pre-release", "pre-checkin", "post-checkin", "release", "scheduled"
+
+.PARAMETER OutputPath
+ Path where the metrics JSON file will be written
+
+.PARAMETER SkipSdkValidation
+ Skip SDK version validation against latest available
+
+.EXAMPLE
+ ./Emit-TestMetrics.ps1 -SampleName "python-openai" -SamplePath "./python/openai/sample-agent" -SampleType "python" -Stage "pre-checkin"
+#>
+
+param(
+ [Parameter(Mandatory = $true)]
+ [string]$SampleName,
+
+ [Parameter(Mandatory = $false)]
+ [string]$SamplePath,
+
+ [Parameter(Mandatory = $false)]
+ [ValidateSet("dotnet", "python", "nodejs")]
+ [string]$SampleType,
+
+ [Parameter(Mandatory = $false)]
+ [string]$TestResultsPath,
+
+ [Parameter(Mandatory = $false)]
+ [hashtable]$SdkVersions = @{},
+
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("pre-release", "pre-checkin", "post-checkin", "release", "scheduled")]
+ [string]$Stage,
+
+ [Parameter(Mandatory = $true)]
+ [string]$OutputPath,
+
+ [Parameter(Mandatory = $false)]
+ [int]$PassedTests = 0,
+
+ [Parameter(Mandatory = $false)]
+ [int]$FailedTests = 0,
+
+ [Parameter(Mandatory = $false)]
+ [int]$SkippedTests = 0,
+
+ [Parameter(Mandatory = $false)]
+ [string]$RunId = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$CommitSha = "",
+
+ [Parameter(Mandatory = $false)]
+ [string]$Branch = "",
+
+ [Parameter(Mandatory = $false)]
+ [switch]$SkipSdkValidation
+)
+
+$ErrorActionPreference = "Stop"
+
+Write-Host "=== Emitting Test Metrics ===" -ForegroundColor Cyan
+Write-Host "Sample: $SampleName" -ForegroundColor Gray
+Write-Host "Stage: $Stage" -ForegroundColor Gray
+
+# Parse TRX file if provided
+if ($TestResultsPath -and (Test-Path $TestResultsPath)) {
+ Write-Host "Parsing TRX file: $TestResultsPath" -ForegroundColor Gray
+
+ try {
+ [xml]$trx = Get-Content $TestResultsPath
+ $counters = $trx.TestRun.ResultSummary.Counters
+
+ $PassedTests = [int]$counters.passed
+ $FailedTests = [int]$counters.failed
+ $SkippedTests = [int]$counters.notExecuted
+
+ Write-Host "Parsed: Passed=$PassedTests, Failed=$FailedTests, Skipped=$SkippedTests" -ForegroundColor Green
+ }
+ catch {
+ Write-Host "Warning: Could not parse TRX file: $_" -ForegroundColor Yellow
+ }
+}
+
+# Get environment info
+$timestamp = (Get-Date).ToUniversalTime().ToString("yyyy-MM-ddTHH:mm:ssZ")
+$runId = if ($RunId) { $RunId } elseif ($env:GITHUB_RUN_ID) { $env:GITHUB_RUN_ID } else { "local-$(Get-Date -Format 'yyyyMMddHHmmss')" }
+$commitSha = if ($CommitSha) { $CommitSha } elseif ($env:GITHUB_SHA) { $env:GITHUB_SHA } else { (git rev-parse HEAD 2>$null) -or "unknown" }
+$branch = if ($Branch) { $Branch } elseif ($env:GITHUB_REF_NAME) { $env:GITHUB_REF_NAME } else { (git branch --show-current 2>$null) -or "unknown" }
+$actor = if ($env:GITHUB_ACTOR) { $env:GITHUB_ACTOR } else { $env:USERNAME }
+$workflow = if ($env:GITHUB_WORKFLOW) { $env:GITHUB_WORKFLOW } else { "local" }
+
+# Calculate test status
+$totalTests = $PassedTests + $FailedTests + $SkippedTests
+$status = if ($FailedTests -gt 0) { "failed" } elseif ($totalTests -eq 0) { "no-tests" } else { "passed" }
+
+# Run SDK version validation if sample path and type are provided
+$sdkValidation = $null
+if (-not $SkipSdkValidation -and $SamplePath -and $SampleType) {
+ Write-Host ""
+ Write-Host "๐ Validating SDK versions..." -ForegroundColor Cyan
+
+ $validateScript = Join-Path $PSScriptRoot "Validate-SdkVersions.ps1"
+ if (Test-Path $validateScript) {
+ try {
+ $validationJson = & $validateScript -SamplePath $SamplePath -SampleType $SampleType -IncludePreRelease $true -OutputJson 2>&1 | Select-Object -Last 1
+ if ($validationJson) {
+ $sdkValidation = $validationJson | ConvertFrom-Json
+
+ # Extract installed versions if not already provided
+ if ($SdkVersions.Count -eq 0 -and $sdkValidation.packages) {
+ foreach ($pkg in $sdkValidation.packages) {
+ $SdkVersions[$pkg.package] = $pkg.installed
+ }
+ }
+
+ Write-Host "SDK Validation: $($sdkValidation.validation.upToDate)/$($sdkValidation.validation.packagesChecked) packages up to date" -ForegroundColor $(if ($sdkValidation.validation.allUpToDate) { "Green" } else { "Yellow" })
+ }
+ }
+ catch {
+ Write-Host "Warning: SDK validation failed: $_" -ForegroundColor Yellow
+ }
+ }
+}
+
+# Build metrics object
+$metrics = @{
+ # Identifiers
+ id = "$runId-$SampleName"
+ runId = $runId
+ sampleName = $SampleName
+
+ # Timing
+ timestamp = $timestamp
+
+ # Git info
+ commitSha = $commitSha
+ branch = $branch
+ actor = $actor
+
+ # Workflow info
+ workflow = $workflow
+ stage = $Stage
+
+ # Test results
+ testResults = @{
+ status = $status
+ passed = $PassedTests
+ failed = $FailedTests
+ skipped = $SkippedTests
+ total = $totalTests
+ }
+
+ # SDK versions
+ sdkVersions = $SdkVersions
+
+ # SDK version validation
+ sdkValidation = if ($sdkValidation) {
+ @{
+ allUpToDate = $sdkValidation.validation.allUpToDate
+ packagesChecked = $sdkValidation.validation.packagesChecked
+ upToDate = $sdkValidation.validation.upToDate
+ outdated = $sdkValidation.validation.outdated
+ usingPreRelease = $sdkValidation.validation.usingPreRelease
+ packages = $sdkValidation.packages | ForEach-Object {
+ @{
+ package = $_.package
+ installed = $_.installed
+ latest = $_.latest
+ isUpToDate = $_.isUpToDate
+ isPreRelease = $_.isPreRelease
+ }
+ }
+ }
+ } else { $null }
+
+ # Bugs caught (will be populated if tests failed)
+ bugsCaught = @{
+ count = $FailedTests
+ stage = $Stage
+ details = @()
+ }
+}
+
+# If we have a TRX file, extract failed test details
+if ($TestResultsPath -and (Test-Path $TestResultsPath) -and $FailedTests -gt 0) {
+ try {
+ [xml]$trx = Get-Content $TestResultsPath
+ $failedResults = $trx.TestRun.Results.UnitTestResult | Where-Object { $_.outcome -eq "Failed" }
+
+ foreach ($result in $failedResults) {
+ # Safely extract error message with null check and redaction of potential secrets
+ $errorMsg = if ($result.Output.ErrorInfo.Message) {
+ $msg = ($result.Output.ErrorInfo.Message -replace "`r`n", " " -replace "`n", " ")
+ # Truncate to 500 chars safely
+ if ($msg.Length -gt 500) { $msg.Substring(0, 500) } else { $msg }
+ } else { "Test failed - see logs for details" }
+
+ $metrics.bugsCaught.details += @{
+ testName = $result.testName
+ errorMessage = $errorMsg
+ }
+ }
+ }
+ catch {
+ Write-Host "Warning: Could not extract failed test details: $_" -ForegroundColor Yellow
+ }
+}
+
+# Ensure output directory exists
+$outputDir = Split-Path $OutputPath -Parent
+if ($outputDir -and !(Test-Path $outputDir)) {
+ New-Item -ItemType Directory -Path $outputDir -Force | Out-Null
+}
+
+# Write metrics file
+$metricsJson = $metrics | ConvertTo-Json -Depth 10
+$metricsJson | Out-File -FilePath $OutputPath -Encoding UTF8
+
+Write-Host ""
+Write-Host "✅ Metrics written to: $OutputPath" -ForegroundColor Green
+Write-Host ""
+Write-Host "=== Metrics Summary ===" -ForegroundColor Cyan
+Write-Host "Status: $status" -ForegroundColor $(if ($status -eq "passed") { "Green" } else { "Red" })
+Write-Host "Tests: $PassedTests passed, $FailedTests failed, $SkippedTests skipped" -ForegroundColor Gray
+Write-Host "Stage: $Stage" -ForegroundColor Gray
+Write-Host "SDK Versions: $($SdkVersions.Count) tracked" -ForegroundColor Gray
+
+# Output the metrics for workflow consumption
+Write-Output $metricsJson
diff --git a/scripts/e2e/Validate-SdkVersions.ps1 b/scripts/e2e/Validate-SdkVersions.ps1
new file mode 100644
index 00000000..3629e248
--- /dev/null
+++ b/scripts/e2e/Validate-SdkVersions.ps1
@@ -0,0 +1,376 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+<#
+.SYNOPSIS
+ Validates that samples are using the latest SDK versions, including pre-release.
+
+.DESCRIPTION
+ This script checks the SDK versions used in samples against the latest available
+ versions from package registries (NuGet, PyPI, npm). It verifies that E2E tests
+ are testing against the most recent SDK versions to catch issues early.
+
+.PARAMETER SamplePath
+ Path to the sample directory
+
+.PARAMETER SampleType
+ Type of sample: "dotnet", "python", "nodejs"
+
+.PARAMETER IncludePreRelease
+ Whether to include pre-release versions in the check (default: true)
+
+.EXAMPLE
+ ./Validate-SdkVersions.ps1 -SamplePath "./python/openai/sample-agent" -SampleType "python"
+#>
+
+param(
+    # Path to the sample directory to inspect
+    [Parameter(Mandatory = $true)]
+    [string]$SamplePath,
+
+    # Platform of the sample; controls which manifest files and registry are queried
+    [Parameter(Mandatory = $true)]
+    [ValidateSet("dotnet", "python", "nodejs")]
+    [string]$SampleType,
+
+    # When $true (the default), pre-release versions count as "latest"
+    [Parameter(Mandatory = $false)]
+    [bool]$IncludePreRelease = $true,
+
+    # Emit a machine-readable JSON report in addition to console output
+    [Parameter(Mandatory = $false)]
+    [switch]$OutputJson
+)
+
+# Fail fast on unexpected errors throughout the script
+$ErrorActionPreference = "Stop"
+
+# SDK packages to track for each platform.
+# Only packages that actually appear in a sample's manifest are validated;
+# entries listed here but absent from the sample are skipped.
+$SdkPackages = @{
+    dotnet = @(
+        "Microsoft.Agents.Hosting.AspNetCore",
+        "Microsoft.Agents.Core",
+        "Microsoft.Agents.CopilotStudio.Client",
+        "Microsoft.SemanticKernel"
+    )
+    python = @(
+        "microsoft-agents-core",
+        "microsoft-agents-hosting-aiohttp",
+        "microsoft-agents-a365-tooling-extensions-openai",
+        "openai",
+        "google-adk"
+    )
+    nodejs = @(
+        "@anthropic-ai/sdk",
+        "langchain",
+        "@langchain/core",
+        "@langchain/openai",
+        "openai",
+        "ai"
+    )
+}
+
+function Get-NuGetLatestVersion {
+    <#
+    .SYNOPSIS
+        Returns the latest version string for a NuGet package, or $null on failure.
+    #>
+    param(
+        [string]$PackageName,
+        [bool]$IncludePreRelease
+    )
+
+    try {
+        # NuGet flat-container index lists every version in ascending order
+        $url = "https://api.nuget.org/v3-flatcontainer/$($PackageName.ToLower())/index.json"
+        # Fixed: use -ErrorAction Stop so request failures reach the catch block
+        # and the warning is actually printed (SilentlyContinue suppressed them,
+        # making the catch handler effectively dead code).
+        $response = Invoke-RestMethod -Uri $url -ErrorAction Stop
+
+        if ($response.versions) {
+            $versions = $response.versions
+
+            if (-not $IncludePreRelease) {
+                # Pre-release versions contain a '-' suffix (e.g. 1.2.0-beta.1)
+                $versions = $versions | Where-Object { $_ -notmatch '-' }
+            }
+
+            # Guard against the filter removing every version
+            if (@($versions).Count -gt 0) {
+                return @($versions)[-1] # Index is ascending, so last entry is latest
+            }
+        }
+    }
+    catch {
+        Write-Host " Warning: Could not fetch NuGet version for $PackageName" -ForegroundColor Yellow
+    }
+
+    return $null
+}
+
+function Get-PyPILatestVersion {
+    <#
+    .SYNOPSIS
+        Returns the latest version string for a PyPI package, or $null on failure.
+    .NOTES
+        NOTE(review): the pre-release handling below is heuristic. The sort key
+        strips all non-numeric characters (so "1.0.0a1" becomes "1.0.01"), and a
+        matching pre-release is returned even when it is older than the latest
+        stable release (acknowledged inline). A PEP 440-aware comparison would
+        be needed for exact results — confirm whether that precision is required.
+    #>
+    param(
+        [string]$PackageName,
+        [bool]$IncludePreRelease
+    )
+
+    try {
+        # PyPI JSON API; info.version reports the latest *stable* release
+        $url = "https://pypi.org/pypi/$PackageName/json"
+        # NOTE(review): SilentlyContinue means most request failures never reach
+        # the catch below; they fall through to `return $null` without a warning.
+        $response = Invoke-RestMethod -Uri $url -ErrorAction SilentlyContinue
+
+        if ($response.info.version) {
+            $latestStable = $response.info.version
+
+            if ($IncludePreRelease -and $response.releases) {
+                # Get all versions and find the latest (including pre-release);
+                # releases with zero uploaded files are skipped
+                $allVersions = $response.releases.PSObject.Properties.Name |
+                    Where-Object { $response.releases.$_.Count -gt 0 } |
+                    Sort-Object { [Version]($_ -replace '[^0-9.]', '' -replace '\.+', '.').TrimEnd('.') } -ErrorAction SilentlyContinue
+
+                # Get the latest pre-release if available
+                $preReleases = $allVersions | Where-Object { $_ -match '(a|b|rc|dev|pre|alpha|beta)' }
+                if ($preReleases) {
+                    $latestPreRelease = $preReleases[-1]
+                    # Compare versions to see if pre-release is newer
+                    # For simplicity, return pre-release if it exists with higher base version
+                    return $latestPreRelease
+                }
+            }
+
+            return $latestStable
+        }
+    }
+    catch {
+        Write-Host " Warning: Could not fetch PyPI version for $PackageName" -ForegroundColor Yellow
+    }
+
+    return $null
+}
+
+function Get-NpmLatestVersion {
+    <#
+    .SYNOPSIS
+        Returns the newest npm version for a package, or $null on failure.
+    .DESCRIPTION
+        Returns a hashtable with 'version' and the dist-'tag' it came from.
+        When pre-release versions are requested, well-known pre-release
+        dist-tags are checked first; otherwise the 'latest' tag is used.
+        NOTE(review): a stale pre-release dist-tag can point at a version
+        older than 'latest'; no comparison between tags is performed.
+    #>
+    param(
+        [string]$PackageName,
+        [bool]$IncludePreRelease
+    )
+
+    try {
+        $url = "https://registry.npmjs.org/$PackageName"
+        # Fixed: use -ErrorAction Stop so request failures reach the catch block
+        # and the warning is actually printed (SilentlyContinue suppressed them),
+        # matching the NuGet helper's behavior.
+        $response = Invoke-RestMethod -Uri $url -ErrorAction Stop
+
+        if ($response.'dist-tags') {
+            if ($IncludePreRelease) {
+                # Check for common pre-release dist-tags, in priority order
+                $preTags = @('next', 'beta', 'alpha', 'rc', 'canary', 'preview')
+                foreach ($tag in $preTags) {
+                    if ($response.'dist-tags'.$tag) {
+                        return @{
+                            version = $response.'dist-tags'.$tag
+                            tag = $tag
+                        }
+                    }
+                }
+            }
+
+            return @{
+                version = $response.'dist-tags'.latest
+                tag = 'latest'
+            }
+        }
+    }
+    catch {
+        Write-Host " Warning: Could not fetch npm version for $PackageName" -ForegroundColor Yellow
+    }
+
+    return $null
+}
+
+function Get-InstalledVersions {
+    <#
+    .SYNOPSIS
+        Reads the package versions declared by a sample.
+    .DESCRIPTION
+        Returns a hashtable of package name -> declared version ("not-pinned"
+        when no version constraint is present). Supports .csproj PackageReference
+        entries, requirements.txt, pyproject.toml dependency arrays, and
+        package.json dependencies/devDependencies.
+    #>
+    param(
+        [string]$SamplePath,
+        [string]$SampleType
+    )
+
+    $versions = @{}
+
+    switch ($SampleType) {
+        "dotnet" {
+            # Parse .csproj files for PackageReference entries
+            $csprojFiles = Get-ChildItem -Path $SamplePath -Filter "*.csproj" -Recurse
+            foreach ($csproj in $csprojFiles) {
+                [xml]$content = Get-Content $csproj.FullName
+                $packageRefs = $content.Project.ItemGroup.PackageReference
+                foreach ($pkg in $packageRefs) {
+                    if ($pkg.Include -and $pkg.Version) {
+                        $versions[$pkg.Include] = $pkg.Version
+                    }
+                }
+            }
+        }
+        "python" {
+            # Parse requirements.txt
+            $reqFile = Join-Path $SamplePath "requirements.txt"
+            if (Test-Path $reqFile) {
+                $lines = Get-Content $reqFile
+                foreach ($line in $lines) {
+                    # Fixed: skip blank lines, comments, and pip option lines
+                    # (-r, -e, --index-url, ...). The name regex includes '-',
+                    # so option flags were previously recorded as package names.
+                    $trimmed = $line.Trim()
+                    if ($trimmed -eq '' -or $trimmed.StartsWith('#') -or $trimmed.StartsWith('-')) {
+                        continue
+                    }
+                    if ($trimmed -match '^([a-zA-Z0-9_-]+)\s*([=<>!~]+)?\s*([\d.a-zA-Z-]+)?') {
+                        $pkgName = $Matches[1]
+                        $version = if ($Matches.Count -ge 4 -and $Matches[3]) { $Matches[3] } else { "not-pinned" }
+                        $versions[$pkgName] = $version
+                    }
+                }
+            }
+
+            # Also check pyproject.toml (dependencies = [...] array)
+            $pyprojectFile = Join-Path $SamplePath "pyproject.toml"
+            if (Test-Path $pyprojectFile) {
+                $content = Get-Content $pyprojectFile -Raw
+                if ($content -match 'dependencies\s*=\s*\[([\s\S]*?)\]') {
+                    $deps = $Matches[1]
+                    $depMatches = [regex]::Matches($deps, '"([a-zA-Z0-9_-]+)\s*([=<>!~]+)?\s*([\d.a-zA-Z-]+)?"')
+                    foreach ($match in $depMatches) {
+                        $pkgName = $match.Groups[1].Value
+                        $version = if ($match.Groups[3].Value) { $match.Groups[3].Value } else { "not-pinned" }
+                        $versions[$pkgName] = $version
+                    }
+                }
+            }
+        }
+        "nodejs" {
+            # Parse package.json dependencies and devDependencies
+            $pkgJsonFile = Join-Path $SamplePath "package.json"
+            if (Test-Path $pkgJsonFile) {
+                $pkgJson = Get-Content $pkgJsonFile | ConvertFrom-Json
+
+                $allDeps = @{}
+                if ($pkgJson.dependencies) {
+                    $pkgJson.dependencies.PSObject.Properties | ForEach-Object {
+                        # Strip range operators (^, ~, >=, ...) to keep a bare version
+                        $allDeps[$_.Name] = $_.Value -replace '[\^~>=<]', ''
+                    }
+                }
+                if ($pkgJson.devDependencies) {
+                    $pkgJson.devDependencies.PSObject.Properties | ForEach-Object {
+                        $allDeps[$_.Name] = $_.Value -replace '[\^~>=<]', ''
+                    }
+                }
+
+                $versions = $allDeps
+            }
+        }
+    }
+
+    return $versions
+}
+
+# Main validation logic
+Write-Host "=== SDK Version Validation ===" -ForegroundColor Cyan
+Write-Host "Sample Path: $SamplePath" -ForegroundColor Gray
+Write-Host "Sample Type: $SampleType" -ForegroundColor Gray
+Write-Host "Include Pre-Release: $IncludePreRelease" -ForegroundColor Gray
+Write-Host ""
+
+# Read the versions declared by the sample.
+# Fixed: the emoji literals in this section were mojibake-corrupted.
+Write-Host "📦 Reading installed versions..." -ForegroundColor Cyan
+$installedVersions = Get-InstalledVersions -SamplePath $SamplePath -SampleType $SampleType
+
+if ($installedVersions.Count -eq 0) {
+    # Nothing to validate; treat as success so empty samples don't fail CI
+    Write-Host "⚠️ No package versions found in sample" -ForegroundColor Yellow
+    exit 0
+}
+
+Write-Host "Found $($installedVersions.Count) packages" -ForegroundColor Gray
+Write-Host ""
+
+# Get tracked SDK packages for this type
+$trackedPackages = $SdkPackages[$SampleType]
+
+# Check each tracked package against the registry's latest version
+$validationResults = @()
+$hasOutdated = $false
+
+Write-Host "🔍 Checking against latest versions..." -ForegroundColor Cyan
+Write-Host ""
+
+foreach ($pkgName in $trackedPackages) {
+    $installed = $installedVersions[$pkgName]
+
+    if (-not $installed) {
+        continue # Package not used in this sample
+    }
+
+    $latest = $null
+    $latestTag = "latest"
+
+    # Query the registry matching the sample's platform
+    switch ($SampleType) {
+        "dotnet" {
+            $latest = Get-NuGetLatestVersion -PackageName $pkgName -IncludePreRelease $IncludePreRelease
+        }
+        "python" {
+            $latest = Get-PyPILatestVersion -PackageName $pkgName -IncludePreRelease $IncludePreRelease
+        }
+        "nodejs" {
+            # npm helper returns a hashtable carrying both version and dist-tag
+            $result = Get-NpmLatestVersion -PackageName $pkgName -IncludePreRelease $IncludePreRelease
+            if ($result) {
+                $latest = $result.version
+                $latestTag = $result.tag
+            }
+        }
+    }
+
+    if ($latest) {
+        # Note: This uses simple string equality for version comparison.
+        # For semantic versioning with pre-release identifiers, a more robust
+        # comparison would be needed, but equality check suffices for our use case
+        # of detecting when installed version matches latest available.
+        $isUpToDate = ($installed -eq $latest) -or ($installed -eq "not-pinned")
+        $isPreRelease = $latest -match '(alpha|beta|preview|pre|rc|dev|a\d|b\d|-)'
+
+        $result = @{
+            package = $pkgName
+            installed = $installed
+            latest = $latest
+            latestTag = $latestTag
+            isUpToDate = $isUpToDate
+            isPreRelease = $isPreRelease
+        }
+
+        $validationResults += $result
+
+        # Fixed: the icon literals were mojibake-corrupted (one split across two
+        # lines); also made the $hasOutdated side effect explicit instead of
+        # hiding it inside the icon-selection expression.
+        if ($isUpToDate) {
+            $statusIcon = "✅"
+        } else {
+            $statusIcon = "⚠️"
+            $hasOutdated = $true
+        }
+        $preReleaseLabel = if ($isPreRelease) { " (pre-release)" } else { "" }
+
+        Write-Host "$statusIcon $pkgName" -ForegroundColor $(if ($isUpToDate) { "Green" } else { "Yellow" })
+        Write-Host " Installed: $installed" -ForegroundColor Gray
+        Write-Host " Latest: $latest$preReleaseLabel" -ForegroundColor $(if ($isPreRelease) { "Magenta" } else { "Gray" })
+    }
+}
+
+Write-Host ""
+
+# Summary
+# Fixed: wrap the Where-Object pipelines in @(...). With exactly one matching
+# result, Where-Object returns the bare hashtable, whose .Count is the number
+# of keys (6), not 1 — which inflated these tallies.
+$upToDateCount = @($validationResults | Where-Object { $_.isUpToDate }).Count
+$outdatedCount = @($validationResults | Where-Object { -not $_.isUpToDate }).Count
+$preReleaseCount = @($validationResults | Where-Object { $_.isPreRelease }).Count
+
+Write-Host "=== Validation Summary ===" -ForegroundColor Cyan
+Write-Host "Packages checked: $($validationResults.Count)" -ForegroundColor Gray
+Write-Host "Up to date: $upToDateCount" -ForegroundColor Green
+Write-Host "Outdated: $outdatedCount" -ForegroundColor $(if ($outdatedCount -gt 0) { "Yellow" } else { "Gray" })
+Write-Host "Using pre-release: $preReleaseCount" -ForegroundColor $(if ($preReleaseCount -gt 0) { "Magenta" } else { "Gray" })
+
+if ($hasOutdated) {
+    Write-Host ""
+    # Fixed: warning emoji literal was mojibake-corrupted
+    Write-Host "⚠️ Some SDK packages are not using the latest version!" -ForegroundColor Yellow
+    Write-Host "Consider updating to test against the newest SDK releases." -ForegroundColor Yellow
+}
+
+# Output JSON if requested
+if ($OutputJson) {
+    # Assemble the machine-readable report for downstream workflow steps.
+    $summary = @{
+        allUpToDate = (-not $hasOutdated)
+        packagesChecked = $validationResults.Count
+        upToDate = $upToDateCount
+        outdated = $outdatedCount
+        usingPreRelease = $preReleaseCount
+    }
+
+    $report = @{
+        samplePath = $SamplePath
+        sampleType = $SampleType
+        timestamp = (Get-Date).ToUniversalTime().ToString("yyyy-MM-ddTHH:mm:ssZ")
+        includePreRelease = $IncludePreRelease
+        validation = $summary
+        packages = $validationResults
+    }
+
+    $report | ConvertTo-Json -Depth 10
+}
+
+# Exit non-zero when any tracked package is behind the latest available release.
+if ($hasOutdated) {
+    exit 1
+}
+exit 0