From d4cc69f55bb4d96621a53c4bf789cdaa0ce631e2 Mon Sep 17 00:00:00 2001 From: Kasturi Narra Date: Thu, 16 Apr 2026 13:48:22 +0530 Subject: [PATCH] Add lvms ci manager & shared scripts across plugins --- plugins/lvms-ci/.claude-plugin/plugin.json | 10 + plugins/lvms-ci/README.md | 71 ++++ .../lvms-ci/skills/analyze-release/SKILL.md | 78 ++++ plugins/lvms-ci/skills/doctor/SKILL.md | 149 +++++++ .../skills/generate-html-report/SKILL.md | 44 ++ plugins/microshift-ci/skills/doctor/SKILL.md | 8 +- .../scripts/aggregate.py | 0 .../scripts/create-report.py | 401 ++++++++++-------- .../scripts/doctor.sh | 160 ++++--- .../scripts/download-jobs.sh | 0 .../scripts/prow-jobs-for-release.sh | 27 +- 11 files changed, 702 insertions(+), 246 deletions(-) create mode 100644 plugins/lvms-ci/.claude-plugin/plugin.json create mode 100644 plugins/lvms-ci/README.md create mode 100644 plugins/lvms-ci/skills/analyze-release/SKILL.md create mode 100644 plugins/lvms-ci/skills/doctor/SKILL.md create mode 100644 plugins/lvms-ci/skills/generate-html-report/SKILL.md rename plugins/{microshift-ci => shared}/scripts/aggregate.py (100%) rename plugins/{microshift-ci => shared}/scripts/create-report.py (76%) rename plugins/{microshift-ci => shared}/scripts/doctor.sh (55%) rename plugins/{microshift-ci => shared}/scripts/download-jobs.sh (100%) rename plugins/{microshift-ci => shared}/scripts/prow-jobs-for-release.sh (57%) diff --git a/plugins/lvms-ci/.claude-plugin/plugin.json b/plugins/lvms-ci/.claude-plugin/plugin.json new file mode 100644 index 00000000..c34e256e --- /dev/null +++ b/plugins/lvms-ci/.claude-plugin/plugin.json @@ -0,0 +1,10 @@ +{ + "name": "lvms-ci", + "description": "LVMS CI Release Manager - Analyze LVMS periodic job failures and generate HTML reports", + "version": "1.0.0", + "author": { + "name": "kasturinarra" + }, + "homepage": "https://github.com/openshift-eng/edge-tooling", + "license": "Apache-2.0" +} diff --git a/plugins/lvms-ci/README.md b/plugins/lvms-ci/README.md new 
file mode 100644 index 00000000..38ff0df7 --- /dev/null +++ b/plugins/lvms-ci/README.md @@ -0,0 +1,71 @@ +# lvms-ci + +Analyze LVMS CI periodic job failures and generate HTML release manager reports. + +## Installation + +```text +/plugin marketplace add openshift-eng/edge-tooling +/plugin install lvms-ci +``` + +## Skills + +| Skill | Description | +|---|---| +| `/lvms-ci:doctor` | Analyze CI for multiple releases and produce an HTML summary | +| `/lvms-ci:analyze-release` | Analyze all failed LVMS periodic jobs for a single release | +| `/lvms-ci:generate-html-report` | Re-generate HTML report from existing analysis files | + +## Usage + +### Full pipeline +```text +/lvms-ci:doctor 4.20,4.21,4.22 +``` + +### Single release analysis +```text +/lvms-ci:analyze-release 4.22 +``` + +### Re-generate report +```text +/lvms-ci:generate-html-report 4.20,4.21,4.22 +``` + +## Architecture + +The pipeline follows the same pattern as `microshift-ci` and reuses shared scripts where possible: + +1. **Prepare** (`doctor.sh prepare`) -- collects failed jobs and downloads artifacts +2. **Analyze** -- LLM agents analyze each job in parallel via `/ci:prow-job-analyze-test-failure` +3. 
**Finalize** (`doctor.sh finalize`) -- aggregates results and generates HTML + +### Scripts + +All scripts are shared across plugins in `plugins/shared/scripts/`: + +| Script | Purpose | +|---|---| +| `doctor.sh` | Orchestrator with prepare/finalize phases (`--product lvms --filter lvm`) | +| `prow-jobs-for-release.sh` | Fetch failed periodic jobs from Prow API (`--filter lvm`) | +| `download-jobs.sh` | Download job artifacts in parallel | +| `aggregate.py` | Aggregate per-job reports into release summary JSON | +| `create-report.py` | Generate HTML report (`--product lvms` enables index image section) | + +### LVMS-Specific Features + +- **Index image extraction**: Per-job analysis extracts the LVMS catalog index image (digest, build date, source commit) and displays it in the HTML report +- **Prow API**: Uses the standard Prow `data.js` API to discover LVMS periodic jobs + +## Requirements + +- `gcloud` CLI (for downloading artifacts from public GCS buckets) +- `skopeo` (for index image inspection) +- Python 3 +- **Category:** ci-cd + +## Author + +kasturinarra diff --git a/plugins/lvms-ci/skills/analyze-release/SKILL.md b/plugins/lvms-ci/skills/analyze-release/SKILL.md new file mode 100644 index 00000000..6c7cdfdf --- /dev/null +++ b/plugins/lvms-ci/skills/analyze-release/SKILL.md @@ -0,0 +1,78 @@ +--- +name: lvms-ci:analyze-release +argument-hint: +description: Analyze all failed LVMS periodic jobs for a single release +user-invocable: true +allowed-tools: Skill, Bash, Read, Write, Glob, Grep, Agent +--- + +# lvms-ci:analyze-release + +## Synopsis +```bash +/lvms-ci:analyze-release +``` + +## Description +Fetches failed LVMS periodic jobs for a release, downloads artifacts, analyzes each job via `/ci:prow-job-analyze-test-failure`, and produces an aggregated summary. This is a standalone version of what `/lvms-ci:doctor` does for a single release. 
+ +## Arguments +- `` (required): e.g., 4.22, 4.21 + +## Scripts Directory + +Shared scripts are in: +```bash +SHARED_SCRIPTS=plugins/shared/scripts +``` + +## Work Directory +```bash +WORKDIR=/tmp/lvms-ci-claude-workdir.$(date +%y%m%d) +``` + +## Steps + +### Step 1: Prepare -- Collect and Download Artifacts +1. `WORKDIR=/tmp/lvms-ci-claude-workdir.$(date +%y%m%d)` +2. Run: + ```bash + bash ${SHARED_SCRIPTS}/doctor.sh prepare --product lvms --filter lvm --workdir ${WORKDIR} + ``` +3. Read the JSON output. If no failed jobs, report success and stop. + +### Step 2: Analyze Each Job +For each failed job, launch a separate **Agent** with `run_in_background: true`: + +``` +This is an LVMS job. Artifacts are in gs://test-platform-results/. +Some build-log.txt files are gzip-compressed -- pipe through zcat if binary. + +Before analyzing test failures, check artifacts//lvms-catalogsource/finished.json -- if "passed":false, that is the root cause. Report it and skip test analysis. + +## Extract Index Image Info +Before running test analysis, extract the LVMS catalog index image from the job artifacts: +1. Fetch artifacts//lvms-catalogsource/build-log.txt (may be gzip-compressed) +2. Look for the line containing "LVM_INDEX_IMAGE is set to:" and extract the image reference +3. If found, run skopeo inspect --no-tags "docker://" to get: + - Digest, Build date, Source commit +4. Include this in the report under an "## Index Image" section + +Run /ci:prow-job-analyze-test-failure + +Save the full report to: /analyze-ci-release--job--.txt +``` + +Launch ALL agents in parallel. Wait for all to complete. + +### Step 3: Finalize +1. Run: + ```bash + bash ${SHARED_SCRIPTS}/doctor.sh finalize --product lvms --workdir ${WORKDIR} + ``` +2. Display the summary and path to the generated HTML report. 
+ +## Prerequisites +- `gcloud` CLI installed (for downloading artifacts from public GCS buckets) +- `skopeo` for index image inspection +- Python 3 diff --git a/plugins/lvms-ci/skills/doctor/SKILL.md b/plugins/lvms-ci/skills/doctor/SKILL.md new file mode 100644 index 00000000..b5d72ce1 --- /dev/null +++ b/plugins/lvms-ci/skills/doctor/SKILL.md @@ -0,0 +1,149 @@ +--- +name: lvms-ci:doctor +argument-hint: +description: Analyze CI for multiple LVMS releases and produce an HTML summary +user-invocable: true +allowed-tools: Skill, Bash, Read, Write, Glob, Grep, Agent +--- + +# lvms-ci:doctor + +## Synopsis +```bash +/lvms-ci:doctor +``` + +## Description +Accepts a comma-separated list of release versions, runs analysis for each release, and produces a single HTML summary file consolidating all results. Uses deterministic scripts for data collection, artifact download, aggregation, and HTML generation. LLM agents are used only for per-job root cause analysis. + +## Arguments +- `$ARGUMENTS` (required): Comma-separated list of release versions (e.g., `4.20,4.21,4.22`) + +## Scripts Directory + +Shared scripts are in: +```bash +SHARED_SCRIPTS=plugins/shared/scripts +``` + +## Work Directory + +Set once at the start and reference throughout: +```bash +WORKDIR=/tmp/lvms-ci-claude-workdir.$(date +%y%m%d) +``` + +## Implementation Steps + +### Step 1: Prepare -- Collect and Download All Artifacts + +**Goal**: Deterministically collect all failed jobs and download their artifacts before any LLM analysis. + +**Actions**: +1. Run `WORKDIR=/tmp/lvms-ci-claude-workdir.$(date +%y%m%d)` using the `Bash` tool +2. Run the prepare script: + ```bash + bash ${SHARED_SCRIPTS}/doctor.sh prepare --product lvms --filter lvm --workdir ${WORKDIR} $ARGUMENTS + ``` +3. 
The script deterministically: + - For each release: fetches failed periodic jobs, downloads artifacts, writes `${WORKDIR}/analyze-ci-release--jobs.json` + - Outputs a JSON summary listing all releases, job counts, and file paths +4. Read the JSON output to know which releases have jobs to analyze and how many + +**Error Handling**: +- If `$ARGUMENTS` is empty, show usage and stop +- If a release has no failed jobs, its jobs JSON will be an empty array -- skip analysis for that release + +### Step 2: Analyze Each Job Using /lvms-ci:analyze-release + +**Goal**: Get detailed root cause analysis for each failed job using pre-downloaded artifacts. + +**Actions**: +1. Use the JSON summary output from Step 1 to build agent prompts. Do NOT read the job JSON files into the main conversation -- the prepare script already printed all job details (artifacts_dir, build_id, job name) and agents receive artifacts_dir directly in their prompt. +2. For **every** failed job across all releases, launch a separate **Agent** (using the `Agent` tool, NOT the `Skill` tool). + + ```text + Agent: subagent_type=general_purpose, prompt="Analyze this LVMS Prow job and save the report: + + This is an LVMS job. Artifacts are in gs://test-platform-results/. + Some build-log.txt files are gzip-compressed -- pipe through zcat if binary. + + Before analyzing test failures, check artifacts//lvms-catalogsource/finished.json -- if 'passed':false, that is the root cause. Report it and skip test analysis. + + ## Extract Index Image Info + Before running test analysis, extract the LVMS catalog index image from the job artifacts: + 1. Fetch artifacts//lvms-catalogsource/build-log.txt (may be gzip-compressed) + 2. Look for the line containing 'LVM_INDEX_IMAGE is set to:' and extract the image reference + 3. 
If found, run skopeo inspect --no-tags 'docker://' to get: + - Digest (sha256) + - Build date (from org.opencontainers.image.created label) + - Source commit (from vcs-ref or org.opencontainers.image.revision label) + 4. Include this in the report under an '## Index Image' section + + Run /ci:prow-job-analyze-test-failure + + Save the full report to: ${WORKDIR}/analyze-ci-release--job--.txt" + ``` + +3. Launch **ALL** agents in a single message using `run_in_background: true` +4. After launching, say "Analyzing N jobs in parallel..." and STOP. +5. As agent completion notifications arrive, respond with only "." (a single period). +6. Only after ALL agents are confirmed complete, proceed to Step 3. + +### Step 3: Finalize -- Aggregate and Generate HTML Report + +**Goal**: Deterministically aggregate results and generate the HTML report. + +**Actions**: +1. Run the finalize script: + ```bash + bash ${SHARED_SCRIPTS}/doctor.sh finalize --product lvms --workdir ${WORKDIR} $ARGUMENTS + ``` +2. The script deterministically: + - Runs `aggregate.py` for each release -> `summary.json` files + - Runs `create-report.py` -> `lvms-ci-doctor-report.html` +3. Report the script's output to the user + +### Step 4: Report Completion + +**Actions**: +1. Display the path to the generated HTML file +2. 
Summarize: failed job counts per release + +**Example Output**: +```text +Summary: + Release 4.20: 3 failed periodic jobs + Release 4.21: 0 failed periodic jobs + Release 4.22: 7 failed periodic jobs + +HTML report generated: ${WORKDIR}/lvms-ci-doctor-report.html +``` + +## Examples + +### Example 1: Analyze Multiple Releases +```bash +/lvms-ci:doctor 4.20,4.21,4.22 +``` + +### Example 2: Single Release +```bash +/lvms-ci:doctor 4.22 +``` + +## Prerequisites + +- `gcloud` CLI installed (for downloading artifacts from public GCS buckets) +- `skopeo` for index image inspection +- Python 3 +- Bash shell + +## Notes +- **Deterministic scripts** handle: data collection, artifact download, aggregation, HTML generation +- **LLM agents** handle: per-job root cause analysis (Step 2) +- All agents are launched in a single parallel wave +- The `prepare` script downloads all artifacts upfront so prow-job agents use local paths +- The `finalize` script runs aggregation and HTML generation in one call +- All intermediate files use prescribed filenames in `${WORKDIR}` +- The HTML report is self-contained (no external CSS/JS dependencies) diff --git a/plugins/lvms-ci/skills/generate-html-report/SKILL.md b/plugins/lvms-ci/skills/generate-html-report/SKILL.md new file mode 100644 index 00000000..d597ab94 --- /dev/null +++ b/plugins/lvms-ci/skills/generate-html-report/SKILL.md @@ -0,0 +1,44 @@ +--- +name: lvms-ci:generate-html-report +argument-hint: +description: Generate an HTML report from existing LVMS CI analysis files +user-invocable: true +allowed-tools: Bash, Read, Glob, Grep +--- + +# lvms-ci:generate-html-report + +## Synopsis +```bash +/lvms-ci:generate-html-report +``` + +## Description +Generates an HTML report from existing analysis files in the work directory. This is useful for re-generating the report after analysis has already been completed by `/lvms-ci:doctor` or `/lvms-ci:analyze-release`. 
+ +## Arguments +- `$ARGUMENTS` (required): Comma-separated release versions (e.g., `4.20,4.21,4.22`) + +## Scripts Directory +```bash +SHARED_SCRIPTS=plugins/shared/scripts +``` + +## Work Directory +```bash +WORKDIR=/tmp/lvms-ci-claude-workdir.$(date +%y%m%d) +``` + +## Steps + +### Step 1: Run Finalize +```bash +bash ${SHARED_SCRIPTS}/doctor.sh finalize --product lvms --workdir ${WORKDIR} $ARGUMENTS +``` + +### Step 2: Report Completion +Display the path to the generated HTML file. + +## Prerequisites +- Analysis files must already exist in `${WORKDIR}` (produced by `/lvms-ci:doctor` or `/lvms-ci:analyze-release`) +- Python 3 diff --git a/plugins/microshift-ci/skills/doctor/SKILL.md b/plugins/microshift-ci/skills/doctor/SKILL.md index 19726767..32f297c5 100644 --- a/plugins/microshift-ci/skills/doctor/SKILL.md +++ b/plugins/microshift-ci/skills/doctor/SKILL.md @@ -21,9 +21,9 @@ Accepts a comma-separated list of MicroShift release versions, runs analysis for ## Scripts Directory -All scripts are run relative to the repository root: +Shared scripts are in: ```bash -SCRIPTS_DIR=plugins/microshift-ci/scripts +SHARED_SCRIPTS=plugins/shared/scripts ``` ## Work Directory @@ -43,7 +43,7 @@ WORKDIR=/tmp/microshift-ci-claude-workdir.$(date +%y%m%d) 1. Determine today's WORKDIR path by running `date +%y%m%d` and substituting into `/tmp/microshift-ci-claude-workdir.YYMMDD`. Use this value in all subsequent `--workdir` arguments. 2. Run the prepare script: ```bash - bash ${SCRIPTS_DIR}/doctor.sh prepare --workdir ${WORKDIR} $ARGUMENTS --rebase + bash ${SHARED_SCRIPTS}/doctor.sh prepare --product microshift --filter microshift --workdir ${WORKDIR} $ARGUMENTS --rebase ``` 3. The script deterministically: - For each release: fetches failed periodic jobs, downloads artifacts, writes `${WORKDIR}/analyze-ci-release--jobs.json` @@ -122,7 +122,7 @@ WORKDIR=/tmp/microshift-ci-claude-workdir.$(date +%y%m%d) **Actions**: 1. 
Run the finalize script: ```bash - bash ${SCRIPTS_DIR}/doctor.sh finalize --workdir ${WORKDIR} $ARGUMENTS + bash ${SHARED_SCRIPTS}/doctor.sh finalize --product microshift --workdir ${WORKDIR} $ARGUMENTS ``` 2. The script deterministically: - Runs `aggregate.py` for each release and for PRs → `summary.json` files diff --git a/plugins/microshift-ci/scripts/aggregate.py b/plugins/shared/scripts/aggregate.py similarity index 100% rename from plugins/microshift-ci/scripts/aggregate.py rename to plugins/shared/scripts/aggregate.py diff --git a/plugins/microshift-ci/scripts/create-report.py b/plugins/shared/scripts/create-report.py similarity index 76% rename from plugins/microshift-ci/scripts/create-report.py rename to plugins/shared/scripts/create-report.py index 6c824abf..bbeda638 100755 --- a/plugins/microshift-ci/scripts/create-report.py +++ b/plugins/shared/scripts/create-report.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 """ -Generate an HTML report from analyze-ci JSON files. +Generate an HTML report from CI analysis JSON files. -Reads JSON summary files (from aggregate.py) and JSON bug mapping -files (from microshift-ci:create-bugs) to produce a consolidated HTML report. +Supports multiple products via --product flag: + - microshift: PR tab with rebase PR analysis, tab bar navigation + - lvms: index image section per release Usage: - create-report.py [--workdir DIR] + create-report.py --product PRODUCT [--workdir DIR] """ import json @@ -22,11 +23,12 @@ # Constants # --------------------------------------------------------------------------- +PRODUCT_TITLES = { + "microshift": "MicroShift", + "lvms": "LVMS", +} + # Threshold for fuzzy matching issue titles to bug candidate signatures. -# Uses asymmetric formula: overlap / len(sig_tokens) — measures what fraction -# of the bug candidate's signature is covered by the issue title. 
This differs -# from the symmetric min-based formula in aggregate.py/search-bugs.py because -# issue titles are short summaries while signatures are detailed. MATCH_THRESHOLD = 0.50 STOP_WORDS = frozenset({ @@ -103,19 +105,11 @@ .ftype-badge { display: inline-block; padding: 2px 8px; border-radius: 4px; font-size: 0.75em; font-weight: 700; text-transform: uppercase; } .ftype-test { background: #cce5ff; color: #004085; } .ftype-build { background: #e2d5f1; color: #4a235a; } - .ftype-infra { background: #fde2cc; color: #7d4e24; }""" + .ftype-infra { background: #fde2cc; color: #7d4e24; } + .index-image-info { background: #e8f4fd; border-left: 3px solid #0366d6; padding: 8px 12px; margin: 8px 0; font-size: 0.9em; } + .index-image-info code { background: #f1f1f1; padding: 2px 4px; border-radius: 3px; font-size: 0.9em; }""" -JS = """\ -function showTab(e, name) { - document.querySelectorAll('.tab-content').forEach(function(el) { - el.classList.remove('active'); - }); - document.querySelectorAll('.tab-btn').forEach(function(el) { - el.classList.remove('active'); - }); - document.getElementById('tab-' + name).classList.add('active'); - e.target.classList.add('active'); -} +JS_EXPAND_COLLAPSE = """\ document.querySelectorAll('.col-title').forEach(function(el) { el.addEventListener('click', function() { this.classList.toggle('active'); @@ -126,6 +120,18 @@ }); });""" +JS_TAB_SWITCH = """\ +function showTab(e, name) { + document.querySelectorAll('.tab-content').forEach(function(el) { + el.classList.remove('active'); + }); + document.querySelectorAll('.tab-btn').forEach(function(el) { + el.classList.remove('active'); + }); + document.getElementById('tab-' + name).classList.add('active'); + e.target.classList.add('active'); +}""" + # --------------------------------------------------------------------------- # File discovery @@ -162,7 +168,7 @@ def discover_files(workdir, releases): # --------------------------------------------------------------------------- -# JSON loading 
(replaces all text parsers) +# JSON loading # --------------------------------------------------------------------------- def load_json(filepath): @@ -183,6 +189,45 @@ def load_bug_candidates(filepath): return data.get("candidates", []) +# --------------------------------------------------------------------------- +# Index image extraction (LVMS-specific) +# --------------------------------------------------------------------------- + +def extract_index_image(workdir, version): + """Extract index image info from per-job report files. + + Scans per-job report files for an '## Index Image' section containing + Image, Digest, Built, and Source Commit fields. + """ + pattern = os.path.join(workdir, f"analyze-ci-release-{version}-job-*.txt") + for filepath in sorted(glob_mod.glob(pattern)): + try: + with open(filepath, "r") as f: + content = f.read() + except IOError: + continue + + if "## Index Image" not in content: + continue + + info = {} + for line in content.split("\n"): + line = line.strip() + if line.startswith("- **Image:**"): + info["image"] = line.split("**Image:**", 1)[1].strip() + elif line.startswith("- **Digest:**"): + info["digest"] = line.split("**Digest:**", 1)[1].strip() + elif line.startswith("- **Built:**"): + info["built"] = line.split("**Built:**", 1)[1].strip() + elif line.startswith("- **Source Commit:**"): + info["commit"] = line.split("**Source Commit:**", 1)[1].strip() + + if info.get("image"): + return info + + return None + + # --------------------------------------------------------------------------- # Fuzzy matching # --------------------------------------------------------------------------- @@ -212,12 +257,7 @@ def match_issue_to_bugs(issue_title, bug_candidates): def _extract_pr_numbers(candidate): - """Extract PR numbers from a bug candidate's job names/URLs. - - Handles two patterns: - - File-derived job names: "-pr123-" (from analyze-ci-prs-job-*-pr-*.txt) - - Prow URLs: ".../pull/openshift_microshift/123/..." 
- """ + """Extract PR numbers from a bug candidate's job names/URLs.""" pr_nums = set() for job in candidate.get("jobs", []): url = job.get("job_url", "") @@ -232,11 +272,7 @@ def _extract_pr_numbers(candidate): def _index_pr_bugs(bug_paths): - """Load PR bug candidates and index them by PR number. - - Returns a dict mapping PR number (int) to list of bug candidates. - Candidates affecting multiple PRs appear under each PR. - """ + """Load PR bug candidates and index them by PR number.""" by_pr = {} for path in bug_paths: for cand in load_bug_candidates(path): @@ -292,42 +328,33 @@ def _render_bug_links(bug_match): return "".join(parts) -# --------------------------------------------------------------------------- -# HTML rendering -# --------------------------------------------------------------------------- - -def render_release_section(version, rdata, bug_candidates): - if rdata is None: - return ( - f'
\n' - '
\n' - f'

Release {_e(version)}

\n' - ' no data\n' - '
\n' - "

Analysis failed to produce results.

\n" - "
" +def _render_index_image(index_info): + """Render index image info box HTML (LVMS-specific).""" + if not index_info: + return "" + lines = ['
'] + if index_info.get("image"): + lines.append(f' Catalog Index Image: {_e(index_info["image"])}
') + if index_info.get("digest"): + lines.append(f' Digest: {_e(index_info["digest"])}
') + if index_info.get("built"): + lines.append(f' Built: {_e(index_info["built"])}
') + if index_info.get("commit"): + commit = index_info["commit"] + short = commit[:12] if len(commit) >= 12 else commit + lines.append( + f' Source Commit: ' + f'{_e(short)}' ) + lines.append("
") + return "\n".join(lines) - total = rdata["total_failed"] - has_critical = any(i.get("severity", "").upper() == "CRITICAL" for i in rdata["issues"]) - badge = _badge_class(total, has_critical) - b = rdata["breakdown"] +def _render_issues_table(issues, bug_candidates): + """Render the issues table rows (shared between release and PR sections).""" lines = [] - lines.append(f'
') - lines.append('
') - lines.append(f"

Release {_e(version)}

") - label = "failure" if total == 1 else "failures" - lines.append(f' {total} {label}') - lines.append("
") - lines.append('
') - lines.append(f' {b["build"]} Build') - lines.append(f' {b["test"]} Test') - lines.append(f' {b["infrastructure"]} Infrastructure') - lines.append("
") - lines.append(' ') - for issue in rdata["issues"]: + for issue in issues: bug_match = match_issue_to_bugs(issue["title"], bug_candidates) jc = issue["job_count"] sev = issue.get("severity", "UNKNOWN").upper() @@ -360,25 +387,62 @@ def render_release_section(version, rdata, bug_candidates): lines.append(f"

Next Steps: {_e(issue['next_steps'])}

") lines.append(" ") lines.append('
') + return "\n".join(lines) + + +# --------------------------------------------------------------------------- +# HTML rendering +# --------------------------------------------------------------------------- + +def render_release_section(version, rdata, bug_candidates, index_info=None): + if rdata is None: + return ( + f'
\n' + '
\n' + f'

Release {_e(version)}

\n' + ' no data\n' + '
\n' + "

Analysis failed to produce results.

\n" + "
" + ) + + total = rdata["total_failed"] + has_critical = any(i.get("severity", "").upper() == "CRITICAL" for i in rdata["issues"]) + badge = _badge_class(total, has_critical) + b = rdata["breakdown"] + + lines = [] + lines.append(f'
') + lines.append('
') + lines.append(f"

Release {_e(version)}

") + label = "failure" if total == 1 else "failures" + lines.append(f' {total} {label}') + lines.append("
") + + # Index image info (LVMS-specific, shared across all jobs in a release) + idx_html = _render_index_image(index_info) + if idx_html: + lines.append(idx_html) + + lines.append('
') + lines.append(f' {b["build"]} Build') + lines.append(f' {b["test"]} Test') + lines.append(f' {b["infrastructure"]} Infrastructure') + lines.append("
") + + lines.append(_render_issues_table(rdata["issues"], bug_candidates)) lines.append("
") return "\n".join(lines) def render_pr_section(pr_data, all_pr_bugs, pr_status): - """Render the Pull Requests tab. - - pr_data: analyzed PR summary (from aggregate), may be None. - all_pr_bugs: dict mapping PR number (int) to list of bug candidates. - pr_status: list of all PR status snapshots (from prepare), may be None. - """ - # Build a lookup of analyzed PRs by number + """Render the Pull Requests tab.""" analyzed = {} if pr_data and pr_data.get("has_content"): for pr in pr_data["prs"]: analyzed[pr["number"]] = pr - # Build the full PR list: all PRs from status, merged with analysis all_prs = [] if pr_status: for s in pr_status: @@ -396,7 +460,6 @@ def render_pr_section(pr_data, all_pr_bugs, pr_status): entry["analysis"] = analyzed[num] all_prs.append(entry) elif analyzed: - # No status file — fall back to analyzed data only for pr in pr_data["prs"]: all_prs.append({ "number": pr["number"], @@ -458,8 +521,6 @@ def render_pr_section(pr_data, all_pr_bugs, pr_status): lines.append("
") - # Breakdown: same format as periodics (Build/Test/Infrastructure) - # Plus job status (passed/running) when available pending = pr.get("pending", 0) if analysis and analysis.get("breakdown"): b = analysis["breakdown"] @@ -477,49 +538,17 @@ def render_pr_section(pr_data, all_pr_bugs, pr_status): pr_bugs = all_pr_bugs.get(pr["number"], []) if analysis and analysis.get("issues"): - - lines.append(' ') - for issue in analysis["issues"]: - bug_match = match_issue_to_bugs(issue.get("title", ""), pr_bugs) - jc = issue["job_count"] - sev = issue.get("severity", "UNKNOWN").upper() - sev_css = f"severity-{sev.lower()}" if sev in ("HIGH", "MEDIUM", "LOW", "CRITICAL") else "" - ftype = issue.get("failure_type", "test") - ftype_label = "INFRA" if ftype == "infrastructure" else ftype.upper() - ftype_css = "ftype-infra" if ftype == "infrastructure" else f"ftype-{ftype}" - jobs_label = f'{jc} {"job" if jc == 1 else "jobs"}' - - lines.append(' ') - lines.append(f' ') - lines.append(f' ') - lines.append(f' ') - lines.append(f' ') - lines.append(f' ') - lines.append(' ') - lines.append(' ") - lines.append('
{issue["number"]}.{sev}{ftype_label}{_e(issue["title"])}{jobs_label}
') - if issue.get("root_cause"): - lines.append(f'
Root Cause: {_e(issue["root_cause"])}
') - lines.append(f' ') - if issue.get("affected_jobs"): - lines.append("

Affected Jobs:

    ") - for job in issue["affected_jobs"]: - if job.get("url"): - lines.append(f'
  • [{_e(job["date"])}] {_e(job["name"])}
  • ') - else: - lines.append(f'
  • [{_e(job["date"])}] {_e(job["name"])}
  • ') - lines.append("
") - if issue.get("next_steps"): - lines.append(f"

Next Steps: {_e(issue['next_steps'])}

") - lines.append("
') + lines.append(_render_issues_table(analysis["issues"], pr_bugs)) lines.append(" ") return "\n".join(toc_lines) + "\n\n" + "\n".join(lines) -def generate_html(releases_data, bug_data, pr_data, all_pr_bugs, pr_status, timestamp): +def generate_html(product, releases_data, bug_data, index_data, pr_data, all_pr_bugs, pr_status, timestamp): + product_title = PRODUCT_TITLES.get(product, product.upper()) date_str = timestamp.strftime("%Y-%m-%d") time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S") + has_prs = bool(pr_data or pr_status) cards = [] for version, rdata in releases_data.items(): @@ -531,20 +560,21 @@ def generate_html(releases_data, bug_data, pr_data, all_pr_bugs, pr_status, time f'
Release {_e(version)}
\n' " " ) - # PR overview: count failures from status (all PRs) or analysis - if pr_status: - pr_failed = sum(p.get("failed", 0) for p in pr_status) - elif pr_data: - pr_failed = pr_data.get("total_failed", 0) - else: - pr_failed = 0 - pr_css = "status-fail" if pr_failed > 0 else "status-pass" - cards.append( - '
\n' - f'
{pr_failed}
\n' - f'
Rebase PRs
\n' - "
" - ) + + if has_prs: + if pr_status: + pr_failed = sum(p.get("failed", 0) for p in pr_status) + elif pr_data: + pr_failed = pr_data.get("total_failed", 0) + else: + pr_failed = 0 + pr_css = "status-fail" if pr_failed > 0 else "status-pass" + cards.append( + '
\n' + f'
{pr_failed}
\n' + f'
Rebase PRs
\n' + "
" + ) toc = [] for version, rdata in releases_data.items(): @@ -560,53 +590,77 @@ def generate_html(releases_data, bug_data, pr_data, all_pr_bugs, pr_status, time sections = [] for version, rdata in releases_data.items(): bugs = bug_data.get(version, []) - sections.append(render_release_section(version, rdata, bugs)) - - pr_section = render_pr_section(pr_data, all_pr_bugs, pr_status) + idx = index_data.get(version) + sections.append(render_release_section(version, rdata, bugs, idx)) + + # Build JS + js_parts = [JS_EXPAND_COLLAPSE] + if has_prs: + js_parts.insert(0, JS_TAB_SWITCH) + js = "\n".join(js_parts) + + # Build body content + body_parts = [] + body_parts.append(f'

{product_title} CI Doctor Report

') + body_parts.append(f'

Generated: {time_str} UTC

') + body_parts.append('') + body_parts.append('
') + body_parts.append(chr(10).join(cards)) + body_parts.append('
') + + if has_prs: + # Tabbed layout + pr_section = render_pr_section(pr_data, all_pr_bugs, pr_status) + body_parts.append('') + body_parts.append('
') + body_parts.append(' ') + body_parts.append(' ') + body_parts.append('
') + body_parts.append('') + body_parts.append('
') + body_parts.append('
') + body_parts.append('

Table of Contents

') + body_parts.append('
    ') + body_parts.append(chr(10).join(toc)) + body_parts.append('
') + body_parts.append('
') + body_parts.append('') + body_parts.append(chr(10).join(sections)) + body_parts.append('
') + body_parts.append('') + body_parts.append('
') + body_parts.append(pr_section) + body_parts.append('
') + else: + # Simple layout (no tabs) + body_parts.append('') + body_parts.append('
') + body_parts.append('

Table of Contents

') + body_parts.append('
    ') + body_parts.append(chr(10).join(toc)) + body_parts.append('
') + body_parts.append('
') + body_parts.append('') + body_parts.append(chr(10).join(sections)) return f"""\ - MicroShift CI Doctor Report - {date_str} + {product_title} CI Doctor Report - {date_str}
-

MicroShift CI Doctor Report

-

Generated: {time_str} UTC

- -
-{chr(10).join(cards)} -
- -
- - -
- -
-
-

Table of Contents

-
    -{chr(10).join(toc)} -
-
- -{chr(10).join(sections)} -
- -
-{pr_section} -
+{chr(10).join(body_parts)}

 

 

 

 

@@ -619,6 +673,7 @@ def generate_html(releases_data, bug_data, pr_data, all_pr_bugs, pr_status, time def main(): workdir = None + product = None releases_arg = None args = sys.argv[1:] @@ -630,6 +685,12 @@ def main(): sys.exit(1) workdir = args[i + 1] i += 2 + elif args[i] == "--product": + if i + 1 >= len(args): + print("Error: --product requires an argument", file=sys.stderr) + sys.exit(1) + product = args[i + 1] + i += 2 elif args[i].startswith("-"): print(f"Unknown option: {args[i]}", file=sys.stderr) sys.exit(1) @@ -637,8 +698,13 @@ def main(): releases_arg = args[i] i += 1 + if not product: + print("Error: --product is required", file=sys.stderr) + print("Usage: create-report.py --product PRODUCT [--workdir DIR] ", file=sys.stderr) + sys.exit(1) + if not releases_arg: - print("Usage: create-report.py [--workdir DIR] ", file=sys.stderr) + print("Usage: create-report.py --product PRODUCT [--workdir DIR] ", file=sys.stderr) sys.exit(1) releases = [v.strip() for v in releases_arg.split(",") if v.strip()] @@ -647,7 +713,7 @@ def main(): sys.exit(1) if workdir is None: - workdir = f"/tmp/microshift-ci-claude-workdir.{datetime.now().strftime('%y%m%d')}" + workdir = f"/tmp/{product}-ci-claude-workdir.{datetime.now().strftime('%y%m%d')}" if not os.path.isdir(workdir): print(f"Error: work directory does not exist: {workdir}", file=sys.stderr) @@ -686,15 +752,15 @@ def main(): print(f"\nError: no analysis files found in {workdir}", file=sys.stderr) sys.exit(1) - # Load everything via json.load + # Load release data releases_data = {} bug_data = {} + index_data = {} _EMPTY_BREAKDOWN = {"build": 0, "test": 0, "infrastructure": 0} for version in releases: entry = files["releases"][version] rdata = load_json(entry["summary"]) if rdata is None: - # Distinguish "no failures" from "analysis failed" by checking the jobs file jobs = load_json(entry["jobs"]) if jobs is not None and len(jobs) == 0: rdata = { @@ -704,22 +770,26 @@ def main(): } releases_data[version] = rdata 
bug_data[version] = load_bug_candidates(entry["bugs"]) + if product == "lvms": + index_data[version] = extract_index_image(workdir, version) + # Load PR data pr_data = load_json(pr_entry["summary"]) pr_status = load_json(pr_entry["status"]) all_pr_bugs = _index_pr_bugs(pr_entry["bugs"]) # Generate HTML + product_title = PRODUCT_TITLES.get(product, product.upper()) timestamp = datetime.now(timezone.utc) - html_content = generate_html(releases_data, bug_data, pr_data, all_pr_bugs, pr_status, timestamp) + html_content = generate_html(product, releases_data, bug_data, index_data, + pr_data, all_pr_bugs, pr_status, timestamp) - output_path = os.path.join(workdir, "microshift-ci-doctor-report.html") + output_path = os.path.join(workdir, f"{product}-ci-doctor-report.html") with open(output_path, "w") as f: f.write(html_content) # Summary print("\nSummary:") print(" Periodics:") for version in releases: rdata = releases_data[version] @@ -727,8 +797,8 @@ def main(): print(f" Release {version}: {rdata['total_failed']} failed periodic jobs") else: print(f" Release {version}: no data") - print(" Pull Requests:") if pr_status: + print(" Pull Requests:") pr_total_failed = sum(p.get("failed", 0) for p in pr_status) pr_total_pending = sum(p.get("pending", 0) for p in pr_status) parts = [f"{len(pr_status)} rebase PRs", f"{pr_total_failed} failed jobs"] @@ -736,9 +806,8 @@ def main(): parts.append(f"{pr_total_pending} running") print(f" {', '.join(parts)}") elif pr_data and pr_data.get("has_content"): + print(" Pull Requests:") print(f" {len(pr_data['prs'])} rebase PRs with {pr_data['total_failed']} total failed jobs") - else: - print(" No PR data") print(f"\nHTML report generated: {output_path}") diff --git a/plugins/microshift-ci/scripts/doctor.sh b/plugins/shared/scripts/doctor.sh similarity index 55% rename from plugins/microshift-ci/scripts/doctor.sh rename to plugins/shared/scripts/doctor.sh index bf6fc863..96e2e159 100755 ---
a/plugins/microshift-ci/scripts/doctor.sh +++ b/plugins/shared/scripts/doctor.sh @@ -1,27 +1,28 @@ #!/bin/bash set -euo pipefail -# Deterministic orchestration for microshift-ci:doctor. +# Shared CI doctor orchestration. # -# Two phases called by the doctor skill with LLM steps in between: +# Two phases called by doctor skills with LLM steps in between: # -# doctor.sh prepare --workdir DIR [--rebase] -# - Collects failed jobs for each release and rebase PRs +# doctor.sh prepare --product PRODUCT --filter FILTER --workdir DIR [--rebase] +# - Collects failed jobs for each release (and rebase PRs if --rebase) # - Downloads all artifacts in parallel # - Writes per-release and PR jobs JSON files # -# doctor.sh finalize --workdir DIR +# doctor.sh finalize --product PRODUCT --workdir DIR # - Runs aggregate.py for each release and PRs # - Runs create-report.py to generate HTML # -# Usage from doctor skill: -# 1. doctor.sh prepare --workdir $WORKDIR 4.18,4.19,4.20,main --rebase -# 2. (LLM launches prow-job agents for all jobs) -# 3. (LLM launches create-bugs agents for Jira search) -# 4. 
doctor.sh finalize --workdir $WORKDIR 4.18,4.19,4.20,main +# Examples: +# doctor.sh prepare --product microshift --filter microshift --workdir $W 4.22 --rebase +# doctor.sh finalize --product microshift --workdir $W 4.22 +# doctor.sh prepare --product lvms --filter lvm --workdir $W 4.22 +# doctor.sh finalize --product lvms --workdir $W 4.22 SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" WORKDIR="" +PRODUCT="" # --------------------------------------------------------------------------- # prepare @@ -29,22 +30,28 @@ WORKDIR="" cmd_prepare() { local releases_arg="" + local filter="" local do_rebase=false while [[ ${#} -gt 0 ]]; do case "${1}" in --workdir) WORKDIR="${2}"; shift 2 ;; + --product) PRODUCT="${2}"; shift 2 ;; + --filter) filter="${2}"; shift 2 ;; --rebase) do_rebase=true; shift ;; -*) echo "Unknown option: ${1}" >&2; return 1 ;; *) releases_arg="${1}"; shift ;; esac done - WORKDIR="${WORKDIR:-/tmp/microshift-ci-claude-workdir.$(date +%y%m%d)}" + [[ -z "${PRODUCT}" ]] && { echo "Error: --product is required" >&2; return 1; } + [[ -z "${filter}" ]] && { echo "Error: --filter is required" >&2; return 1; } + + WORKDIR="${WORKDIR:-/tmp/${PRODUCT}-ci-claude-workdir.$(date +%y%m%d)}" if [[ -z "${releases_arg}" ]]; then echo "Error: releases argument required" >&2 - echo "Usage: $(basename "$0") prepare [--workdir DIR] [--rebase]" >&2 + echo "Usage: $(basename "$0") prepare --product PRODUCT --filter FILTER [--workdir DIR] [--rebase]" >&2 return 1 fi @@ -63,7 +70,7 @@ cmd_prepare() { echo " Collecting failed periodic jobs..." >&2 local raw_json raw_err raw_err=$(mktemp) - if ! raw_json=$(bash "${SCRIPT_DIR}/prow-jobs-for-release.sh" "${release}" 2>"${raw_err}"); then + if ! 
raw_json=$(bash "${SCRIPT_DIR}/prow-jobs-for-release.sh" --filter "${filter}" "${release}" 2>"${raw_err}"); then echo " ERROR: failed to collect jobs for release ${release}:" >&2 cat "${raw_err}" >&2 rm -f "${raw_err}" @@ -104,63 +111,72 @@ cmd_prepare() { local prs_file="${WORKDIR}/analyze-ci-prs-jobs.json" local prs_status_file="${WORKDIR}/analyze-ci-prs-status.json" - echo " Collecting rebase PRs..." >&2 - local pr_json pr_err - pr_err=$(mktemp) - if ! pr_json=$(bash "${SCRIPT_DIR}/prow-jobs-for-pull-requests.sh" \ - --mode detail --author "microshift-rebase-script[bot]" 2>"${pr_err}"); then - echo " ERROR: failed to collect rebase PRs:" >&2 - cat "${pr_err}" >&2 - rm -f "${pr_err}" + # Find the product-specific PR script + local pr_script="${SCRIPT_DIR}/../../${PRODUCT}-ci/scripts/prow-jobs-for-pull-requests.sh" + if [[ ! -f "${pr_script}" ]]; then + echo " ERROR: PR script not found: ${pr_script}" >&2 + echo " --rebase requires plugins/${PRODUCT}-ci/scripts/prow-jobs-for-pull-requests.sh" >&2 echo "[]" > "${prs_file}" echo "[]" > "${prs_status_file}" else - rm -f "${pr_err}" - - local pr_count - pr_count=$(echo "${pr_json}" | jq 'length') - - if [[ "${pr_count}" -eq 0 ]]; then - echo " No rebase PRs found" >&2 + echo " Collecting rebase PRs..." >&2 + local pr_json pr_err + pr_err=$(mktemp) + if ! 
pr_json=$(bash "${pr_script}" \ + --mode detail --author "microshift-rebase-script[bot]" 2>"${pr_err}"); then + echo " ERROR: failed to collect rebase PRs:" >&2 + cat "${pr_err}" >&2 + rm -f "${pr_err}" echo "[]" > "${prs_file}" echo "[]" > "${prs_status_file}" else - # Save job status snapshot for all PRs (used by HTML report) - echo "${pr_json}" | jq '[.[] | { - pr_number, title, url, - passed: [.jobs[] | select(.status == "SUCCESS")] | length, - failed: [.jobs[] | select(.status == "FAILURE")] | length, - pending: [.jobs[] | select(.status != "SUCCESS" and .status != "FAILURE")] | length, - total: (.jobs | length) - }]' > "${prs_status_file}" - echo " Saved status for ${pr_count} rebase PRs" >&2 - - # Filter to PRs with failed jobs for artifact download - local failed_prs - failed_prs=$(echo "${pr_json}" | \ - jq '[.[] | select(.jobs | map(select(.status == "FAILURE")) | length > 0)]') - - local failed_pr_count - failed_pr_count=$(echo "${failed_prs}" | jq 'length') - - if [[ "${failed_pr_count}" -eq 0 ]]; then - echo " No PRs with failures to investigate" >&2 + rm -f "${pr_err}" + + local pr_count + pr_count=$(echo "${pr_json}" | jq 'length') + + if [[ "${pr_count}" -eq 0 ]]; then + echo " No rebase PRs found" >&2 echo "[]" > "${prs_file}" + echo "[]" > "${prs_status_file}" else - local job_count - job_count=$(echo "${failed_prs}" | jq '[.[].jobs[] | select(.status == "FAILURE")] | length') - - echo " Downloading artifacts for ${job_count} failed jobs across ${failed_pr_count} PRs..." 
>&2 - local dl_err - dl_err=$(mktemp) - echo "${failed_prs}" | \ - bash "${SCRIPT_DIR}/download-jobs.sh" --workdir "${WORKDIR}" 2>"${dl_err}" \ - > "${prs_file}" - [[ -s "${dl_err}" ]] && cat "${dl_err}" >&2 - rm -f "${dl_err}" - - total_jobs=$((total_jobs + job_count)) - echo " Done: ${prs_file}" >&2 + # Save job status snapshot for all PRs (used by HTML report) + echo "${pr_json}" | jq '[.[] | { + pr_number, title, url, + passed: [.jobs[] | select(.status == "SUCCESS")] | length, + failed: [.jobs[] | select(.status == "FAILURE")] | length, + pending: [.jobs[] | select(.status != "SUCCESS" and .status != "FAILURE")] | length, + total: (.jobs | length) + }]' > "${prs_status_file}" + echo " Saved status for ${pr_count} rebase PRs" >&2 + + # Filter to PRs with failed jobs for artifact download + local failed_prs + failed_prs=$(echo "${pr_json}" | \ + jq '[.[] | select(.jobs | map(select(.status == "FAILURE")) | length > 0)]') + + local failed_pr_count + failed_pr_count=$(echo "${failed_prs}" | jq 'length') + + if [[ "${failed_pr_count}" -eq 0 ]]; then + echo " No PRs with failures to investigate" >&2 + echo "[]" > "${prs_file}" + else + local job_count + job_count=$(echo "${failed_prs}" | jq '[.[].jobs[] | select(.status == "FAILURE")] | length') + + echo " Downloading artifacts for ${job_count} failed jobs across ${failed_pr_count} PRs..." 
>&2 + local dl_err + dl_err=$(mktemp) + echo "${failed_prs}" | \ + bash "${SCRIPT_DIR}/download-jobs.sh" --workdir "${WORKDIR}" 2>"${dl_err}" \ + > "${prs_file}" + [[ -s "${dl_err}" ]] && cat "${dl_err}" >&2 + rm -f "${dl_err}" + + total_jobs=$((total_jobs + job_count)) + echo " Done: ${prs_file}" >&2 + fi fi fi fi @@ -211,16 +227,19 @@ cmd_finalize() { while [[ ${#} -gt 0 ]]; do case "${1}" in --workdir) WORKDIR="${2}"; shift 2 ;; + --product) PRODUCT="${2}"; shift 2 ;; -*) echo "Unknown option: ${1}" >&2; return 1 ;; *) releases_arg="${1}"; shift ;; esac done - WORKDIR="${WORKDIR:-/tmp/microshift-ci-claude-workdir.$(date +%y%m%d)}" + [[ -z "${PRODUCT}" ]] && { echo "Error: --product is required" >&2; return 1; } + + WORKDIR="${WORKDIR:-/tmp/${PRODUCT}-ci-claude-workdir.$(date +%y%m%d)}" if [[ -z "${releases_arg}" ]]; then echo "Error: releases argument required" >&2 - echo "Usage: $(basename "$0") finalize [--workdir DIR] " >&2 + echo "Usage: $(basename "$0") finalize --product PRODUCT [--workdir DIR] " >&2 return 1 fi @@ -248,7 +267,7 @@ cmd_finalize() { # Generate HTML report echo "=== Generating HTML report ===" >&2 python3 "${SCRIPT_DIR}/create-report.py" \ - --workdir "${WORKDIR}" "${releases_arg}" + --product "${PRODUCT}" --workdir "${WORKDIR}" "${releases_arg}" } # --------------------------------------------------------------------------- @@ -256,14 +275,17 @@ cmd_finalize() { # --------------------------------------------------------------------------- usage() { - echo "Usage: $(basename "$0") [--workdir DIR] [options]" >&2 + echo "Usage: $(basename "$0") --product PRODUCT [options] " >&2 echo "" >&2 echo "Commands:" >&2 - echo " prepare [--workdir DIR] [--rebase] Collect jobs and download artifacts" >&2 - echo " finalize [--workdir DIR] Aggregate results and generate HTML" >&2 + echo " prepare --product PRODUCT --filter FILTER [--workdir DIR] [--rebase]" >&2 + echo " finalize --product PRODUCT [--workdir DIR] " >&2 echo "" >&2 + echo " --product 
PRODUCT: Product name (e.g., microshift, lvms)" >&2 + echo " --filter FILTER: Job name filter for Prow (e.g., microshift, lvm)" >&2 echo " : comma-separated release versions (e.g., 4.18,4.19,4.20,main)" >&2 - echo " --workdir DIR: work directory (default: /tmp/microshift-ci-claude-workdir.YYMMDD)" >&2 + echo " --workdir DIR: work directory (default: /tmp/PRODUCT-ci-claude-workdir.YYMMDD)" >&2 + echo " --rebase: collect rebase PR data (prepare only)" >&2 exit 1 } diff --git a/plugins/microshift-ci/scripts/download-jobs.sh b/plugins/shared/scripts/download-jobs.sh similarity index 100% rename from plugins/microshift-ci/scripts/download-jobs.sh rename to plugins/shared/scripts/download-jobs.sh diff --git a/plugins/microshift-ci/scripts/prow-jobs-for-release.sh b/plugins/shared/scripts/prow-jobs-for-release.sh similarity index 57% rename from plugins/microshift-ci/scripts/prow-jobs-for-release.sh rename to plugins/shared/scripts/prow-jobs-for-release.sh index 1c522e6e..fc3e190d 100755 --- a/plugins/microshift-ci/scripts/prow-jobs-for-release.sh +++ b/plugins/shared/scripts/prow-jobs-for-release.sh @@ -1,17 +1,24 @@ #!/bin/bash set -euo pipefail -# Prow Jobs Analyzer for MicroShift +# Prow Jobs Analyzer +# Fetches periodic job status from the Prow data.js API # Output: JSON array of job objects on stdout # Progress/errors: stderr +# +# Usage: +# prow-jobs-for-release.sh --filter microshift 4.22 +# prow-jobs-for-release.sh --filter lvm 4.22 +# prow-jobs-for-release.sh --filter lvm --mode status 4.22 PROW_URL="https://prow.ci.openshift.org/data.js" -# Fetch all MicroShift jobs for a release, return latest run per job as JSON +# Fetch all jobs matching a filter for a release, return latest run per job as JSON fetch_latest_per_job() { local release="${1}" - curl -s --max-time 60 "${PROW_URL}" | jq --arg release "${release}" ' - [.[] | select((.job | contains("microshift")) and (.job | contains($release)))] | + local filter="${2}" + curl -s --max-time 60 "${PROW_URL}" | jq 
--arg release "${release}" --arg filter "${filter}" ' + [.[] | select((.job | contains($filter)) and (.job | contains($release)))] | group_by(.job) | map(sort_by(.started | tonumber) | reverse | first) | [.[] | { @@ -27,7 +34,8 @@ fetch_latest_per_job() { } usage() { - echo "Usage: ${0} [--mode MODE] " >&2 + echo "Usage: ${0} --filter FILTER [--mode MODE] " >&2 + echo " --filter FILTER: Job name filter (e.g., microshift, lvm)" >&2 echo " --mode MODE: Operation mode (default: failed)" >&2 echo " status: Latest run status for each job" >&2 echo " failed: Only jobs with failure status" >&2 @@ -37,6 +45,7 @@ usage() { main() { local mode="failed" + local filter="" local release="" while [[ ${#} -gt 0 ]]; do @@ -44,16 +53,20 @@ main() { --mode) [[ ${#} -lt 2 ]] && { echo "Error: mode requires an argument" >&2; usage; } mode="${2}"; shift 2 ;; + --filter) + [[ ${#} -lt 2 ]] && { echo "Error: filter requires an argument" >&2; usage; } + filter="${2}"; shift 2 ;; -*) echo "Unknown option: ${1}" >&2; usage ;; *) release="${1}"; shift ;; esac done + [[ -z "${filter}" ]] && { echo "Error: --filter is required" >&2; usage; } [[ -z "${release}" ]] && { echo "Error: release argument is required" >&2; usage; } case "${mode}" in - status) fetch_latest_per_job "${release}" ;; - failed) fetch_latest_per_job "${release}" | jq '[.[] | select(.status == "failure")]' ;; + status) fetch_latest_per_job "${release}" "${filter}" ;; + failed) fetch_latest_per_job "${release}" "${filter}" | jq '[.[] | select(.status == "failure")]' ;; *) echo "Error: Unknown mode '${mode}'" >&2; usage ;; esac }