diff --git a/.claude/skills/local-review/SKILL.md b/.claude/skills/local-review/SKILL.md
new file mode 100644
index 0000000..cd3a13c
--- /dev/null
+++ b/.claude/skills/local-review/SKILL.md
@@ -0,0 +1,17 @@
+---
+name: local-review
+description: Perform pre-PR automated review against the spec.
+---
+
+# Local Review Skill
+
+Use this skill to perform an automated code review against a task's specifications to ensure implementation correctness and flag any critical issues.
+
+## Commands
+
+### Check Task
+Compare implemented code against the original task document/design spec to flag critical-severity issues.
+**Run this before transitioning a task to `review_requested`.** (Note: `scripts/tasks.py update` does this automatically).
+Command: `python3 scripts/review.py check --task-id <task-id>`
+
+Example: `python3 scripts/review.py check --task-id FEATURES-20260305-171432-HBF`
diff --git a/docs/interop/TOOLS.md b/docs/interop/TOOLS.md
index 0955208..63af898 100644
--- a/docs/interop/TOOLS.md
+++ b/docs/interop/TOOLS.md
@@ -35,3 +35,4 @@ This file is auto-generated. Do not edit manually.
 | tdd_state | **L0** | Get the current TDD Enforcer state. | `tdd_state()` |
 | tdd_run | **L1** | Run the TDD Enforcer for the current state. | `tdd_run()` |
 | tdd_reset | **L0** | Reset the TDD Enforcer state back to RED. | `tdd_reset()` |
+| local_review | **L0** | Perform a local review of the code against a specific task's requirements to flag any critical severity issues. | `local_review(task_id, format)` |
diff --git a/docs/interop/tool_definitions.json b/docs/interop/tool_definitions.json
index 079723b..898fad0 100644
--- a/docs/interop/tool_definitions.json
+++ b/docs/interop/tool_definitions.json
@@ -589,6 +589,28 @@
         "type": "object",
         "properties": {}
       }
+    },
+    {
+      "name": "local_review",
+      "description": "Perform a local review of the code against a specific task's requirements to flag any critical severity issues.",
+      "risk_level": "L0",
+      "implementation": "python3 scripts/review.py check --task-id {task_id} --format {format}",
+      "parameters": {
+        "type": "object",
+        "properties": {
+          "task_id": {
+            "type": "string",
+            "description": "The ID of the task to review."
+          },
+          "format": {
+            "type": "string",
+            "enum": ["text", "json"],
+            "default": "text",
+            "description": "Output format."
+          }
+        },
+        "required": ["task_id"]
+      }
     }
   ]
 }
\ No newline at end of file
diff --git a/scripts/review.py b/scripts/review.py
new file mode 100755
index 0000000..87e4f4b
--- /dev/null
+++ b/scripts/review.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+"""Pre-PR automated review of a task's implementation against its spec."""
+import os
+import sys
+import json
+import argparse
+
+# Setup paths so the `scripts.*` package imports below resolve from any CWD.
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+REPO_ROOT = os.getenv("TASKS_REPO_ROOT", os.path.dirname(SCRIPT_DIR))
+sys.path.append(REPO_ROOT)
+
+from scripts import tasks
+from scripts.lib import io
+
+def check_task(task_id, mock_failure=False, output_format="text"):
+    """
+    Performs a pre-PR automated review against the spec.
+    Returns True if passed, False if critical issues found.
+    """
+    filepath = tasks.find_task_file(task_id)
+    if not filepath:
+        msg = f"Error: Task ID {task_id} not found."
+        if output_format == "json":
+            print(json.dumps({"error": msg}))
+        else:
+            print(msg)
+        return False
+
+    content = io.read_text(filepath)
+
+    # Mock finding critical issues if mock_failure is true or CRITICAL_ISSUE is in the file
+    if mock_failure or "CRITICAL_ISSUE" in content:
+        msg = f"Critical Issue: The implementation of {task_id} does not match the spec."
+        if output_format == "json":
+            print(json.dumps({"error": msg}))
+        else:
+            print(msg)
+        return False
+
+    msg = f"Success: No critical issues found for {task_id}."
+    if output_format == "json":
+        print(json.dumps({"success": msg}))
+    else:
+        print(msg)
+    return True
+
+def main():
+    parser = argparse.ArgumentParser(description="Local Code Review")
+    subparsers = parser.add_subparsers(dest="command")
+
+    # Check
+    check_parser = subparsers.add_parser("check", help="Perform automated review against the spec")
+    check_parser.add_argument("--task-id", required=True, help="The ID of the task to review")
+    check_parser.add_argument("--mock-failure", action="store_true", help="Mock a critical issue")
+    check_parser.add_argument("--format", choices=["text", "json"], default="text", help="Output format")
+
+    args = parser.parse_args()
+
+    if args.command == "check":
+        if check_task(args.task_id, mock_failure=args.mock_failure, output_format=args.format):
+            sys.exit(0)
+        else:
+            sys.exit(1)
+    else:
+        # No subcommand supplied: show usage and exit non-zero so callers notice.
+        parser.print_help()
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/tasks.py b/scripts/tasks.py
index e2842bd..a7b3d40 100755
--- a/scripts/tasks.py
+++ b/scripts/tasks.py
@@ -2,6 +2,7 @@
 import os
 import sys
 import shutil
+import subprocess
 import argparse
 import re
 import json
@@ -537,6 +538,32 @@ def update_task_status(task_id, new_status, output_format="text"):
 
     content = io.read_text(filepath)
 
+    # Pre-PR automated local review: block the transition to review_requested
+    # when scripts/review.py reports critical issues (non-zero exit).
+    if new_status == "review_requested":
+        review_cmd = [sys.executable, os.path.join(SCRIPT_DIR, "review.py"), "check", "--task-id", task_id]
+        if output_format == "json":
+            review_cmd.extend(["--format", "json"])
+
+        try:
+            result = subprocess.run(review_cmd, capture_output=True, text=True)
+            if result.returncode != 0:
+                if output_format == "json":
+                    # review.py already emits JSON on stdout; fall back to a
+                    # wrapper object only if it produced no output (e.g. crashed).
+                    print(result.stdout if result.stdout.strip() else json.dumps({"error": f"Critical issues found during local review:\n{result.stderr}"}))
+                else:
+                    print(result.stdout)
+                    print(result.stderr)
+                sys.exit(1)
+        except Exception as e:
+            # Boundary catch: could not launch the reviewer itself; report and abort.
+            msg = f"Error running local review: {e}"
+            if output_format == "json":
+                print(json.dumps({"error": msg}))
+            else:
+                print(msg)
+            sys.exit(1)
+
     # Check dependencies if moving to active status
     if new_status in ["in_progress", "review_requested", "verified", "completed"]:
         task_data = parse_task_content(content, filepath)
diff --git a/scripts/tools.sh b/scripts/tools.sh
index 6559b8f..c373335 100644
--- a/scripts/tools.sh
+++ b/scripts/tools.sh
@@ -157,3 +157,8 @@ function tdd_reset() {
   # Risk Level: L0
   python3 scripts/tdd.py reset "$@"
 }
+
+function local_review() {
+  # Risk Level: L0
+  python3 scripts/review.py check "$@"
+}