From 9875a6d31600a4368318549339ec0d3ed3f93739 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:02:31 -0700 Subject: [PATCH 01/20] feat: add contributor vetting test script Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 379 ++++++++++++++++++++++++++++ 1 file changed, 379 insertions(+) create mode 100644 scripts/test_contributor_vetting.py diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py new file mode 100644 index 00000000..c9862722 --- /dev/null +++ b/scripts/test_contributor_vetting.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python3 +""" +CLI test script for contributor vetting using custom LLM prompts. + +This script fetches unique contributors (via proposal.creator) from a DAO's past proposals, +evaluates each for future contribution eligibility (allow/block), and saves raw JSON results. + +Usage: + python test_contributor_vetting.py --dao-id "123e4567-e89b-12d3-a456-426614174000" + python test_contributor_vetting.py --dao-id "DAO_ID" --max-contributors 20 --save-output --model "x-ai/grok-beta" +""" + +import argparse +import asyncio +import json +import logging +import os +import sys +from datetime import datetime +from typing import Dict, Any, List, Optional, Literal +from uuid import UUID + +# Add the parent directory (root) to the path to import from app +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from app.lib.logger import StructuredFormatter, setup_uvicorn_logging +from app.backend.factory import get_backend +from app.backend.models import ProposalFilter, DAO +from app.services.ai.simple_workflows.llm import invoke_structured +from app.services.ai.simple_workflows.prompts.loader import load_prompt # Optional, for future +from pydantic import BaseModel, Field + +# Custom Pydantic model for structured LLM output +class ContributorVettingOutput(BaseModel): + contributor_id: str = Field(description="Unique contributor 
identifier (e.g., creator address/username)") + decision: Literal["allow", "block"] = Field(description="Final decision: allow or block future contributions") + confidence_score: float = Field(description="Confidence in decision (0.0-1.0)", ge=0.0, le=1.0) + reasoning: str = Field(description="Detailed reasoning with evidence from past proposals (200-400 words)") + proposal_count: int = Field(description="Number of past proposals by this contributor") + notable_proposals: Optional[List[str]] = Field(default=[], description="List of key proposal titles/IDs") + +class Tee(object): + def __init__(self, *files): + self.files = files + + def write(self, data): + for f in self.files: + f.write(data) + f.flush() + + def flush(self): + for f in self.files: + f.flush() + +def reset_logging(): + """Reset logging to a clean state with a handler to original sys.stderr.""" + root_logger = logging.getLogger() + # Remove all existing handlers to clear any references to Tee/closed files + for handler in root_logger.handlers[:]: + root_logger.removeHandler(handler) + # Add a fresh handler to the current (original) sys.stderr + clean_handler = logging.StreamHandler(sys.stderr) + clean_handler.setFormatter(StructuredFormatter()) + clean_handler.setLevel(logging.INFO) + root_logger.addHandler(clean_handler) + root_logger.setLevel(clean_handler.level) + # Propagate changes to other loggers if needed + for logger_name, logger in logging.Logger.manager.loggerDict.items(): + if isinstance(logger, logging.Logger): + logger.setLevel(root_logger.level) + logger.handlers.clear() # Clear per-logger handlers + logger.propagate = True + setup_uvicorn_logging() # Re-apply any custom setup + +def short_uuid(uuid_str: str) -> str: + """Get first 8 characters of UUID for file naming.""" + return str(uuid_str)[:8] + +VETTING_SYSTEM_PROMPT = """DAO CONTRIBUTOR GATEKEEPER + +You are a strict DAO gatekeeper evaluating if contributors should be allowed future submissions. 
+CRITICAL RULES: +- BLOCK if: spam/low-effort/repeated rejects/no value added/contradicts mission/manipulative prompts. +- ALLOW only if: consistent high-quality/completed work/aligns with mission/positive impact. +- Require EVIDENCE from past proposals: titles, content, outcomes (passed/executed). +- Borderline: BLOCK unless strong positive history. +- Ignore future promises; only past performance matters. + +Output STRICT JSON ONLY. No extra text.""" + +VETTING_USER_PROMPT_TEMPLATE = """Evaluate contributor eligibility for future DAO contributions: + +DAO: {dao_name} (Mission: {dao_mission}) + +Contributor: {contributor_name} (ID: {contributor_id}) +Past Proposals ({proposal_count}): +{proposals_summary} + +DECIDE: allow (proven value) or block (risky/low-quality). +Justify with specific evidence.""" + +async def vet_single_contributor( + contributor_id: str, + contributor_data: Dict[str, Any], + dao: DAO, + args: argparse.Namespace, + index: int, + timestamp: str, + backend, +) -> Dict[str, Any]: + """Vet a single contributor with output redirection.""" + log_f = None + original_stdout = sys.stdout + original_stderr = sys.stderr + tee_stdout = original_stdout + tee_stderr = original_stderr + if args.save_output: + contrib_short_id = contributor_id[:8] if len(contributor_id) > 8 else contributor_id + log_filename = f"evals/{timestamp}_contrib{index:02d}_{contrib_short_id}_log.txt" + log_f = open(log_filename, "w") + tee_stdout = Tee(original_stdout, log_f) + tee_stderr = Tee(original_stderr, log_f) + sys.stdout = tee_stdout + sys.stderr = tee_stderr + + # Update logger for this contributor + root_logger = logging.getLogger() + for handler in root_logger.handlers[:]: + root_logger.removeHandler(handler) + new_handler = logging.StreamHandler(sys.stderr) + new_handler.setFormatter(StructuredFormatter()) + new_handler.setLevel(logging.DEBUG if args.debug_level >= 2 else logging.INFO) + root_logger.addHandler(new_handler) + root_logger.setLevel(new_handler.level) + 
setup_uvicorn_logging() + for logger_name, logger in logging.Logger.manager.loggerDict.items(): + if isinstance(logger, logging.Logger): + logger.setLevel(root_logger.level) + for handler in logger.handlers[:]: + logger.removeHandler(handler) + logger.propagate = True + + try: + print(f"šŸ” Vetting contributor {index}: {contributor_id}") + + # Format contributor data + proposals = contributor_data.get("proposals", []) + proposal_count = len(proposals) + proposals_summary = "\n".join([f"- {p.get('title', 'Untitled')} (ID: {p.get('id', 'N/A')}, Status: {p.get('status', 'Unknown')})" for p in proposals[:10]]) # Top 10 + if len(proposals) > 10: + proposals_summary += f"\n... and {proposal_count - 10} more." + + messages = [ + {"role": "system", "content": VETTING_SYSTEM_PROMPT}, + { + "role": "user", + "content": VETTING_USER_PROMPT_TEMPLATE.format( + dao_name=dao.name or "Unknown DAO", + dao_mission=dao.mission or "No mission provided", + contributor_name=contributor_data.get("name", contributor_id), + contributor_id=contributor_id, + proposal_count=proposal_count, + proposals_summary=proposals_summary, + ), + }, + ] + + # Invoke LLM with structured output + result = await invoke_structured( + messages, + ContributorVettingOutput, + model=args.model, + temperature=args.temperature, + ) + + result_dict = { + "contributor_id": contributor_id, + "contributor_data": contributor_data, + "dao_id": str(dao.id), + "vetting_output": result.model_dump(), + } + + # Save JSON if requested + if args.save_output: + json_filename = f"evals/{timestamp}_contrib{index:02d}_{contrib_short_id}_raw.json" + with open(json_filename, "w") as f: + json.dump(result_dict, f, indent=2, default=str) + print(f"āœ… Results saved to {json_filename} and {log_filename}") + + return result_dict + + except Exception as e: + error_msg = f"Error vetting contributor {contributor_id}: {str(e)}" + print(error_msg) + return { + "contributor_id": contributor_id, + "contributor_data": contributor_data, + 
"dao_id": str(dao.id), + "error": error_msg, + } + + finally: + sys.stdout.flush() + sys.stderr.flush() + sys.stdout = original_stdout + sys.stderr = original_stderr + if log_f: + log_f.close() + +def generate_summary( + results: List[Dict[str, Any]], timestamp: str, save_output: bool, dao_id: str +) -> None: + """Generate a simple summary JSON (raw results array + aggregates).""" + allow_count = sum(1 for r in results if r.get("vetting_output", {}).get("decision") == "allow") + block_count = sum(1 for r in results if r.get("vetting_output", {}).get("decision") == "block") + error_count = sum(1 for r in results if "error" in r) + + summary = { + "timestamp": timestamp, + "dao_id": dao_id, + "total_contributors": len(results), + "allow_count": allow_count, + "block_count": block_count, + "error_count": error_count, + "results": results, # Raw array of result_dicts + } + + print(f"Vetting Summary - {timestamp} (DAO: {dao_id})") + print("=" * 60) + print(f"Total Contributors: {len(results)}") + print(f"Allow: {allow_count} | Block: {block_count} | Errors: {error_count}") + print("See summary JSON for raw details.") + print("=" * 60) + + if save_output: + summary_json = f"evals/{timestamp}_summary_dao{short_uuid(dao_id)}_vetting.json" + with open(summary_json, "w") as f: + json.dump(summary, f, indent=2, default=str) + print(f"āœ… Summary saved to {summary_json}") + +def main(): + parser = argparse.ArgumentParser( + description="Test contributor vetting for a DAO using custom LLM prompts", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Vet all contributors for a DAO + python test_contributor_vetting.py --dao-id "12345678-1234-5678-9012-123456789abc" --save-output + + # Limit to top 10 with custom model + python test_contributor_vetting.py --dao-id "DAO_ID" --max-contributors 10 --model "x-ai/grok-beta" --temperature 0.1 + """, + ) + + parser.add_argument( + "--dao-id", + type=str, + required=True, + help="UUID of the DAO to vet 
contributors for", + ) + + parser.add_argument( + "--max-contributors", + type=int, + default=50, + help="Max contributors to vet (default: 50, 0=unlimited)", + ) + + parser.add_argument( + "--model", + type=str, + default=None, + help="Model override (e.g., 'x-ai/grok-beta')", + ) + + parser.add_argument( + "--temperature", + type=float, + default=0.1, + help="Temperature for evaluation (default: 0.1)", + ) + + parser.add_argument( + "--debug-level", + type=int, + choices=[0, 1, 2], + default=0, + help="Debug level: 0=normal, 1=verbose, 2=very verbose (default: 0)", + ) + + parser.add_argument( + "--save-output", + action="store_true", + help="Save outputs to timestamped files in evals/", + ) + + args = parser.parse_args() + + now = datetime.now() + timestamp = now.strftime("%Y%m%d_%H%M%S") + + if args.save_output: + os.makedirs("evals", exist_ok=True) + + print("šŸš€ Starting DAO Contributor Vetting Test") + print("=" * 60) + print(f"DAO ID: {args.dao_id}") + print(f"Max Contributors: {args.max_contributors}") + print(f"Model: {args.model or 'default'}") + print(f"Temperature: {args.temperature}") + print(f"Debug Level: {args.debug_level}") + print(f"Save Output: {args.save_output}") + print("=" * 60) + + # Create single backend instance + backend = get_backend() + + try: + # Fetch DAO + dao_uuid = UUID(args.dao_id) + dao = backend.get_dao(dao_uuid) + if not dao: + print(f"āŒ DAO not found: {args.dao_id}") + sys.exit(1) + print(f"āœ… DAO loaded: {dao.name} (ID: {dao.id})") + + # Fetch all proposals for DAO + proposals = backend.list_proposals(ProposalFilter(dao_id=dao_uuid)) + print(f"šŸ“Š Found {len(proposals)} proposals") + + if not proposals: + print("āŒ No proposals found for DAO. 
Nothing to vet.") + sys.exit(0) + + # Group by unique contributors (using proposal.creator str) + contributors: Dict[str, List[Dict]] = {} + for p in proposals: + creator = p.creator + if creator: # Skip if no creator + if creator not in contributors: + contributors[creator] = [] + contributors[creator].append({ + "id": str(p.id), + "title": p.title or "Untitled", + "content": p.content[:200] + "..." if p.content and len(p.content) > 200 else p.content or "", + "status": str(p.status), + "passed": p.passed, + "executed": p.executed, + }) + + contributor_list = list(contributors.items()) + if args.max_contributors > 0: + contributor_list = contributor_list[:args.max_contributors] + print(f"šŸ‘„ Unique contributors to vet: {len(contributor_list)}") + + results = [] + for index, (contributor_id, proposals) in enumerate(contributor_list, 1): + contributor_data = { + "name": contributor_id, # Use ID as name fallback + "proposals": proposals, + } + result = asyncio.run( + vet_single_contributor(contributor_id, contributor_data, dao, args, index, timestamp, backend) + ) + results.append(result) + + # Reset logging + reset_logging() + + generate_summary(results, timestamp, args.save_output, str(dao_uuid)) + + print("\nšŸŽ‰ Contributor vetting test completed!") + + finally: + # Clean up backend + backend.sqlalchemy_engine.dispose() + +if __name__ == "__main__": + main() From 41aeb5b3c302932fb33fb6dd7cdaf8d1943c9173 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:04:40 -0700 Subject: [PATCH 02/20] feat: use DAO name instead of ID and add dry-run flag Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 43 ++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index c9862722..2f5255b4 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -25,7 +25,7 @@ from 
app.lib.logger import StructuredFormatter, setup_uvicorn_logging from app.backend.factory import get_backend -from app.backend.models import ProposalFilter, DAO +from app.backend.models import ProposalFilter, DAO, DAOFilter from app.services.ai.simple_workflows.llm import invoke_structured from app.services.ai.simple_workflows.prompts.loader import load_prompt # Optional, for future from pydantic import BaseModel, Field @@ -225,7 +225,7 @@ def generate_summary( "results": results, # Raw array of result_dicts } - print(f"Vetting Summary - {timestamp} (DAO: {dao_id})") + print(f"Vetting Summary - {timestamp} (DAO ID: {dao_id})") print("=" * 60) print(f"Total Contributors: {len(results)}") print(f"Allow: {allow_count} | Block: {block_count} | Errors: {error_count}") @@ -244,19 +244,22 @@ def main(): formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: + # Dry run: list contributors without evaluating + python test_contributor_vetting.py --dao-name "MyDAO" --dry-run + # Vet all contributors for a DAO - python test_contributor_vetting.py --dao-id "12345678-1234-5678-9012-123456789abc" --save-output + python test_contributor_vetting.py --dao-name "MyDAO" --save-output # Limit to top 10 with custom model - python test_contributor_vetting.py --dao-id "DAO_ID" --max-contributors 10 --model "x-ai/grok-beta" --temperature 0.1 + python test_contributor_vetting.py --dao-name "MyDAO" --max-contributors 10 --model "x-ai/grok-beta" --temperature 0.1 """, ) parser.add_argument( - "--dao-id", + "--dao-name", type=str, required=True, - help="UUID of the DAO to vet contributors for", + help="Name of the DAO to vet contributors for", ) parser.add_argument( @@ -266,6 +269,12 @@ def main(): help="Max contributors to vet (default: 50, 0=unlimited)", ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Dry run: list contributors without performing LLM evaluations", + ) + parser.add_argument( "--model", type=str, @@ -304,8 +313,9 @@ def main(): 
print("šŸš€ Starting DAO Contributor Vetting Test") print("=" * 60) - print(f"DAO ID: {args.dao_id}") + print(f"DAO Name: {args.dao_name}") print(f"Max Contributors: {args.max_contributors}") + print(f"Dry Run: {args.dry_run}") print(f"Model: {args.model or 'default'}") print(f"Temperature: {args.temperature}") print(f"Debug Level: {args.debug_level}") @@ -316,16 +326,16 @@ def main(): backend = get_backend() try: - # Fetch DAO - dao_uuid = UUID(args.dao_id) - dao = backend.get_dao(dao_uuid) - if not dao: - print(f"āŒ DAO not found: {args.dao_id}") + # Fetch DAO by name + daos = backend.list_daos(DAOFilter(name=args.dao_name)) + if not daos: + print(f"āŒ DAO '{args.dao_name}' not found.") sys.exit(1) + dao = daos[0] print(f"āœ… DAO loaded: {dao.name} (ID: {dao.id})") # Fetch all proposals for DAO - proposals = backend.list_proposals(ProposalFilter(dao_id=dao_uuid)) + proposals = backend.list_proposals(ProposalFilter(dao_id=dao.id)) print(f"šŸ“Š Found {len(proposals)} proposals") if not proposals: @@ -353,6 +363,13 @@ def main(): contributor_list = contributor_list[:args.max_contributors] print(f"šŸ‘„ Unique contributors to vet: {len(contributor_list)}") + if args.dry_run: + print("\n--- DRY RUN: Contributors that would be vetted ---") + for index, (contributor_id, proposals_list) in enumerate(contributor_list, 1): + print(f" {index}. {contributor_id} ({len(proposals_list)} proposals)") + print("Dry run complete. 
No LLM evaluations performed.\n") + sys.exit(0) + results = [] for index, (contributor_id, proposals) in enumerate(contributor_list, 1): contributor_data = { From fb636d985e48904c774a47515f8dc99e73c0b88e Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:08:46 -0700 Subject: [PATCH 03/20] fix: implement direct OpenRouter call in contributor vetting test Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 87 +++++++++++++++++++++++++++-- 1 file changed, 83 insertions(+), 4 deletions(-) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index 2f5255b4..4d2ddadf 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -28,6 +28,8 @@ from app.backend.models import ProposalFilter, DAO, DAOFilter from app.services.ai.simple_workflows.llm import invoke_structured from app.services.ai.simple_workflows.prompts.loader import load_prompt # Optional, for future +from app.config import config +import httpx from pydantic import BaseModel, Field # Custom Pydantic model for structured LLM output @@ -165,8 +167,8 @@ async def vet_single_contributor( }, ] - # Invoke LLM with structured output - result = await invoke_structured( + # Call OpenRouter directly with structured JSON parsing (mirrors evaluation_openrouter_v2.py) + openrouter_response = await call_openrouter_structured( messages, ContributorVettingOutput, model=args.model, @@ -177,7 +179,8 @@ async def vet_single_contributor( "contributor_id": contributor_id, "contributor_data": contributor_data, "dao_id": str(dao.id), - "vetting_output": result.model_dump(), + "vetting_output": openrouter_response.model_dump(), + "usage": getattr(openrouter_response, "usage", None), } # Save JSON if requested @@ -384,7 +387,7 @@ def main(): # Reset logging reset_logging() - generate_summary(results, timestamp, args.save_output, str(dao_uuid)) + generate_summary(results, timestamp, args.save_output, 
str(dao.id)) print("\nšŸŽ‰ Contributor vetting test completed!") @@ -392,5 +395,81 @@ def main(): # Clean up backend backend.sqlalchemy_engine.dispose() + +async def call_openrouter_structured( + messages: List[Dict[str, Any]], + output_model: type[BaseModel], + model: Optional[str] = None, + temperature: Optional[float] = None, +) -> ContributorVettingOutput: + """Direct OpenRouter API call with JSON parsing and Pydantic validation (adapted from evaluation_openrouter_v2.py).""" + config_data = { + "api_key": config.chat_llm.api_key, + "model": model or config.chat_llm.default_model, + "temperature": temperature or config.chat_llm.default_temperature, + "base_url": config.chat_llm.api_base, + } + + payload = { + "messages": messages, + "model": config_data["model"], + "temperature": config_data["temperature"], + } + + headers = { + "Authorization": f"Bearer {config_data['api_key']}", + "HTTP-Referer": "https://aibtc.com", + "X-Title": "AIBTC", + "Content-Type": "application/json", + } + + print(f"šŸ“” Calling OpenRouter: {config_data['model']} (temp={config_data['temperature']:.1f})") + + async with httpx.AsyncClient(timeout=120.0) as client: + response = await client.post( + f"{config_data['base_url']}/chat/completions", + json=payload, + headers=headers, + ) + response.raise_for_status() + + data = response.json() + choices = data.get("choices", []) + if not choices: + raise ValueError("No choices in OpenRouter response") + + choice_message = choices[0].get("message") + if not choice_message or not isinstance(choice_message.get("content"), str): + raise ValueError("Invalid message content in response") + + try: + # Parse strict JSON from content + evaluation_json = json.loads(choice_message["content"]) + + # Extract usage + usage = data.get("usage", {}) + input_tokens = usage.get("prompt_tokens") + output_tokens = usage.get("completion_tokens") + usage_info = { + "input_tokens": input_tokens, + "output_tokens": output_tokens, + } if input_tokens is not None and 
output_tokens is not None else None + + # Validate with Pydantic + add usage + result = output_model(**evaluation_json) + if usage_info: + # Monkey-patch usage to model instance (for summary/export) + object.__setattr__(result, "usage", usage_info) + + print(f"āœ… OpenRouter success: {result.decision} (conf: {result.confidence_score:.2f})") + return result + + except json.JSONDecodeError as e: + print(f"āŒ JSON decode error: {e}\nRaw content: {choice_message['content'][:500]}...") + raise + except ValueError as e: + print(f"āŒ Pydantic validation error: {e}") + raise + if __name__ == "__main__": main() From fb361c2f9df1debb46900efb2e97b8983642677b Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:11:38 -0700 Subject: [PATCH 04/20] docs: update docstring to use dao-name instead of dao-id Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index 4d2ddadf..07f34dc5 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -6,8 +6,9 @@ evaluates each for future contribution eligibility (allow/block), and saves raw JSON results. 
Usage: - python test_contributor_vetting.py --dao-id "123e4567-e89b-12d3-a456-426614174000" - python test_contributor_vetting.py --dao-id "DAO_ID" --max-contributors 20 --save-output --model "x-ai/grok-beta" + python test_contributor_vetting.py --dao-name "MyDAO" --dry-run + python test_contributor_vetting.py --dao-name "MyDAO" --save-output + python test_contributor_vetting.py --dao-name "MyDAO" --max-contributors 20 --model "x-ai/grok-beta" """ import argparse From 77248aeb312f83b5b1ab302e3d3b844ed8f2be85 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:18:07 -0700 Subject: [PATCH 05/20] fix: update prompting, formatting --- scripts/test_contributor_vetting.py | 151 ++++++++++++++++++++-------- 1 file changed, 111 insertions(+), 40 deletions(-) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index 07f34dc5..1b182e49 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -19,7 +19,6 @@ import sys from datetime import datetime from typing import Dict, Any, List, Optional, Literal -from uuid import UUID # Add the parent directory (root) to the path to import from app sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -27,20 +26,32 @@ from app.lib.logger import StructuredFormatter, setup_uvicorn_logging from app.backend.factory import get_backend from app.backend.models import ProposalFilter, DAO, DAOFilter -from app.services.ai.simple_workflows.llm import invoke_structured -from app.services.ai.simple_workflows.prompts.loader import load_prompt # Optional, for future from app.config import config import httpx from pydantic import BaseModel, Field + # Custom Pydantic model for structured LLM output class ContributorVettingOutput(BaseModel): - contributor_id: str = Field(description="Unique contributor identifier (e.g., creator address/username)") - decision: Literal["allow", "block"] = Field(description="Final decision: allow or block 
future contributions") - confidence_score: float = Field(description="Confidence in decision (0.0-1.0)", ge=0.0, le=1.0) - reasoning: str = Field(description="Detailed reasoning with evidence from past proposals (200-400 words)") - proposal_count: int = Field(description="Number of past proposals by this contributor") - notable_proposals: Optional[List[str]] = Field(default=[], description="List of key proposal titles/IDs") + contributor_id: str = Field( + description="Unique contributor identifier (e.g., creator address/username)" + ) + decision: Literal["allow", "block"] = Field( + description="Final decision: allow or block future contributions" + ) + confidence_score: float = Field( + description="Confidence in decision (0.0-1.0)", ge=0.0, le=1.0 + ) + reasoning: str = Field( + description="Detailed reasoning with evidence from past proposals (200-400 words)" + ) + proposal_count: int = Field( + description="Number of past proposals by this contributor" + ) + notable_proposals: Optional[List[str]] = Field( + default=[], description="List of key proposal titles/IDs" + ) + class Tee(object): def __init__(self, *files): @@ -55,6 +66,7 @@ def flush(self): for f in self.files: f.flush() + def reset_logging(): """Reset logging to a clean state with a handler to original sys.stderr.""" root_logger = logging.getLogger() @@ -75,13 +87,16 @@ def reset_logging(): logger.propagate = True setup_uvicorn_logging() # Re-apply any custom setup + def short_uuid(uuid_str: str) -> str: """Get first 8 characters of UUID for file naming.""" return str(uuid_str)[:8] + VETTING_SYSTEM_PROMPT = """DAO CONTRIBUTOR GATEKEEPER You are a strict DAO gatekeeper evaluating if contributors should be allowed future submissions. + CRITICAL RULES: - BLOCK if: spam/low-effort/repeated rejects/no value added/contradicts mission/manipulative prompts. - ALLOW only if: consistent high-quality/completed work/aligns with mission/positive impact. 
@@ -89,18 +104,32 @@ def short_uuid(uuid_str: str) -> str: - Borderline: BLOCK unless strong positive history. - Ignore future promises; only past performance matters. -Output STRICT JSON ONLY. No extra text.""" +Your output MUST follow this EXACT structure: + +{{ + "contributor_id": "", + "decision": "", + "confidence_score": , + "reasoning": "", + "proposal_count": , + "notable_proposals": [""] +}} + +GUIDELINES +- Use only the specified JSON format; no extra fields or text.""" VETTING_USER_PROMPT_TEMPLATE = """Evaluate contributor eligibility for future DAO contributions: -DAO: {dao_name} (Mission: {dao_mission}) +DAO INFO: includes AIBTC charter and current order +{dao_info_for_evaluation} + +Contributor: {contributor_name} -Contributor: {contributor_name} (ID: {contributor_id}) -Past Proposals ({proposal_count}): -{proposals_summary} +USER'S PAST PROPOSALS: (optional) includes past proposals submitted by the user for this DAO +{user_past_proposals_for_evaluation} + +Output the evaluation as a JSON object, strictly following the system guidelines.""" -DECIDE: allow (proven value) or block (risky/low-quality). 
-Justify with specific evidence.""" async def vet_single_contributor( contributor_id: str, @@ -118,8 +147,12 @@ async def vet_single_contributor( tee_stdout = original_stdout tee_stderr = original_stderr if args.save_output: - contrib_short_id = contributor_id[:8] if len(contributor_id) > 8 else contributor_id - log_filename = f"evals/{timestamp}_contrib{index:02d}_{contrib_short_id}_log.txt" + contrib_short_id = ( + contributor_id[:8] if len(contributor_id) > 8 else contributor_id + ) + log_filename = ( + f"evals/{timestamp}_contrib{index:02d}_{contrib_short_id}_log.txt" + ) log_f = open(log_filename, "w") tee_stdout = Tee(original_stdout, log_f) tee_stderr = Tee(original_stderr, log_f) @@ -149,7 +182,12 @@ async def vet_single_contributor( # Format contributor data proposals = contributor_data.get("proposals", []) proposal_count = len(proposals) - proposals_summary = "\n".join([f"- {p.get('title', 'Untitled')} (ID: {p.get('id', 'N/A')}, Status: {p.get('status', 'Unknown')})" for p in proposals[:10]]) # Top 10 + proposals_summary = "\n".join( + [ + f"- {p.get('title', 'Untitled')} (ID: {p.get('id', 'N/A')}, Status: {p.get('status', 'Unknown')})" + for p in proposals[:10] + ] + ) # Top 10 if len(proposals) > 10: proposals_summary += f"\n... and {proposal_count - 10} more." 
@@ -186,7 +224,9 @@ async def vet_single_contributor( # Save JSON if requested if args.save_output: - json_filename = f"evals/{timestamp}_contrib{index:02d}_{contrib_short_id}_raw.json" + json_filename = ( + f"evals/{timestamp}_contrib{index:02d}_{contrib_short_id}_raw.json" + ) with open(json_filename, "w") as f: json.dump(result_dict, f, indent=2, default=str) print(f"āœ… Results saved to {json_filename} and {log_filename}") @@ -211,12 +251,17 @@ async def vet_single_contributor( if log_f: log_f.close() + def generate_summary( results: List[Dict[str, Any]], timestamp: str, save_output: bool, dao_id: str ) -> None: """Generate a simple summary JSON (raw results array + aggregates).""" - allow_count = sum(1 for r in results if r.get("vetting_output", {}).get("decision") == "allow") - block_count = sum(1 for r in results if r.get("vetting_output", {}).get("decision") == "block") + allow_count = sum( + 1 for r in results if r.get("vetting_output", {}).get("decision") == "allow" + ) + block_count = sum( + 1 for r in results if r.get("vetting_output", {}).get("decision") == "block" + ) error_count = sum(1 for r in results if "error" in r) summary = { @@ -242,6 +287,7 @@ def generate_summary( json.dump(summary, f, indent=2, default=str) print(f"āœ… Summary saved to {summary_json}") + def main(): parser = argparse.ArgumentParser( description="Test contributor vetting for a DAO using custom LLM prompts", @@ -353,23 +399,29 @@ def main(): if creator: # Skip if no creator if creator not in contributors: contributors[creator] = [] - contributors[creator].append({ - "id": str(p.id), - "title": p.title or "Untitled", - "content": p.content[:200] + "..." if p.content and len(p.content) > 200 else p.content or "", - "status": str(p.status), - "passed": p.passed, - "executed": p.executed, - }) + contributors[creator].append( + { + "id": str(p.id), + "title": p.title or "Untitled", + "content": p.content[:200] + "..." 
+ if p.content and len(p.content) > 200 + else p.content or "", + "status": str(p.status), + "passed": p.passed, + "executed": p.executed, + } + ) contributor_list = list(contributors.items()) if args.max_contributors > 0: - contributor_list = contributor_list[:args.max_contributors] + contributor_list = contributor_list[: args.max_contributors] print(f"šŸ‘„ Unique contributors to vet: {len(contributor_list)}") if args.dry_run: print("\n--- DRY RUN: Contributors that would be vetted ---") - for index, (contributor_id, proposals_list) in enumerate(contributor_list, 1): + for index, (contributor_id, proposals_list) in enumerate( + contributor_list, 1 + ): print(f" {index}. {contributor_id} ({len(proposals_list)} proposals)") print("Dry run complete. No LLM evaluations performed.\n") sys.exit(0) @@ -381,7 +433,15 @@ def main(): "proposals": proposals, } result = asyncio.run( - vet_single_contributor(contributor_id, contributor_data, dao, args, index, timestamp, backend) + vet_single_contributor( + contributor_id, + contributor_data, + dao, + args, + index, + timestamp, + backend, + ) ) results.append(result) @@ -424,7 +484,9 @@ async def call_openrouter_structured( "Content-Type": "application/json", } - print(f"šŸ“” Calling OpenRouter: {config_data['model']} (temp={config_data['temperature']:.1f})") + print( + f"šŸ“” Calling OpenRouter: {config_data['model']} (temp={config_data['temperature']:.1f})" + ) async with httpx.AsyncClient(timeout=120.0) as client: response = await client.post( @@ -451,10 +513,14 @@ async def call_openrouter_structured( usage = data.get("usage", {}) input_tokens = usage.get("prompt_tokens") output_tokens = usage.get("completion_tokens") - usage_info = { - "input_tokens": input_tokens, - "output_tokens": output_tokens, - } if input_tokens is not None and output_tokens is not None else None + usage_info = ( + { + "input_tokens": input_tokens, + "output_tokens": output_tokens, + } + if input_tokens is not None and output_tokens is not None + 
else None + ) # Validate with Pydantic + add usage result = output_model(**evaluation_json) @@ -462,15 +528,20 @@ async def call_openrouter_structured( # Monkey-patch usage to model instance (for summary/export) object.__setattr__(result, "usage", usage_info) - print(f"āœ… OpenRouter success: {result.decision} (conf: {result.confidence_score:.2f})") + print( + f"āœ… OpenRouter success: {result.decision} (conf: {result.confidence_score:.2f})" + ) return result except json.JSONDecodeError as e: - print(f"āŒ JSON decode error: {e}\nRaw content: {choice_message['content'][:500]}...") + print( + f"āŒ JSON decode error: {e}\nRaw content: {choice_message['content'][:500]}..." + ) raise except ValueError as e: print(f"āŒ Pydantic validation error: {e}") raise + if __name__ == "__main__": main() From 5a47606afdbcc914854c2cbc213a79a02ec9f9f1 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:20:32 -0700 Subject: [PATCH 06/20] feat: update contributor vetting with production proposal formatting and full history Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 177 +++++++++++++++++++++------- 1 file changed, 137 insertions(+), 40 deletions(-) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index 1b182e49..732c6e1d 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -25,10 +25,13 @@ from app.lib.logger import StructuredFormatter, setup_uvicorn_logging from app.backend.factory import get_backend -from app.backend.models import ProposalFilter, DAO, DAOFilter +from app.backend.models import ProposalFilter, DAO, DAOFilter, Proposal from app.config import config import httpx from pydantic import BaseModel, Field +from datetime import datetime +from typing import Any, Dict, List +from urllib.parse import urlparse # Custom Pydantic model for structured LLM output @@ -67,6 +70,110 @@ def flush(self): f.flush() +def safe_int_votes(value: 
Any, default: int = 0) -> int:
+    """Safely convert value to int, handling None/non-numeric cases."""
+    if value is None:
+        return default
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        return default
+
+
+def format_proposals_for_context(proposals: List[Dict[str, Any]]) -> str:
+    """Format proposals for context in evaluation prompt (adapted from evaluation_openrouter_v2.py)."""
+    if not proposals:
+        return "None Found."
+
+    # Sort by created_at descending (newest first)
+    def get_created_at(p: Dict[str, Any]):
+        created_at = p.get("created_at")
+        if created_at:
+            try:
+                # Handle ISO format; strip tzinfo so the key stays comparable
+                if isinstance(created_at, str):
+                    return datetime.fromisoformat(created_at.replace("Z", "+00:00")).replace(tzinfo=None)
+            except ValueError:
+                pass
+        return datetime.min
+
+    sorted_proposals = sorted(proposals, key=get_created_at, reverse=True)
+
+    formatted_proposals = []
+    for proposal in sorted_proposals:
+        # Extract basic info
+        proposal_id = str(proposal.get("proposal_id") or proposal.get("id", ""))[:8]  # Short ID
+        title = proposal.get("title", "Untitled")
+
+        # Extract x_handle from x_url
+        x_url = proposal.get("x_url", "")
+        x_handle = "unknown"
+        if x_url:
+            try:
+                parsed_path = urlparse(x_url).path.split("/")
+                if len(parsed_path) > 1:
+                    x_handle = parsed_path[1]
+            except (AttributeError, IndexError):
+                pass
+
+        # Get creation info
+        created_at_btc = proposal.get("created_btc")
+        created_at_timestamp = proposal.get("created_at")
+
+        created_str = "unknown"
+        if created_at_timestamp:
+            try:
+                if isinstance(created_at_timestamp, str):
+                    created_str = created_at_timestamp[:10]
+                else:
+                    created_str = str(created_at_timestamp)[:10]
+            except (AttributeError, ValueError):
+                created_str = str(created_at_timestamp)
+
+        if created_at_btc and created_at_timestamp:
+            created_at = f"BTC Block {created_at_btc} (at {created_str})"
+        elif created_at_btc:
+            created_at = f"BTC Block {created_at_btc}"
+        elif created_at_timestamp:
+            created_at = created_str
+        else:
+            created_at = "unknown"
+
+ # Get status + proposal_status = proposal.get("status") + passed = proposal.get("passed", False) + concluded = proposal.get("concluded_by") is not None + yes_votes = safe_int_votes(proposal.get("votes_for", 0)) + no_votes = safe_int_votes(proposal.get("votes_against", 0)) + + if ( + proposal_status + and isinstance(proposal_status, str) + and proposal_status == "FAILED" + ): + proposal_passed = "n/a (failed tx)" + elif passed: + proposal_passed = "yes" + elif concluded: + proposal_passed = "no" + else: + proposal_passed = "pending" + + # handle special case of no votes + if concluded and (yes_votes + no_votes == 0): + proposal_passed = "n/a (no votes)" + + # Get content + content = proposal.get("summary") or proposal.get("content", "") + content_preview = content[:500] + "..." if len(content) > 500 else content + + formatted_proposal = f"""\n- #{proposal_id} by @{x_handle} Created: {created_at} Passed: {proposal_passed} Title: {title} Summary: {content_preview}""" + + formatted_proposals.append(formatted_proposal) + + return "\n".join(formatted_proposals) + + def reset_logging(): """Reset logging to a clean state with a handler to original sys.stderr.""" root_logger = logging.getLogger() @@ -120,12 +227,12 @@ def short_uuid(uuid_str: str) -> str: VETTING_USER_PROMPT_TEMPLATE = """Evaluate contributor eligibility for future DAO contributions: -DAO INFO: includes AIBTC charter and current order +DAO INFO: includes DAO name and mission {dao_info_for_evaluation} -Contributor: {contributor_name} +CONTRIBUTOR ID: {contributor_id} -USER'S PAST PROPOSALS: (optional) includes past proposals submitted by the user for this DAO +CONTRIBUTOR'S PAST PROPOSALS: includes all past proposals submitted by this contributor for this DAO {user_past_proposals_for_evaluation} Output the evaluation as a JSON object, strictly following the system guidelines.""" @@ -179,29 +286,26 @@ async def vet_single_contributor( try: print(f"šŸ” Vetting contributor {index}: {contributor_id}") - # 
Format contributor data + # Format contributor data using production-style helpers proposals = contributor_data.get("proposals", []) proposal_count = len(proposals) - proposals_summary = "\n".join( - [ - f"- {p.get('title', 'Untitled')} (ID: {p.get('id', 'N/A')}, Status: {p.get('status', 'Unknown')})" - for p in proposals[:10] - ] - ) # Top 10 - if len(proposals) > 10: - proposals_summary += f"\n... and {proposal_count - 10} more." + user_past_proposals_for_evaluation = format_proposals_for_context(proposals) + + dao_info = { + "dao_id": str(dao.id), + "name": dao.name or "unknown", + "mission": dao.mission or "unknown", + } + dao_info_for_evaluation = json.dumps(dao_info, default=str) messages = [ {"role": "system", "content": VETTING_SYSTEM_PROMPT}, { "role": "user", "content": VETTING_USER_PROMPT_TEMPLATE.format( - dao_name=dao.name or "Unknown DAO", - dao_mission=dao.mission or "No mission provided", - contributor_name=contributor_data.get("name", contributor_id), + dao_info_for_evaluation=dao_info_for_evaluation, contributor_id=contributor_id, - proposal_count=proposal_count, - proposals_summary=proposals_summary, + user_past_proposals_for_evaluation=user_past_proposals_for_evaluation, ), }, ] @@ -214,9 +318,14 @@ async def vet_single_contributor( temperature=args.temperature, ) + serializable_proposals = [p for p in proposals] # Already dicts from model_dump() result_dict = { "contributor_id": contributor_id, - "contributor_data": contributor_data, + "contributor_data": { + "name": contributor_id, + "proposal_count": proposal_count, + "proposals": serializable_proposals, + }, "dao_id": str(dao.id), "vetting_output": openrouter_response.model_dump(), "usage": getattr(openrouter_response, "usage", None), @@ -392,36 +501,24 @@ def main(): print("āŒ No proposals found for DAO. 
Nothing to vet.") sys.exit(0) - # Group by unique contributors (using proposal.creator str) - contributors: Dict[str, List[Dict]] = {} + # Group by unique contributors (using proposal.creator str), full history with model_dump for serialization + contributors: Dict[str, List[Dict[str, Any]]] = {} for p in proposals: creator = p.creator if creator: # Skip if no creator if creator not in contributors: contributors[creator] = [] - contributors[creator].append( - { - "id": str(p.id), - "title": p.title or "Untitled", - "content": p.content[:200] + "..." - if p.content and len(p.content) > 200 - else p.content or "", - "status": str(p.status), - "passed": p.passed, - "executed": p.executed, - } - ) + contributors[creator].append(p.model_dump()) - contributor_list = list(contributors.items()) + # Sort contributors by number of proposals descending (most active first) + contributor_list = sorted(contributors.items(), key=lambda x: len(x[1]), reverse=True) if args.max_contributors > 0: - contributor_list = contributor_list[: args.max_contributors] - print(f"šŸ‘„ Unique contributors to vet: {len(contributor_list)}") + contributor_list = contributor_list[:args.max_contributors] + print(f"šŸ‘„ Unique contributors to vet: {len(contributor_list)} (sorted by activity)") if args.dry_run: print("\n--- DRY RUN: Contributors that would be vetted ---") - for index, (contributor_id, proposals_list) in enumerate( - contributor_list, 1 - ): + for index, (contributor_id, proposals_list) in enumerate(contributor_list, 1): print(f" {index}. {contributor_id} ({len(proposals_list)} proposals)") print("Dry run complete. 
No LLM evaluations performed.\n") sys.exit(0) @@ -430,7 +527,7 @@ def main(): for index, (contributor_id, proposals) in enumerate(contributor_list, 1): contributor_data = { "name": contributor_id, # Use ID as name fallback - "proposals": proposals, + "proposals": proposals, # Full history dicts } result = asyncio.run( vet_single_contributor( From 19e6768fb19f782d5e34536df490dac5408a2bbb Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:22:07 -0700 Subject: [PATCH 07/20] feat: add system prompt to summary and filled user prompts to results Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index 732c6e1d..ba733992 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -310,6 +310,8 @@ async def vet_single_contributor( }, ] + user_prompt_filled = messages[1]["content"] + # Call OpenRouter directly with structured JSON parsing (mirrors evaluation_openrouter_v2.py) openrouter_response = await call_openrouter_structured( messages, @@ -327,6 +329,7 @@ async def vet_single_contributor( "proposals": serializable_proposals, }, "dao_id": str(dao.id), + "user_prompt_filled": user_prompt_filled, "vetting_output": openrouter_response.model_dump(), "usage": getattr(openrouter_response, "usage", None), } @@ -349,6 +352,7 @@ async def vet_single_contributor( "contributor_id": contributor_id, "contributor_data": contributor_data, "dao_id": str(dao.id), + "user_prompt_filled": None, "error": error_msg, } @@ -376,6 +380,7 @@ def generate_summary( summary = { "timestamp": timestamp, "dao_id": dao_id, + "system_prompt": VETTING_SYSTEM_PROMPT, "total_contributors": len(results), "allow_count": allow_count, "block_count": block_count, From faea7f31d56d365c66c4076a0e413089e34afc84 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:46:07 
-0700
Subject: [PATCH 08/20] feat: add contributor vetting viewer and manifest
 generator

Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast)
---
 scripts/generate_vettings_manifest.py |  41 ++
 vetting_viewer.html                   | 604 ++++++++++++++++++++++++++
 2 files changed, 645 insertions(+)
 create mode 100644 scripts/generate_vettings_manifest.py
 create mode 100644 vetting_viewer.html

diff --git a/scripts/generate_vettings_manifest.py b/scripts/generate_vettings_manifest.py
new file mode 100644
index 00000000..22dfdc11
--- /dev/null
+++ b/scripts/generate_vettings_manifest.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+"""
+Utility script to generate or update vettings-manifest.json based on contents of ./evals/.
+Scans for files matching *_summary_dao*_vetting.json and creates a manifest with path and name (timestamp).
+"""
+
+import json
+import os
+import re
+from datetime import datetime
+
+
+def generate_manifest(evals_dir="./evals", manifest_path="./evals/vettings-manifest.json"):
+    """Generate manifest from vetting summary JSON files in evals_dir."""
+    manifest = []
+    timestamp_pattern = re.compile(r"^(\d{8}_\d{6})_summary_dao.*_vetting\.json$")
+    for filename in os.listdir(evals_dir):
+        match = timestamp_pattern.match(filename)
+        if match:
+            timestamp_str = match.group(1)  # YYYYMMDD_HHMMSS
+            try:
+                timestamp = datetime.strptime(timestamp_str, "%Y%m%d_%H%M%S")
+                name = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+            except ValueError:
+                name = filename
+            manifest.append({"path": f"./evals/{filename}", "name": name})
+
+    # Sort by timestamp descending
+    manifest.sort(key=lambda x: x["name"], reverse=True)
+
+    os.makedirs(os.path.dirname(manifest_path), exist_ok=True)
+    with open(manifest_path, "w") as f:
+        json.dump(manifest, f, indent=2)
+
+    print(
+        f"āœ… Vettings manifest generated/updated at {manifest_path} with {len(manifest)} entries."
+ ) + + +if __name__ == "__main__": + generate_manifest() diff --git a/vetting_viewer.html b/vetting_viewer.html new file mode 100644 index 00000000..81f8df3b --- /dev/null +++ b/vetting_viewer.html @@ -0,0 +1,604 @@ + + + + + + Contributor Vetting Viewer V1 + + + + + + + + + +
+ +
+

Contributor Vetting Viewer V1

+

+ Analyze contributor vetting results from JSON reports in ./evals/. View per-contributor details, + decisions, reasoning, confidence, past proposals, prompts, and aggregates for prompt tuning. +

+
+ + +
+ + +
+ + + +
+ + +
+ +
+ + +
+
+ + +
+
+ + + + + + + From 9f554836c37fe70b4ec7ff5dc8eb50b23112c165 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:49:21 -0700 Subject: [PATCH 09/20] feat: show details by default and add explorer links for contributors Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 81f8df3b..48467ab7 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -322,10 +322,11 @@

Contributor Vetting Viewer V1

if (arr.length === 0) return ""; return arr.map((contrib, idx) => { const labelCell = idx === 0 ? `${label}` : ""; + const explorerUrl = `https://explorer.hiro.so/address/${contrib.contributor_id}?chain=testnet`; return ` ${labelCell} - ${contrib.short_id} + ${contrib.short_id} ${contrib.proposal_count} ${contrib.confidence.toFixed(2)} @@ -458,6 +459,7 @@

Top Blocked

contentDiv.appendChild(card); }); wrapper.appendChild(contentDiv); + contentDiv.style.display = "block"; container.appendChild(wrapper); // Restore expansion @@ -499,15 +501,17 @@

Top Blocked

const decisionClass = contrib.decision === true ? "text-green-600 border-green-600" : contrib.decision === false ? "text-red-600 border-red-600" : "text-gray-600 border-gray-600"; const decisionText = contrib.decision === true ? "ALLOW" : contrib.decision === false ? "BLOCK" : "ERROR"; + const explorerUrl = `https://explorer.hiro.so/address/${contrib.contributor_id}?chain=testnet`; title.innerHTML = ` - ${contrib.short_id} - ${decisionText} (Conf: ${contrib.confidence.toFixed(2)}) - ${contrib.proposal_count} Proposals + ${contrib.short_id} - ${decisionText} (Conf: ${contrib.confidence.toFixed(2)}) - ${contrib.proposal_count} Proposals `; section.appendChild(title); + title.querySelector(".chevron").classList.add("rotate-90"); const details = document.createElement("div"); details.className = "details space-y-4"; - details.style.display = "none"; + details.style.display = "block"; title.addEventListener("click", () => { const collapsed = details.style.display === "none"; From 6f2e5fd74b3131eeda92fb857fc2cfdb23768ee5 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:51:24 -0700 Subject: [PATCH 10/20] refactor: simplify contributor tables by removing category column Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 48467ab7..89151d92 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -318,15 +318,13 @@

Contributor Vetting Viewer V1

.sort((a, b) => b.confidence - a.confidence) .slice(0, 10); - const createRows = (label, arr) => { + const createRows = (arr) => { if (arr.length === 0) return ""; - return arr.map((contrib, idx) => { - const labelCell = idx === 0 ? `${label}` : ""; + return arr.map((contrib) => { const explorerUrl = `https://explorer.hiro.so/address/${contrib.contributor_id}?chain=testnet`; return ` - ${labelCell} - ${contrib.short_id} + ${contrib.contributor_id} ${contrib.proposal_count} ${contrib.confidence.toFixed(2)} @@ -356,15 +354,15 @@

Overall Statistics

Top Allowed

- - ${createRows("Top Allowed", topAllowed)} + + ${createRows(topAllowed)}
CategoryContributorProposalsConfidence
ContributorProposalsConfidence

Top Blocked

- - ${createRows("Top Blocked", topBlocked)} + + ${createRows(topBlocked)}
CategoryContributorProposalsConfidence
ContributorProposalsConfidence
From 2e3ad07191be860530d15adf18c1ec5a783191da Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:52:37 -0700 Subject: [PATCH 11/20] feat: add raw JSON display to contributor details Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 89151d92..96bce2f9 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -234,6 +234,7 @@

Contributor Vetting Viewer V1

notable_proposals: [], proposals: item.contributor_data?.proposals || [], usage: null, + raw_result: item, }; } @@ -249,6 +250,7 @@

Contributor Vetting Viewer V1

proposals: item.contributor_data?.proposals || [], user_prompt_filled: item.user_prompt_filled || "", usage: item.usage || null, + raw_result: item, }; }); @@ -597,7 +599,15 @@

Past Proposals (${contrib.proposals.length})

`; - details.innerHTML = outcomeHtml + reasoningHtml + notableHtml + promptsHtml + usageHtml + proposalsHtml; + // Raw JSON + const rawHtml = contrib.raw_result ? ` +
+ Raw JSON Result +
${escapeHtml(JSON.stringify(contrib.raw_result, null, 2))}
+
+ ` : ""; + + details.innerHTML = outcomeHtml + reasoningHtml + notableHtml + promptsHtml + usageHtml + proposalsHtml + rawHtml; section.appendChild(details); return section; } From 0dcc0fc966752126afbe2131e267585a1539bbfd Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:55:19 -0700 Subject: [PATCH 12/20] feat: enhance proposals table with details toggle and more info Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 96bce2f9..293fc52d 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -156,6 +156,14 @@

Contributor Vetting Viewer V1

const vettingNames = new Map(); let currentVettings = []; + // Proposal details toggle + function toggleProposalDetails(row) { + const detailsRow = row.nextElementSibling; + if (detailsRow && detailsRow.classList.contains('proposal-details')) { + detailsRow.classList.toggle('hidden'); + } + } + /** * Init: Load manifest, preload reports, render first. */ @@ -566,7 +574,7 @@

Notable Proposals

`; } - // Proposals table + // Proposals table with more details const proposalsHtml = `

Past Proposals (${contrib.proposals.length})

@@ -575,22 +583,39 @@

Past Proposals (${contrib.proposals.length})

ID Title - Status - Passed + Summary Preview + Status / Passed + Tags + X URL ${contrib.proposals.map((p) => { const shortId = (p.proposal_id || p.id || "").slice(0, 8); const title = escapeHtml(p.title || "Untitled"); + const summary = escapeHtml((p.summary || p.content || "").slice(0, 100)) + (p.summary && p.summary.length > 100 ? "..." : ""); const status = escapeHtml(p.status || "Unknown"); - const passed = p.passed ? "Yes" : p.concluded ? "No" : "Pending"; + const passed = p.passed ? "Yes" : (p.concluded_by ? "No" : "Pending"); + const tags = escapeHtml((p.tags || []).slice(0, 3).join(", ")); + const xUrl = p.x_url ? `View` : "N/A"; return ` - - ${shortId} - ${title} - ${status} - ${passed} + + ${shortId} + ${title} + ${summary} + ${status}
${passed} + ${tags}${tags.length > 0 ? '
+' + (p.tags?.length - 3 || 0) + ' more' : ''} + ${xUrl} + + + +
+
Full Summary: ${escapeHtml(p.summary || p.content || "N/A")}
+ ${p.tags ? `
All Tags: ${escapeHtml(p.tags.join(', '))}
` : ''} + ${p.created_at ? `
Created: ${escapeHtml(p.created_at)}
` : ''} + ${p.votes_for !== undefined && p.votes_against !== undefined ? `
Votes: For: ${p.votes_for}, Against: ${p.votes_against}
` : ''} +
+ `; }).join("")} From 8038120f1af4ebdc2ee6881d46a2e0855c5740e2 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 20:57:15 -0700 Subject: [PATCH 13/20] fix: limit proposals to 20, fix event delegation and empty state Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 293fc52d..36dece77 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -156,13 +156,13 @@

Contributor Vetting Viewer V1

const vettingNames = new Map(); let currentVettings = []; - // Proposal details toggle - function toggleProposalDetails(row) { - const detailsRow = row.nextElementSibling; - if (detailsRow && detailsRow.classList.contains('proposal-details')) { - detailsRow.classList.toggle('hidden'); + // Global event delegation for proposal row clicks (works with dynamic content) + document.addEventListener('click', (e) => { + const row = e.target.closest('.proposal-row'); + if (row && row.nextElementSibling && row.nextElementSibling.classList.contains('proposal-details')) { + row.nextElementSibling.classList.toggle('hidden'); } - } + }); /** * Init: Load manifest, preload reports, render first. @@ -437,7 +437,8 @@

Top Blocked

container.innerHTML = ""; if (contributors.length === 0) { - container.innerHTML = `

No contributors match the filter.

`; + container.innerHTML = `

No contributors match the filter.

(Total filtered: 0 / ${currentVettings.contributors.length})

`; + console.warn('No contributors after filter/sort'); return; } @@ -484,6 +485,8 @@

Top Blocked

} } }); + + console.log(`Rendered ${contributors.length} contributor cards`); } /** @@ -590,16 +593,16 @@

Past Proposals (${contrib.proposals.length})

- ${contrib.proposals.map((p) => { + ${contrib.proposals.slice(0, 20).map((p) => { // Limit to 20 to avoid huge tables const shortId = (p.proposal_id || p.id || "").slice(0, 8); const title = escapeHtml(p.title || "Untitled"); - const summary = escapeHtml((p.summary || p.content || "").slice(0, 100)) + (p.summary && p.summary.length > 100 ? "..." : ""); + const summary = escapeHtml((p.summary || p.content || "").slice(0, 100)) + ((p.summary || p.content || "").length > 100 ? "..." : ""); const status = escapeHtml(p.status || "Unknown"); const passed = p.passed ? "Yes" : (p.concluded_by ? "No" : "Pending"); const tags = escapeHtml((p.tags || []).slice(0, 3).join(", ")); const xUrl = p.x_url ? `View` : "N/A"; return ` - + ${shortId} ${title} ${summary} @@ -614,11 +617,13 @@

Past Proposals (${contrib.proposals.length})

${p.tags ? `
All Tags: ${escapeHtml(p.tags.join(', '))}
` : ''} ${p.created_at ? `
Created: ${escapeHtml(p.created_at)}
` : ''} ${p.votes_for !== undefined && p.votes_against !== undefined ? `
Votes: For: ${p.votes_for}, Against: ${p.votes_against}
` : ''} + ${contrib.proposals.length > 20 ? '
... and ' + (contrib.proposals.length - 20) + ' more proposals
' : ''}
`; }).join("")} + ${contrib.proposals.length > 20 ? `Showing first 20 of ${contrib.proposals.length} proposals` : ''} From 06cddc533f393d979004408d6ba994b6f9913bff Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:04:30 -0700 Subject: [PATCH 14/20] fix: add fallback loading, error UI, and simplify proposals table Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 80 +++++++++++++++++---------------------------- 1 file changed, 30 insertions(+), 50 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 36dece77..c2da1bd9 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -183,6 +183,24 @@

Contributor Vetting Viewer V1

console.warn("No vettings-manifest.json found; skipping."); } + // Fallback: try known summary file if no manifest + if (manifest.length === 0) { + const fallbackFiles = ['20251213_202652_summary_daoc5d4f63f_vetting.json']; + for (const fname of fallbackFiles) { + try { + const resp = await fetch(`./evals/${fname}`); + if (resp.ok) { + const data = await resp.json(); + const processed = processVettingData(data); + vettingData.set(fname, processed); + vettingNames.set(fname, 'Fallback Report'); + manifest = [{path: `./evals/${fname}`, name: 'Fallback'}]; + break; + } + } catch {} + } + } + // Populate selector const selector = document.getElementById("eval-select"); manifest.forEach((report) => { @@ -405,8 +423,9 @@

Top Blocked

} }); + console.log('Rendering:', {count: currentVettings?.contributors?.length || 0, filter: document.getElementById("contributor-filter-select").value, sort: document.getElementById("sort-select").value}); if (!currentVettings || currentVettings.contributors.length === 0) { - container.innerHTML = ""; + container.innerHTML = `

No contributors loaded

Check console for fetch errors. Run generate_vettings_manifest.py?

`; return; } @@ -577,57 +596,18 @@

Notable Proposals

`; } - // Proposals table with more details + // Simplified proposals table const proposalsHtml = ` -
-

Past Proposals (${contrib.proposals.length})

- - - - - - - - - - - - - ${contrib.proposals.slice(0, 20).map((p) => { // Limit to 20 to avoid huge tables - const shortId = (p.proposal_id || p.id || "").slice(0, 8); - const title = escapeHtml(p.title || "Untitled"); - const summary = escapeHtml((p.summary || p.content || "").slice(0, 100)) + ((p.summary || p.content || "").length > 100 ? "..." : ""); - const status = escapeHtml(p.status || "Unknown"); - const passed = p.passed ? "Yes" : (p.concluded_by ? "No" : "Pending"); - const tags = escapeHtml((p.tags || []).slice(0, 3).join(", ")); - const xUrl = p.x_url ? `View` : "N/A"; - return ` - - - - - - - - - - - - `; - }).join("")} - ${contrib.proposals.length > 20 ? `` : ''} - +
+

Proposals (${contrib.proposals.length})

+
IDTitleSummary PreviewStatus / PassedTagsX URL
${shortId}${title}${summary}${status}
${passed}
${tags}${tags.length > 0 ? '
+' + (p.tags?.length - 3 || 0) + ' more' : ''}
${xUrl}
Showing first 20 of ${contrib.proposals.length} proposals
+ + ${contrib.proposals.slice(0,10).map(p => { + const id = (p.proposal_id || '').slice(0,8); + return ``; + }).join('')}
IDTitle
${id}${escapeHtml(p.title)}
-
- `; + `; // Raw JSON const rawHtml = contrib.raw_result ? ` From b2dc87ae075dcd8a7b8ad9c7949531ad269eb36d Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:07:42 -0700 Subject: [PATCH 15/20] fix: ensure proposal ID is string before slicing Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index c2da1bd9..f19fa76e 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -603,7 +603,7 @@

Proposals (${contrib.proposals.length})

${contrib.proposals.slice(0,10).map(p => { - const id = (p.proposal_id || '').slice(0,8); + const id = String(p.proposal_id || '').slice(0,8); return ``; }).join('')}
IDTitle
${id}${escapeHtml(p.title)}
From 59217adf63812c871f9594e4bba02de169b4efeb Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:09:03 -0700 Subject: [PATCH 16/20] feat: display contributor ID after dot instead of short slice Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index f19fa76e..52f05eb8 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -248,10 +248,13 @@

Contributor Vetting Viewer V1

} const contributors = data.results.map((item) => { + const parts = item.contributor_id.split('.'); + const short_id = parts.length > 1 ? parts.slice(1).join('.') : item.contributor_id.slice(0, 8); + if (item.error) { return { contributor_id: item.contributor_id, - short_id: item.contributor_id.slice(0, 8), + short_id, error: item.error, decision: null, confidence: 0, @@ -267,7 +270,7 @@

Contributor Vetting Viewer V1

const vetOut = item.vetting_output; return { contributor_id: item.contributor_id, - short_id: item.contributor_id.slice(0, 8), + short_id, decision: vetOut.decision === "allow", confidence: vetOut.confidence_score, reasoning: vetOut.reasoning, From c7e6b49e38bc0bf7e00d5fb8505d22ae6f252f69 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:22:20 -0700 Subject: [PATCH 17/20] feat: add x_handle to vetting output and display in UI Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 5 +++++ vetting_viewer.html | 9 ++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index ba733992..464f0479 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -39,6 +39,10 @@ class ContributorVettingOutput(BaseModel): contributor_id: str = Field( description="Unique contributor identifier (e.g., creator address/username)" ) + x_handle: Optional[str] = Field( + default=None, + description="Primary X/Twitter handle extracted from contributor's proposals" + ) decision: Literal["allow", "block"] = Field( description="Final decision: allow or block future contributions" ) @@ -215,6 +219,7 @@ def short_uuid(uuid_str: str) -> str: {{ "contributor_id": "", + "x_handle": "", "decision": "", "confidence_score": , "reasoning": "", diff --git a/vetting_viewer.html b/vetting_viewer.html index 52f05eb8..94fcb911 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -262,6 +262,7 @@

Contributor Vetting Viewer V1

reasoning: "", notable_proposals: [], proposals: item.contributor_data?.proposals || [], + x_handle: null, usage: null, raw_result: item, }; @@ -277,6 +278,7 @@

Contributor Vetting Viewer V1

proposal_count: vetOut.proposal_count, notable_proposals: vetOut.notable_proposals || [], proposals: item.contributor_data?.proposals || [], + x_handle: vetOut.x_handle || null, user_prompt_filled: item.user_prompt_filled || "", usage: item.usage || null, raw_result: item, @@ -356,6 +358,7 @@

Contributor Vetting Viewer V1

return ` ${contrib.contributor_id} + ${contrib.x_handle || 'N/A'} ${contrib.proposal_count} ${contrib.confidence.toFixed(2)} @@ -385,14 +388,14 @@

Overall Statistics

Top Allowed

- + ${createRows(topAllowed)}
ContributorProposalsConfidence
ContributorX HandleProposalsConfidence

Top Blocked

- + ${createRows(topBlocked)}
ContributorProposalsConfidence
ContributorX HandleProposalsConfidence
@@ -536,7 +539,7 @@

Top Blocked

const decisionText = contrib.decision === true ? "ALLOW" : contrib.decision === false ? "BLOCK" : "ERROR"; const explorerUrl = `https://explorer.hiro.so/address/${contrib.contributor_id}?chain=testnet`; title.innerHTML = ` - ${contrib.short_id} - ${decisionText} (Conf: ${contrib.confidence.toFixed(2)}) - ${contrib.proposal_count} Proposals + ${contrib.short_id}${contrib.x_handle ? ` (@${contrib.x_handle})` : ''} - ${decisionText} (Conf: ${contrib.confidence.toFixed(2)}) - ${contrib.proposal_count} Proposals `; section.appendChild(title); From de7a8c76e065d123af5b8ceb9dc7f56564e866d2 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:30:19 -0700 Subject: [PATCH 18/20] feat: add manifest generation after vetting test Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- scripts/test_contributor_vetting.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/test_contributor_vetting.py b/scripts/test_contributor_vetting.py index 464f0479..b81733d9 100644 --- a/scripts/test_contributor_vetting.py +++ b/scripts/test_contributor_vetting.py @@ -33,6 +33,8 @@ from typing import Any, Dict, List from urllib.parse import urlparse +from scripts.generate_vettings_manifest import generate_manifest + # Custom Pydantic model for structured LLM output class ContributorVettingOutput(BaseModel): @@ -557,6 +559,8 @@ def main(): generate_summary(results, timestamp, args.save_output, str(dao.id)) + generate_manifest() + print("\nšŸŽ‰ Contributor vetting test completed!") finally: From b766b60edc7060849b4ea7c53dac086c84991568 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:32:20 -0700 Subject: [PATCH 19/20] feat: prioritize X handle in title and add agent account line Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 94fcb911..85b422c6 100644 --- a/vetting_viewer.html +++ 
b/vetting_viewer.html @@ -534,17 +534,29 @@

Top Blocked

section.dataset.contributorId = contrib.contributor_id; const title = document.createElement("h2"); title.className = "text-xl font-bold text-gray-800 mb-4 cursor-pointer flex justify-between items-center"; - const decisionClass = contrib.decision === true ? "text-green-600 border-green-600" : - contrib.decision === false ? "text-red-600 border-red-600" : "text-gray-600 border-gray-600"; + const decisionClass = contrib.decision === true ? "text-green-600" : + contrib.decision === false ? "text-red-600" : "text-gray-600"; const decisionText = contrib.decision === true ? "ALLOW" : contrib.decision === false ? "BLOCK" : "ERROR"; const explorerUrl = `https://explorer.hiro.so/address/${contrib.contributor_id}?chain=testnet`; + + const displayName = contrib.x_handle || contrib.short_id; + const nameLinkUrl = contrib.x_handle ? `https://x.com/${contrib.x_handle}` : explorerUrl; + const nameLinkTitle = contrib.x_handle ? `View on X: @${contrib.x_handle}` : `Full address: ${contrib.contributor_id}`; + const nameLinkClass = contrib.x_handle ? `${decisionClass} hover:underline` : `${decisionClass} hover:underline font-mono`; + title.innerHTML = ` - ${contrib.short_id}${contrib.x_handle ? 
` (@${contrib.x_handle})` : ''} - ${decisionText} (Conf: ${contrib.confidence.toFixed(2)}) - ${contrib.proposal_count} Proposals + ${displayName} - ${decisionText} (Conf: ${contrib.confidence.toFixed(2)}) - ${contrib.proposal_count} Proposals `; section.appendChild(title); title.querySelector(".chevron").classList.add("rotate-90"); + // Agent account line + const agentAccountDiv = document.createElement("div"); + agentAccountDiv.className = "text-sm text-gray-500 mb-4 pl-1 font-mono border-l-4 border-gray-200"; + agentAccountDiv.innerHTML = `Agent account: ${contrib.contributor_id}`; + section.appendChild(agentAccountDiv); + const details = document.createElement("div"); details.className = "details space-y-4"; details.style.display = "block"; From ad4403bad0fa80ec9771c244288804d78d924634 Mon Sep 17 00:00:00 2001 From: Jason Schrader Date: Sat, 13 Dec 2025 21:33:12 -0700 Subject: [PATCH 20/20] refactor: move agent account info inside collapsible details Co-authored-by: aider (openrouter/x-ai/grok-4.1-fast) --- vetting_viewer.html | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/vetting_viewer.html b/vetting_viewer.html index 85b422c6..51bf444e 100644 --- a/vetting_viewer.html +++ b/vetting_viewer.html @@ -551,12 +551,6 @@

Top Blocked

section.appendChild(title); title.querySelector(".chevron").classList.add("rotate-90"); - // Agent account line - const agentAccountDiv = document.createElement("div"); - agentAccountDiv.className = "text-sm text-gray-500 mb-4 pl-1 font-mono border-l-4 border-gray-200"; - agentAccountDiv.innerHTML = `Agent account: ${contrib.contributor_id}`; - section.appendChild(agentAccountDiv); - const details = document.createElement("div"); details.className = "details space-y-4"; details.style.display = "block"; @@ -567,6 +561,13 @@

Top Blocked

title.querySelector(".chevron").classList.toggle("rotate-90", collapsed); }); + // Agent account + const agentAccountHtml = ` +
+ Agent account: ${contrib.contributor_id} +
+ `; + // Decision badge let outcomeHtml = `

Decision: ${decisionText}

`; if (contrib.error) { @@ -635,7 +636,7 @@

Proposals (${contrib.proposals.length})

` : ""; - details.innerHTML = outcomeHtml + reasoningHtml + notableHtml + promptsHtml + usageHtml + proposalsHtml + rawHtml; + details.innerHTML = agentAccountHtml + outcomeHtml + reasoningHtml + notableHtml + promptsHtml + usageHtml + proposalsHtml + rawHtml; section.appendChild(details); return section; }