diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9521ca3..3f29ef4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,6 +94,13 @@ jobs: contents: read steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Configure git identity for graph materialization + run: | + git config user.name "ci-bot" + git config user.email "ci@xyph.dev" - name: Fetch WARP graph refs run: git fetch origin 'refs/warp/xyph-roadmap/writers/*:refs/warp/xyph-roadmap/writers/*' diff --git a/.gitignore b/.gitignore index ea438b4..dce4778 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ coverage/ .codex/ .claude/ .xyph.json +docs/work/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 749b145..46b5064 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,18 @@ All notable changes to XYPH will be documented in this file. ## [Unreleased] +### Added — Work DAG Analysis Suite + +- **`DagAnalysis.ts`** — pure functions for DAG structure analysis: level assignment, DAG width, greedy worker scheduling, transitive reduction/closure, anti-chain decomposition, reverse reachability, and provenance tracing +- **`scripts/generate-work-dag.ts`** — generates comprehensive DAG visualization suite: full/per-campaign/backlog/graveyard SVGs in both LR and TB orientations, plus `work.md` analysis document with topological sort, critical path, parallelism, scheduling, transitive reduction/closure, ancestry/impact, campaign grouping, and anti-chain waves +- **`npm run graph:work`** — runs the generator, outputs to `docs/work/` +- **43 new tests** — unit tests for all DagAnalysis functions (diamond, linear, empty, single-node, isolated-node graphs) + +### Fixed — PR #32 Code Review + +- **DONE tasks inflated scheduling makespan** — `scheduleWorkers` now treats DONE tasks as weight 0, matching `computeCriticalPath` semantics (Codex P1) +- **CI traceability job failure** — added `fetch-depth: 0` and git identity config to traceability workflow; 
shallow clones lack commit objects needed by git-warp materialization + ### Added — Workflow Infrastructure - **Git hooks** — `scripts/hooks/pre-commit` (lint gate) and `scripts/hooks/pre-push` (test gate); plain shell scripts, no Husky/lint-staged diff --git a/CLAUDE.md b/CLAUDE.md index 385fc57..0400cdc 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -80,6 +80,15 @@ If the types are hard, that means you need to understand the code better. before your branch — fix them. You touched the codebase; you leave it better than you found it. +**NEVER implement graph algorithms in userland:** +- If you find yourself implementing graph algorithms (BFS, DFS, topological sort, + reachability, transitive reduction/closure, level assignment, etc.), **STOP**. +- git-warp probably already does what you need via `graph.traverse.*` or `graph.query()`. +- If git-warp doesn't have the primitive you need, **STOP** and request the user adds + the desired functionality to git-warp. You must never assume that the full DAG can + fit in memory at once — git-warp's traversals are designed to work incrementally + over the commit graph. + ### Project Planning via the Actuator XYPH plans and tracks its own development through the WARP graph. 
The `xyph-actuator.ts` CLI is the single source of truth for what's been done, diff --git a/docs/assets/work-dag.svg b/docs/assets/work-dag.svg new file mode 100644 index 0000000..fb3ed4a --- /dev/null +++ b/docs/assets/work-dag.svg @@ -0,0 +1,2275 @@ + + + + + + +XYPH + + +cluster_campaign_AGENT + +Milestone 12: Agent Protocol + + +cluster_campaign_BEDROCK + +Milestone 1: Bedrock Foundations + + +cluster_campaign_CLITOOL + +Milestone 10: CLI Tooling + + +cluster_campaign_DASHBOARD + +Milestone 5: WARP Dashboard + + +cluster_campaign_ECOSYSTEM + +Ecosystem + + +cluster_campaign_FORGE + +Milestone 9: Forge + + +cluster_campaign_HEARTBEAT + +Milestone 2: The Heartbeat + + +cluster_campaign_ORACLE + +Milestone 8: Oracle + + +cluster_campaign_SOVEREIGNTY + +Milestone 4: Sovereignty + + +cluster_campaign_SUBMISSION + +Milestone 6: Submission + + +cluster_campaign_TRACE + +Milestone 11: Traceability + + +cluster_campaign_TRIAGE + +Milestone 3: Triage + + +cluster_campaign_WEAVER + +Milestone 7: Weaver + + +cluster_uncategorized + +uncategorized + + +cluster_legend + +Status + + + +task_AGT_001 + +AGT-001 +Agent briefing command: structured ... + + + +task_AGT_002 + +AGT-002 +Agent status command: quick state c... + + + +task_AGT_003 + +AGT-003 +Agent next command: opinionated rec... + + + +task_AGT_004 + +AGT-004 +Agent act command: validated action... + + + +task_AGT_005 + +AGT-005 +Agent log command: session activity... + + + +task_AGT_006 + +AGT-006 +AgentBriefingService + AgentRecomme... + + + +task_AGT_006->task_AGT_002 + + + + + +task_AGT_006->task_AGT_004 + + + + + +task_AGT_006->task_AGT_005 + + + + + +task_AGT_007 + +AGT-007 +Output formatters: JsonFormatter, T... + + + +task_AGT_008 + +AGT-008 +Enhanced inbox command: add --descr... + + + +task_AGT_009 + +AGT-009 +Comment command: agents and humans ... + + + +task_AGT_010 + +AGT-010 +Flag command: mark entities for hum... 
+ + + +task_AGT_009->task_AGT_010 + + + + + +task_AGT_011 + +AGT-011 +Agent submissions command: structur... + + + +task_AGT_012 + +AGT-012 +Agent review command: structured re... + + + +task_AGT_011->task_AGT_012 + + + + + +task_AGT_013 + +AGT-013 +Agent submit command: structured su... + + + +task_agent_context + +agent-context +xyph context <id>: full quest conte... + + + +task_agent_handoff + +agent-handoff +xyph handoff: end-of-session summar... + + + +task_doc_agent_charter + +doc-agent-charter +Implement or retire AGENT_CHARTER.m... + + + +task_BDK_001 + +BDK-001 +Extract foundational docs from chat... + + + +task_BDK_002 + +BDK-002 +Initialize git repo and .gitignore + + + +task_BDK_003 + +BDK-003 +Scaffold directory and actuator + + + +task_vision_doc_polish + +vision-doc-polish +Polish VISION_NORTH_STAR.md — clari... + + + +task_ACT_001 + +ACT-001 +CLI context + error handler + valid... + + + +task_ACT_002 + +ACT-002 +Extract ingest, sovereignty, coordi... + + + +task_ACT_003 + +ACT-003 +Extract artifact, submission, intak... + + + +task_ACT_004 + +ACT-004 +Extract dashboard commands + slim a... + + + +task_ACT_005 + +ACT-005 +Verify build + full test suite pass... + + + +task_BX_001 + +BX-001 +bin/xyph binary: package.json bin f... + + + +task_BX_002 + +BX-002 +Identity resolution: 5-layer preced... + + + +task_BX_001->task_BX_002 + + + + + +task_BX_005 + +BX-005 +xyph (no args) launches TUI dashboa... + + + +task_BX_001->task_BX_005 + + + + + +task_BX_006 + +BX-006 +Sovereignty gate: TTY + /dev/tty co... + + + +task_BX_001->task_BX_006 + + + + + +task_BX_007 + +BX-007 +Fix promote provenance gap: record ... + + + +task_BX_001->task_BX_007 + + + + + +task_BX_008 + +BX-008 +xyph status positional subcommand: ... + + + +task_BX_001->task_BX_008 + + + + + +task_BX_015 + +BX-015 +xyph slice <id>: holographic slice ... + + + +task_BX_001->task_BX_015 + + + + + +task_git_hooks_lifecycle + +git-hooks-lifecycle +Git hooks integration: auto-link co... 
+ + + +task_BX_001->task_git_hooks_lifecycle + + + + + +task_no_tui_mode + +no-tui-mode +Dashboard: offer non-interactive pl... + + + +task_BX_001->task_no_tui_mode + + + + + +task_ide_integration + +ide-integration +IDE integration: VSCode extension +... + + + +task_BX_001->task_ide_integration + + + + + +task_BX_003 + +BX-003 +xyph whoami: print resolved identit... + + + +task_BX_002->task_BX_003 + + + + + +task_multi_user_proof + +multi-user-proof +Multi-user proof: 5+ concurrent wri... + + + +task_BX_002->task_multi_user_proof + + + + + +task_BX_004 + +BX-004 +xyph login/logout: write xyph.ident... + + + +task_BX_003->task_BX_004 + + + + + +task_BX_009 + +BX-009 +xyph history <id>: entity timeline ... + + + +task_BX_010 + +BX-010 +TUI h key: entity timeline modal av... + + + +task_BX_009->task_BX_010 + + + + + +task_BX_011 + +BX-011 +xyph receipts <id>: show tick recei... + + + +task_BX_012 + +BX-012 +xyph seek --tick N / --latest: mate... + + + +task_BX_014 + +BX-014 +xyph diff <tickA> <tickB>: roadmap-... + + + +task_BX_017 + +BX-017 +HistoryPort: new port exposing getE... + + + +task_BX_017->task_BX_009 + + + + + +task_BX_017->task_BX_011 + + + + + +task_BX_017->task_BX_012 + + + + + +task_BX_017->task_BX_014 + + + + + +task_SUB_CLI_002 + +SUB-CLI-002 +Add xyph timeline <submission-id> —... + + + +task_BX_017->task_SUB_CLI_002 + + + + + +task_BX_016 + +BX-016 +TUI provenance panel: property-leve... + + + +task_BX_017->task_BX_016 + + + + + +task_BX_018 + +BX-018 +README: merge redundant offline-fir... + + + +task_BX_019 + +BX-019 +CHANGELOG: split or collapse pre-al... + + + +task_BX_020 + +BX-020 +README: remaining smart-quote/NBSP ... + + + +task_SUB_CLI_001 + +SUB-CLI-001 +Add xyph diff <submission-id> comma... + + + +task_SUB_ID_001 + +SUB-ID-001 +Replace generateId() with proper UL... + + + +task_actuator_theme_destructure + +actuator-theme-destructure +Destructure theme tokens at top of ... 
+ + + +task_advisory_doc_versioning + +advisory-doc-versioning +Advisory docs: auto-expire or link ... + + + +task_cli_api + +cli-api +API layer: --json output mode for C... + + + +task_cli_api->task_AGT_008 + + + + + +task_cli_api->task_AGT_011 + + + + + +task_cli_api->task_AGT_013 + + + + + +task_cli_api->task_agent_context + + + + + +task_cli_api->task_agent_handoff + + + + + +task_cli_api->task_SUB_CLI_001 + + + + + +task_cli_batch + +cli-batch +xyph batch claim/seal: multi-item o... + + + +task_cli_api->task_cli_batch + + + + + +task_cli_diff + +cli-diff +xyph diff [--since <tick|duration>]... + + + +task_cli_api->task_cli_diff + + + + + +task_graph_export_import + +graph-export-import +Graph export/import: portable snaps... + + + +task_cli_api->task_graph_export_import + + + + + +task_cli_api->task_ide_integration + + + + + +task_mcp_server + +mcp-server +MCP server: expose WARP graph as Mo... + + + +task_cli_api->task_mcp_server + + + + + +task_web_ui + +web-ui +Web UI: local air-gapped SPA for br... + + + +task_cli_api->task_web_ui + + + + + +task_cli_assign + +cli-assign +xyph assign <quest> <principal>: di... + + + +task_cli_fuzzy_claim + +cli-fuzzy-claim +Interactive xyph claim/depend with ... + + + +task_cli_move + +cli-move +xyph move <quest> --campaign <id>: ... + + + +task_cli_plan + +cli-plan +xyph plan <campaign>: execution pla... + + + +task_cli_rename_inbox_backlog + +cli-rename-inbox-backlog +Rename inbox CLI command to backlog... + + + +task_cli_show + +cli-show +xyph show <id>: full entity inspect... + + + +task_cli_show->task_agent_context + + + + + +task_cli_show->task_cli_assign + + + + + +task_cli_show->task_cli_fuzzy_claim + + + + + +task_cli_show->task_cli_move + + + + + +task_cli_show->task_cli_plan + + + + + +task_cli_wizard_promote + +cli-wizard-promote +Interactive xyph promote wizard: fi... + + + +task_cli_wizard_quest + +cli-wizard-quest +Interactive xyph quest wizard: filt... 
+ + + +task_cli_wizard_review + +cli-wizard-review +Interactive xyph review wizard: fil... + + + +task_cli_wizard_triage + +cli-wizard-triage +Interactive xyph triage session: lo... + + + +task_coverage_threshold + +coverage-threshold +CI: configure @vitest/coverage-v8 w... + + + +task_lint_unused_interface_fields + +lint-unused-interface-fields +Add stricter lint rule for detectin... + + + +task_coverage_threshold->task_lint_unused_interface_fields + + + + + +task_pre_push_typecheck + +pre-push-typecheck +Git pre-push hook: run tsc --noEmit... + + + +task_coverage_threshold->task_pre_push_typecheck + + + + + +task_cross_adapter_test_stability + +cross-adapter-test-stability +Increase CrossAdapterVisibility tes... + + + +task_doc_data_contracts_rewrite + +doc-data-contracts-rewrite +Rewrite or retire DATA_CONTRACTS.md... + + + +task_doc_graph_schema_rewrite + +doc-graph-schema-rewrite +Rewrite GRAPH_SCHEMA.md: add all 17... + + + +task_docstring_coverage + +docstring-coverage +Improve docstring coverage in CLI c... + + + +task_inline_color_status + +inline-color-status +Inline colorStatus wrapper into sty... + + + +task_style_guide_md040 + +style-guide-md040 +Add language identifiers to fenced ... + + + +task_terminology_lint + +terminology-lint +CI terminology lint: flag non-guild... + + + +task_test_cross_type_depend + +test-cross-type-depend +Test: depend rejects cross-type tas... + + + +task_test_frontier_zero_edges + +test-frontier-zero-edges +Test: computeFrontier with zero dep... + + + +task_theme_preview_command + +theme-preview-command +Add 'xyph-actuator theme --preview'... + + + +task_theme_shared_module + +theme-shared-module +Extract chalk theme utilities to sr... + + + +task_theme_shared_module->task_actuator_theme_destructure + + + + + +task_theme_shared_module->task_inline_color_status + + + + + +task_theme_shared_module->task_theme_preview_command + + + + + +task_vi_stub_env_migration + +vi-stub-env-migration +Migrate resolve.test.ts process.env... 
+ + + +task_BJU_001 + +BJU-001 +Theme bridge: create bijou preset f... + + + +task_BJU_002 + +BJU-002 +Port render-status.ts to bijou comp... + + + +task_BJU_009 + +BJU-009 +Wire graph.watch() into TEA loop fo... + + + +task_BJU_002->task_BJU_009 + + + + + +task_DSH_008 + +DSH-008 +RoadmapView: typeahead search/filte... + + + +task_BJU_002->task_DSH_008 + + + + + +task_dashboard_adapter_error_isolation + +dashboard-adapter-error-isolation +Add per-node error logging to WarpD... + + + +task_BJU_002->task_dashboard_adapter_error_isolation + + + + + +task_dashboard_resize_handler + +dashboard-resize-handler +Add terminal resize handler (SIGWIN... + + + +task_BJU_002->task_dashboard_resize_handler + + + + + +task_e2e_dashboard_smoke + +e2e-dashboard-smoke +Add e2e smoke test for dashboard st... + + + +task_BJU_002->task_e2e_dashboard_smoke + + + + + +task_help_modal_warp_glossary + +help-modal-warp-glossary +Review HelpModal glossary entry for... + + + +task_BJU_002->task_help_modal_warp_glossary + + + + + +task_snapshot_render_regression + +snapshot-render-regression +Add snapshot regression tests for r... + + + +task_BJU_002->task_snapshot_render_regression + + + + + +task_statusline_graph_health + +statusline-graph-health +StatusLine: show graph health indic... + + + +task_BJU_002->task_statusline_graph_health + + + + + +task_tui_logger_unit_tests + +tui-logger-unit-tests +Add unit tests for TuiLogger parent... + + + +task_BJU_002->task_tui_logger_unit_tests + + + + + +task_tui_min_size_guard + +tui-min-size-guard +Dashboard: show friendly message wh... + + + +task_BJU_002->task_tui_min_size_guard + + + + + +task_tui_submission_stepper + +tui-submission-stepper +Stepper component for submission li... + + + +task_BJU_002->task_tui_submission_stepper + + + + + +task_warp_explorer_view + +warp-explorer-view +TUI: WARP Graph Explorer view (writ... + + + +task_BJU_002->task_warp_explorer_view + + + + + +task_BJU_003 + +BJU-003 +TEA app shell: App<DashboardModel> ... 
+ + + +task_BJU_004 + +BJU-004 +Port RoadmapView to pure view funct... + + + +task_BJU_005 + +BJU-005 +Port LineageView to pure view funct... + + + +task_BJU_006 + +BJU-006 +Port AllNodesView to pure view func... + + + +task_BJU_007 + +BJU-007 +Port InboxView to pure view functio... + + + +task_BJU_008 + +BJU-008 +Wire createKeyMap + createInputStac... + + + +task_BJU_010 + +BJU-010 +Remove React/Ink/chalk/cli-table3/b... + + + +task_BJU_009->task_BJU_010 + + + + + +task_BX_013 + +BX-013 +TUI LIVE/PINNED mode: visual indica... + + + +task_BJU_009->task_BX_013 + + + + + +task_tui_toast_watch + +tui-toast-watch +Toast notifications for remote grap... + + + +task_BJU_009->task_tui_toast_watch + + + + + +task_BX_021 + +BX-021 +WarpDashboardAdapter: graceful degr... + + + +task_DSH_001 + +DSH-001 +Fix campaign nodes: type stored as ... + + + +task_DSH_002 + +DSH-002 +Add xyph-actuator campaign command ... + + + +task_DSH_002->task_DSH_001 + + + + + +task_DSH_003 + +DSH-003 +Add xyph-actuator link-intent comma... + + + +task_DSH_002->task_DSH_003 + + + + + +task_DSH_010 + +DSH-010 +Campaign status auto-computed from ... + + + +task_DSH_002->task_DSH_010 + + + + + +task_DSH_004 + +DSH-004 +Fix IngestService test: "should sk... + + + +task_DSH_005 + +DSH-005 +Dashboard auto-refresh: toggle with... + + + +task_DSH_006 + +DSH-006 +Show "last refreshed HH:MM:SS" ti... + + + +task_DSH_007 + +DSH-007 +RoadmapView: show per-campaign comp... + + + +task_DSH_009 + +DSH-009 +Dashboard: g key toggles GRAVEYARD ... + + + +task_GRV_001 + +GRV-001 +Graveyard TUI view: browsable rejec... + + + +task_GRV_001->task_DSH_009 + + + + + +task_GRV_002 + +GRV-002 +Graveyard reopen action: r key send... + + + +task_GRV_001->task_GRV_002 + + + + + +task_GRV_003 + +GRV-003 +Graveyard patterns section: rejecti... + + + +task_GRV_002->task_GRV_003 + + + + + +task_LIN_001 + +LIN-001 +Surface intent description in Inten... + + + +task_LIN_002 + +LIN-002 +Lineage view: intent cards with des... 
+ + + +task_LIN_001->task_LIN_002 + + + + + +task_LIN_003 + +LIN-003 +Lineage view: promote orphan sovere... + + + +task_LIN_002->task_LIN_003 + + + + + +task_OVR_001 + +OVR-001 +Overview redesign: project header w... + + + +task_OVR_002 + +OVR-002 +Overview redesign: in-progress sect... + + + +task_OVR_001->task_OVR_002 + + + + + +task_OVR_003 + +OVR-003 +Overview redesign: campaign progres... + + + +task_OVR_002->task_OVR_003 + + + + + +task_OVR_004 + +OVR-004 +Overview redesign: My Issues panel,... + + + +task_OVR_003->task_OVR_004 + + + + + +task_OVR_005 + +OVR-005 +Change default dashboard view from ... + + + +task_OVR_004->task_OVR_005 + + + + + +task_OVR_006 + +OVR-006 +Dashboard alert bar: sovereignty vi... + + + +task_OVR_005->task_OVR_006 + + + + + +task_OVR_007 + +OVR-007 +Dashboard inbox pressure indicator ... + + + +task_OVR_005->task_OVR_007 + + + + + +task_OVR_008 + +OVR-008 +Dashboard dependency blockers summa... + + + +task_OVR_005->task_OVR_008 + + + + + +task_OVR_009 + +OVR-009 +Dashboard writer activity panel sho... + + + +task_OVR_005->task_OVR_009 + + + + + +task_OVR_010 + +OVR-010 +Dashboard quick actions: claim fron... + + + +task_OVR_005->task_OVR_010 + + + + + +task_OVR_011 + +OVR-011 +Dashboard campaign focus mode: filt... + + + +task_OVR_005->task_OVR_011 + + + + + +task_OVR_012 + +OVR-012 +Rename overview view to dashboard e... + + + +task_OVR_012->task_OVR_001 + + + + + +task_SUB_PERF_001 + +SUB-PERF-001 +WarpSubmissionAdapter.getOpenSubmis... + + + +task_SUB_REFACTOR_001 + +SUB-REFACTOR-001 +Extract submission assembly from Wa... + + + +task_SUB_SAFETY_001 + +SUB-SAFETY-001 +GitWorkspaceAdapter.merge mutates w... + + + +task_SUB_SCHEMA_001 + +SUB-SCHEMA-001 +decision: prefix collision — pre-ex... + + + +task_SUB_SCHEMA_001->task_DSH_002 + + + + + +task_SUB_TUI_001 + +SUB-TUI-001 +Add submissions view to TUI dashboa... + + + +task_SUB_TUI_002 + +SUB-TUI-002 +Show submission status badges in Ro... 
+ + + +task_VOC_001 + +VOC-001 +Vocabulary rename: INBOX→BACKLOG, B... + + + +task_VOC_002 + +VOC-002 +Status normalization layer in Graph... + + + +task_VOC_001->task_VOC_002 + + + + + +task_VOC_003 + +VOC-003 +Promotion as DAG insertion: require... + + + +task_VOC_002->task_VOC_003 + + + + + +task_appframe_migration + +appframe-migration +Migrate DashboardApp to bijou appFr... + + + +task_bijou_dag_renderer + +bijou-dag-renderer +Upstream bijou dag() component: ASC... + + + +task_dag_visualization + +dag-visualization +DAG visualization: SVG via Graphviz... + + + +task_bijou_dag_renderer->task_dag_visualization + + + + + +task_bijou_generic_resolved_theme + +bijou-generic-resolved-theme +Upstream bijou: generic ResolvedThe... + + + +task_bijou_type_guards + +bijou-type-guards +Adopt isKeyMsg()/isResizeMsg() type... + + + +task_bijou_v09_title_refactor + +bijou-v09-title-refactor +Refactor landing view to use bijou ... + + + +task_command_palette + +command-palette +Add command palette to TUI dashboar... + + + +task_confirm_overlay_integration_test + +confirm-overlay-integration-test +Integration test: confirm overlay r... + + + +task_dashboard_focus_clamp_test + +dashboard-focus-clamp-test +Add test for dashboard focusRow cla... + + + +task_dashboard_visibility_constants + +dashboard-visibility-constants +Extract dashboard panel visibility ... + + + +task_doc_tui_plan_update + +doc-tui-plan-update +Update TUI-plan.md: mark phases 4-5... + + + +task_doc_tui_plan_update->task_style_guide_md040 + + + + + +task_graphmeta_drop_tipsha + +graphmeta-drop-tipsha +Remove tipSha from GraphMeta (no lo... + + + +task_ink_fullscreen_pr + +ink-fullscreen-pr +Upstream PR: Ink fullscreen mode (a... + + + +task_inkstatus_type_safety + +inkstatus-type-safety +Evaluate narrowing inkStatus() para... + + + +task_tui_chord_commands + +tui-chord-commands +Vim-style chord commands via bijou ... + + + +task_tui_quest_modal + +tui-quest-modal +Composite modal for quest deep-dive... 
+ + + +task_tui_chord_commands->task_tui_quest_modal + + + + + +task_tui_runscript_tests + +tui-runscript-tests +Add runScript()-based automated tes... + + + +task_upstream_ink_fullscreen + +upstream-ink-fullscreen +Investigate upstreaming alternate-s... + + + +task_worker_thread_loading + +worker-thread-loading +Offload fetchSnapshot to worker_thr... + + + +task_benchmark_large_graphs + +benchmark-large-graphs +Benchmarks: materialize/query perf ... + + + +task_FRG_001 + +FRG-001 +REVIEW phase — human-readable diff ... + + + +task_FRG_002 + +FRG-002 +EMIT phase — PlanPatchArtifact + Ro... + + + +task_FRG_001->task_FRG_002 + + + + + +task_FRG_003 + +FRG-003 +APPLY phase — optimistic concurrenc... + + + +task_FRG_002->task_FRG_003 + + + + + +task_FRG_004 + +FRG-004 +Full pipeline integration test: ING... + + + +task_FRG_003->task_FRG_004 + + + + + +task_HRB_001 + +HRB-001 +Implement Coordinator Heartbeat + + + +task_HRB_002 + +HRB-002 +Implement Phase 1: Ingest + + + +task_HRB_003 + +HRB-003 +Implement Phase 2: Normalize + + + +task_HRB_004 + +HRB-004 +Implement Orchestration FSM + + + +task_ORC_001 + +ORC-001 +CLASSIFY phase — intent classificat... + + + +task_ORC_002 + +ORC-002 +Full MUST/SHOULD/COULD policy engin... + + + +task_ORC_001->task_ORC_002 + + + + + +task_ORC_003 + +ORC-003 +MERGE phase — candidate vs. snapsho... + + + +task_ORC_002->task_ORC_003 + + + + + +task_ORC_004 + +ORC-004 +Anti-chain generation — MECE parall... + + + +task_ORC_003->task_ORC_004 + + + + + +task_SOV_001 + +SOV-001 +intent: node type + xyph-actuator i... + + + +task_SOV_002 + +SOV-002 +Constitutional enforcement — reject... + + + +task_SOV_003 + +SOV-003 +Approval gate node type (Article IV... + + + +task_SOV_004 + +SOV-004 +Guild Seal cryptographic signing on... + + + +task_submission_read_perf + +submission-read-perf +WarpSubmissionAdapter: deduplicate ... + + + +task_TRC_001 + +TRC-001 +Story + requirement node types: act... 
+ + + +task_TRC_002 + +TRC-002 +implements + decomposes-to edge typ... + + + +task_TRC_003 + +TRC-003 +GraphContext + GraphSnapshot extens... + + + +task_TRC_004 + +TRC-004 +status --view trace renderer: requi... + + + +task_TRC_005 + +TRC-005 +Criterion + evidence node types: ac... + + + +task_TRC_006 + +TRC-006 +GraphContext extension for criterio... + + + +task_TRC_007 + +TRC-007 +xyph scan: test annotation parser (... + + + +task_TRC_008 + +TRC-008 +Completeness queries: unmet require... + + + +task_TRC_009 + +TRC-009 +Policy node type + governs edge: De... + + + +task_TRC_010 + +TRC-010 +Computed DONE status: TraceabilityS... + + + +task_TRC_009->task_TRC_010 + + + + + +task_TRC_011 + +TRC-011 +Enforce Definition of Done in seal/... + + + +task_TRC_010->task_TRC_011 + + + + + +task_TRC_012 + +TRC-012 +Constraint, assumption, risk, spike... + + + +task_TRC_013 + +TRC-013 +Gap detection + risk analysis queri... + + + +task_TRC_012->task_TRC_013 + + + + + +task_TRG_001 + +TRG-001 +Promotion review workflow: propose/... + + + +task_TRG_002 + +TRG-002 +Triage policy config: approval coun... + + + +task_TRG_001->task_TRG_002 + + + + + +task_TRG_003 + +TRG-003 +TUI triage view: pending proposals,... + + + +task_TRG_002->task_TRG_003 + + + + + +task_TRG_004 + +TRG-004 +Triage recommendation engine: campa... + + + +task_TRG_003->task_TRG_004 + + + + + +task_TRG_005 + +TRG-005 +Triage report command: structured i... + + + +task_TRG_004->task_TRG_005 + + + + + +task_WVR_001 + +WVR-001 +depends-on / blocked-by edge types ... + + + +task_WVR_002 + +WVR-002 +DAG cycle detection at ingest (hard... + + + +task_WVR_003 + +WVR-003 +Frontier computation — ready set of... + + + +task_WVR_004 + +WVR-004 +Topological sort via Kahn's algorit... + + + +task_WVR_005 + +WVR-005 +Critical path calculation via Dijks... + + + +task_WVR_006 + +WVR-006 +Fix keyIdForAgent() to derive multi... + + + +task_IDEA_HEATMAP + +IDEA-HEATMAP +Traceability heat map: coverage-col... 
+ + + +task_IDEA_SCAN_IMPL + +IDEA-SCAN-IMPL +Extend xyph scan to parse @xyph imp... + + + +task_IDEA_TEMPORAL_TRACE + +IDEA-TEMPORAL-TRACE +Temporal traceability queries via g... + + + +task_auto_graph_push_hook + +auto-graph-push-hook +Post-push hook: auto-push WARP writ... + + + +task_ci_graph_cache + +ci-graph-cache +CI graph materialization cache: Git... + + + +task_dashboard_suggestion_widget + +dashboard-suggestion-widget +Suggestion widget in dashboard righ... + + + +task_roadmap_coverage_badge + +roadmap-coverage-badge +Coverage badge in roadmap view (cri... + + + +task_scan_production_annotations + +scan-production-annotations +Extend xyph scan to production code... + + + +task_self_referential_analyze + +self-referential-analyze +Self-referential analyze: run xyph ... + + + +task_soft_gate_merge + +soft-gate-merge +Soft-gate merge: warn on unmet trac... + + + +task_suggestion_calibrate + +suggestion-calibrate +Suggestion learning loop: xyph anal... + + + +task_suggestion_learning_loop + +suggestion-learning-loop +Suggestion learning loop: auto-cali... + + + +task_temporal_traceability + +temporal-traceability +Temporal traceability queries: CTL*... + + + +task_temporal_traceability_queries + +temporal-traceability-queries +Temporal traceability: use CTL* ope... + + + +task_traceability_heat_map + +traceability-heat-map +TUI traceability heat map: color-co... + + + +task_traceability_heatmap + +traceability-heatmap +Traceability heat map: visual cover... + + + +task_tui_suggestion_tab + +tui-suggestion-tab +Suggestion review tab in TUI (tab 6... 
+ + + +task_agent_briefing + +task_agent_briefing + + + +task_agent_briefing->task_AGT_006 + + + + + +leg_DONE + +DONE + + + +leg_IN_PROGRESS + +IN_PROGRESS + + + + +leg_PLANNED + +PLANNED + + + + +leg_BACKLOG + +BACKLOG + + + + diff --git a/package.json b/package.json index 1754049..8d340b2 100644 --- a/package.json +++ b/package.json @@ -12,6 +12,7 @@ "test:local": "vitest run", "graph:pull": "git fetch origin 'refs/warp/xyph-roadmap/writers/*:refs/warp/xyph-roadmap/writers/*'", "graph:push": "git push origin 'refs/warp/xyph-roadmap/writers/*:refs/warp/xyph-roadmap/writers/*'", + "graph:work": "npx tsx scripts/generate-work-dag.ts", "postinstall": "patch-package" }, "dependencies": { diff --git a/scripts/generate-work-dag.ts b/scripts/generate-work-dag.ts new file mode 100644 index 0000000..df90beb --- /dev/null +++ b/scripts/generate-work-dag.ts @@ -0,0 +1,730 @@ +#!/usr/bin/env -S npx tsx +/** + * Generate the XYPH Work DAG analysis suite. + * + * Produces: + * docs/work/all-hr.svg, all-vert.svg — full DAG in both orientations + * docs/work//-hr/vert.svg — per-campaign views + * docs/work/backlog-hr.svg, backlog-vert.svg + * docs/work/graveyard-hr.svg, graveyard-vert.svg + * docs/work/work.md — full analysis document + * + * Usage: npx tsx scripts/generate-work-dag.ts + */ + +import WarpGraph, { GitGraphAdapter } from '@git-stunts/git-warp'; +import Plumbing from '@git-stunts/plumbing'; +import { execSync } from 'node:child_process'; +import { mkdirSync, writeFileSync } from 'node:fs'; +import { join } from 'node:path'; +import { normalizeQuestStatus } from '../src/domain/entities/Quest.js'; +import { + computeFrontier, + computeTopBlockers, + computeCriticalPath, + type TaskSummary, + type DepEdge, +} from '../src/domain/services/DepAnalysis.js'; +import { + computeLevels, + dagWidth, + scheduleWorkers, + transitiveReduction, + transitiveClosure, + computeAntiChains, + reverseReachability, + computeProvenance, +} from '../src/domain/services/DagAnalysis.js'; + 
+// --------------------------------------------------------------------------- +// Config +// --------------------------------------------------------------------------- + +const WRITER_ID = process.env['XYPH_AGENT_ID'] ?? 'agent.prime'; +const OUTPUT_DIR = join(process.cwd(), 'docs', 'work'); +const WORKERS = 4; + +// Dark theme colors +const STATUS_COLORS: Record = { + DONE: { fill: '#2d5016', font: '#b8e6a0', border: '#4a8c28' }, + IN_PROGRESS: { fill: '#1a4a6e', font: '#a0d4f7', border: '#2980b9' }, + PLANNED: { fill: '#4a3560', font: '#c9a0f7', border: '#7b52a0' }, + BACKLOG: { fill: '#3a3a3a', font: '#cccccc', border: '#666666' }, + GRAVEYARD: { fill: '#2a1a1a', font: '#996666', border: '#553333' }, +}; + +const FRONTIER_COLORS = { fill: '#5c4a00', font: '#ffd700', border: '#daa520' }; + +const CAMPAIGN_COLORS: Record = { + 'campaign:CLITOOL': '#e67e22', + 'campaign:DASHBOARD': '#2ecc71', + 'campaign:AGENT': '#e74c3c', + 'campaign:ORACLE': '#9b59b6', + 'campaign:FORGE': '#f39c12', + 'campaign:WEAVER': '#1abc9c', + 'campaign:TRIAGE': '#3498db', + 'campaign:BEDROCK': '#95a5a6', + 'campaign:HEARTBEAT': '#95a5a6', + 'campaign:SOVEREIGNTY': '#95a5a6', + 'campaign:SUBMISSION': '#95a5a6', + 'campaign:TRACEABILITY': '#16a085', +}; + +// --------------------------------------------------------------------------- +// Types +// --------------------------------------------------------------------------- + +interface TaskNode { + id: string; + title: string; + status: string; + hours: number; + campaign: string | null; + campaignTitle: string | null; + deps: string[]; +} + +interface DotOptions { + rankdir: 'LR' | 'TB'; + title?: string; + filter?: (t: TaskNode) => boolean; + highlightPath?: Set; + highlightFrontier?: Set; + highlightBlockers?: Set; +} + +// --------------------------------------------------------------------------- +// Data loading +// --------------------------------------------------------------------------- + +async function loadGraph(): Promise<{ + 
tasks: Map; + campaigns: Map; +}> { + const plumbing = Plumbing.createDefault({ cwd: process.cwd() }); + const persistence = new GitGraphAdapter({ plumbing }); + + const graph = await WarpGraph.open({ + persistence, + graphName: 'xyph-roadmap', + writerId: WRITER_ID, + autoMaterialize: true, + }); + await graph.syncCoverage(); + await graph.materialize(); + + const allNodes = await graph.getNodes(); + const taskIds = allNodes.filter((n) => n.startsWith('task:')); + const campaignIds = allNodes.filter((n) => n.startsWith('campaign:') || n.startsWith('milestone:')); + + // Load campaign titles + const campaigns = new Map(); + for (const cid of campaignIds) { + const props = await graph.getNodeProps(cid); + if (props) { + campaigns.set(cid, (props.get('title') as string) ?? cid.replace(/^(campaign|milestone):/, '')); + } + } + + // Load tasks + const tasks = new Map(); + for (const id of taskIds) { + const props = await graph.getNodeProps(id); + if (!props) continue; + + const rawStatus = (props.get('status') as string) ?? 'BACKLOG'; + const status = normalizeQuestStatus(rawStatus); + const title = (props.get('title') as string) ?? id; + const hours = Number(props.get('hours') ?? 1); + + const neighbors = (await graph.neighbors(id, 'outgoing')) as Array<{ + label: string; + nodeId: string; + }>; + + const campaignEdge = neighbors.find( + (n) => n.label === 'belongs-to' && (n.nodeId.startsWith('campaign:') || n.nodeId.startsWith('milestone:')), + ); + + const deps = neighbors + .filter((n) => n.label === 'depends-on') + .map((n) => n.nodeId); + + const campaignId = campaignEdge?.nodeId ?? null; + tasks.set(id, { + id, + title, + status, + hours: Number.isFinite(hours) ? hours : 1, + campaign: campaignId, + campaignTitle: campaignId ? (campaigns.get(campaignId) ?? 
null) : null, + deps, + }); + } + + return { tasks, campaigns }; +} + +// --------------------------------------------------------------------------- +// DOT generation +// --------------------------------------------------------------------------- + +function escapeLabel(s: string): string { + return s.replace(/"/g, '\\"').replace(/\n/g, '\\n'); +} + +function generateDot( + allTasks: Map, + opts: DotOptions, +): string { + const filteredIds = new Set(); + for (const [id, task] of allTasks) { + if (!opts.filter || opts.filter(task)) { + filteredIds.add(id); + } + } + + // Also include deps of filtered tasks that exist + const inDag = new Set(filteredIds); + for (const id of filteredIds) { + const task = allTasks.get(id); + if (task) { + for (const dep of task.deps) { + if (allTasks.has(dep)) inDag.add(dep); + } + } + } + + // Group by campaign + const byCampaign = new Map(); + for (const id of inDag) { + const task = allTasks.get(id); + if (!task) continue; + const key = task.campaign ?? '(none)'; + const arr = byCampaign.get(key) ?? []; + arr.push(id); + byCampaign.set(key, arr); + } + + const lines: string[] = []; + lines.push('digraph XYPH {'); + lines.push(` rankdir=${opts.rankdir};`); + lines.push(' bgcolor="#1a1a2e";'); + lines.push(' node [shape=box, style="filled,rounded", fontname="Helvetica", fontsize=10, margin="0.15,0.08"];'); + lines.push(' edge [color="#555555", arrowsize=0.7];'); + lines.push(' graph [fontname="Helvetica", fontsize=12, fontcolor="#cccccc"];'); + + if (opts.title) { + lines.push(` label="${escapeLabel(opts.title)}";`); + lines.push(' labelloc=t;'); + lines.push(' fontsize=16;'); + } + + lines.push(''); + + let clusterIdx = 0; + for (const [campaign, ids] of byCampaign) { + const campaignColor = CAMPAIGN_COLORS[campaign] ?? '#666666'; + const task0 = ids[0] ? allTasks.get(ids[0]) : undefined; + const label = campaign === '(none)' + ? 'Unassigned' + : (task0?.campaignTitle ?? 
campaign.replace(/^(campaign|milestone):/, 'M: ')); + + lines.push(` subgraph cluster_${clusterIdx++} {`); + lines.push(` label="${escapeLabel(label)}";`); + lines.push(' style=dashed;'); + lines.push(` color="${campaignColor}";`); + lines.push(` fontcolor="${campaignColor}";`); + lines.push(''); + + for (const id of ids) { + const task = allTasks.get(id); + if (!task) continue; + + const isFrontier = opts.highlightFrontier?.has(id) ?? false; + const isBlocker = opts.highlightBlockers?.has(id) ?? false; + const isCritical = opts.highlightPath?.has(id) ?? false; + + const sc = isFrontier + ? FRONTIER_COLORS + : STATUS_COLORS[task.status] ?? STATUS_COLORS['BACKLOG']!; + + const shortId = id.replace('task:', ''); + const tags: string[] = []; + if (isCritical) tags.push('CP'); + if (isFrontier) tags.push('*'); + const tagStr = tags.length > 0 ? ` [${tags.join(',')}]` : ''; + const truncTitle = task.title.length > 35 ? task.title.slice(0, 35) : task.title; + const label = `${shortId}${tagStr}\\n${escapeLabel(truncTitle)}`; + + const penwidth = isBlocker || isCritical ? ', penwidth=3' : ''; + const borderColor = isBlocker ? '#ff4444' : (isCritical ? '#ff8800' : sc.border); + + lines.push( + ` "${id}" [label="${label}", fillcolor="${sc.fill}", fontcolor="${sc.font}", color="${borderColor}"${penwidth}];`, + ); + } + + lines.push(' }'); + lines.push(''); + } + + // Edges: prerequisite → dependent (work flows in rankdir direction) + for (const id of inDag) { + const task = allTasks.get(id); + if (!task) continue; + for (const dep of task.deps) { + if (!inDag.has(dep)) continue; + const depTask = allTasks.get(dep); + const edgeColor = depTask?.status === 'DONE' ? 
'#4a8c28' : '#888888'; + lines.push(` "${dep}" -> "${id}" [color="${edgeColor}"];`); + } + } + + // Legend + lines.push(''); + lines.push(' subgraph cluster_legend {'); + lines.push(' label="Legend";'); + lines.push(' style=dashed;'); + lines.push(' color="#444444";'); + lines.push(' fontcolor="#999999";'); + lines.push(' "leg_done" [label="DONE", fillcolor="#2d5016", fontcolor="#b8e6a0", color="#4a8c28"];'); + lines.push(' "leg_wip" [label="IN_PROGRESS", fillcolor="#1a4a6e", fontcolor="#a0d4f7", color="#2980b9"];'); + lines.push(' "leg_frontier" [label="FRONTIER", fillcolor="#5c4a00", fontcolor="#ffd700", color="#daa520"];'); + lines.push(' "leg_planned" [label="PLANNED", fillcolor="#4a3560", fontcolor="#c9a0f7", color="#7b52a0"];'); + lines.push(' "leg_backlog" [label="BACKLOG", fillcolor="#3a3a3a", fontcolor="#cccccc", color="#666666"];'); + lines.push(' "leg_done" -> "leg_wip" -> "leg_frontier" -> "leg_planned" -> "leg_backlog" [style=invis];'); + lines.push(' }'); + + lines.push('}'); + return lines.join('\n'); +} + +// --------------------------------------------------------------------------- +// SVG rendering +// --------------------------------------------------------------------------- + +function renderSvg(dotContent: string, outputPath: string): void { + const dotFile = outputPath.replace(/\.svg$/, '.dot'); + writeFileSync(dotFile, dotContent); + try { + execSync(`dot -Tsvg "${dotFile}" -o "${outputPath}"`, { stdio: 'pipe' }); + console.log(` SVG: ${outputPath}`); + } catch (err) { + console.error(` FAIL: ${outputPath} — is graphviz installed? (brew install graphviz)`); + console.error(err instanceof Error ? 
err.message : String(err)); + } +} + +function generatePair( + allTasks: Map, + dir: string, + baseName: string, + opts: Omit, +): void { + const hrDot = generateDot(allTasks, { ...opts, rankdir: 'LR' }); + const vtDot = generateDot(allTasks, { ...opts, rankdir: 'TB' }); + renderSvg(hrDot, join(dir, `${baseName}-hr.svg`)); + renderSvg(vtDot, join(dir, `${baseName}-vert.svg`)); +} + +// --------------------------------------------------------------------------- +// Analysis helpers +// --------------------------------------------------------------------------- + +function buildAnalysisInputs(tasks: Map): { + summaries: TaskSummary[]; + edges: DepEdge[]; + sorted: string[]; +} { + const summaries: TaskSummary[] = []; + const edges: DepEdge[] = []; + + for (const [, task] of tasks) { + summaries.push({ id: task.id, status: task.status, hours: task.hours }); + for (const dep of task.deps) { + if (tasks.has(dep)) { + edges.push({ from: task.id, to: dep }); + } + } + } + + // Topological sort via Kahn's algorithm + const inDegree = new Map(); + const adj = new Map(); + for (const s of summaries) { + inDegree.set(s.id, 0); + adj.set(s.id, []); + } + for (const e of edges) { + inDegree.set(e.from, (inDegree.get(e.from) ?? 0) + 1); + const arr = adj.get(e.to) ?? []; + arr.push(e.from); + adj.set(e.to, arr); + } + + const queue: string[] = []; + for (const [id, deg] of inDegree) { + if (deg === 0) queue.push(id); + } + queue.sort(); // determinism + + const sorted: string[] = []; + while (queue.length > 0) { + const node = queue.shift()!; + sorted.push(node); + for (const dep of adj.get(node) ?? []) { + const newDeg = (inDegree.get(dep) ?? 
1) - 1; + inDegree.set(dep, newDeg); + if (newDeg === 0) { + // Insert sorted for determinism + const insertIdx = queue.findIndex((q) => q > dep); + if (insertIdx === -1) queue.push(dep); + else queue.splice(insertIdx, 0, dep); + } + } + } + + return { summaries, edges, sorted }; +} + +// --------------------------------------------------------------------------- +// work.md generation +// --------------------------------------------------------------------------- + +function generateWorkMd( + tasks: Map, + summaries: TaskSummary[], + edges: DepEdge[], + sorted: string[], +): string { + const lines: string[] = []; + const now = new Date().toISOString().slice(0, 10); + + // Header + lines.push('# XYPH Work DAG Analysis'); + lines.push(''); + lines.push(`Generated: ${now} | ${summaries.length} quests | ${edges.length} dependency edges`); + lines.push(''); + + // --- Topological Sort --- + lines.push('## Topological Sort'); + lines.push(''); + lines.push('| # | Task ID | Title | Status | Campaign | Hours |'); + lines.push('|---|---------|-------|--------|----------|-------|'); + for (let i = 0; i < sorted.length; i++) { + const id = sorted[i]!; + const task = tasks.get(id); + if (!task) continue; + const campaign = task.campaignTitle ?? task.campaign?.replace(/^(campaign|milestone):/, '') ?? '—'; + lines.push(`| ${i + 1} | \`${id}\` | ${task.title} | ${task.status} | ${campaign} | ${task.hours} |`); + } + lines.push(''); + + // --- Critical Path --- + const cp = computeCriticalPath(sorted, summaries, edges); + lines.push('## Critical Path Analysis'); + lines.push(''); + if (cp.path.length > 0) { + lines.push(`**Total hours:** ${cp.totalHours} | **Length:** ${cp.path.length} tasks`); + lines.push(''); + lines.push('```'); + lines.push(cp.path.map((id) => { + const task = tasks.get(id); + const h = task?.hours ?? 
0; + return `${id} (${h}h)`; + }).join(' → ')); + lines.push('```'); + lines.push(''); + + // Bottleneck: the critical path task with most hours + const bottleneck = cp.path.reduce((max, id) => { + const t = tasks.get(id); + const h = t?.hours ?? 0; + const maxH = tasks.get(max)?.hours ?? 0; + return h > maxH ? id : max; + }, cp.path[0]!); + const btTask = tasks.get(bottleneck); + if (btTask) { + lines.push(`**Bottleneck:** \`${bottleneck}\` — ${btTask.title} (${btTask.hours}h)`); + lines.push(''); + } + } else { + lines.push('No critical path (no dependencies or all tasks DONE).'); + lines.push(''); + } + + // --- Parallelism and Leveling --- + const levels = computeLevels(sorted, edges); + const width = dagWidth(levels); + lines.push('## Parallelism and Leveling'); + lines.push(''); + lines.push('### Width of DAG'); + lines.push(''); + lines.push(`**Max parallelism:** ${width.width} tasks at level ${width.widestLevel}`); + lines.push(''); + + // Level histogram + const levelGroups = new Map(); + for (const [id, level] of levels) { + const arr = levelGroups.get(level) ?? []; + arr.push(id); + levelGroups.set(level, arr); + } + const sortedLevelKeys = [...levelGroups.keys()].sort((a, b) => a - b); + lines.push('| Level | Count | Tasks |'); + lines.push('|-------|-------|-------|'); + for (const level of sortedLevelKeys) { + const ids = levelGroups.get(level) ?? []; + const display = ids.map((id) => `\`${id.replace('task:', '')}\``).join(', '); + lines.push(`| ${level} | ${ids.length} | ${display} |`); + } + lines.push(''); + + // Scheduling + lines.push(`### Scheduling (${WORKERS} workers)`); + lines.push(''); + const schedule = scheduleWorkers(sorted, summaries, edges, WORKERS); + lines.push(`**Makespan:** ${schedule.makespan}h`); + const serialTotal = summaries.reduce((sum, t) => sum + (t.status === 'DONE' ? 0 : t.hours), 0); + const utilization = schedule.makespan > 0 + ? 
((serialTotal / (schedule.makespan * WORKERS)) * 100).toFixed(1) + : '0.0'; + lines.push(`**Serial total:** ${serialTotal}h | **Utilization:** ${utilization}%`); + lines.push(''); + + for (const worker of schedule.schedule) { + lines.push(`**Worker ${worker.workerId}:**`); + for (const slot of worker.tasks) { + const task = tasks.get(slot.id); + const title = task?.title ?? slot.id; + lines.push(` - \`${slot.id}\` [${slot.start}h–${slot.start + slot.hours}h] ${title}`); + } + lines.push(''); + } + + // --- Transitive Reduction / Closure --- + lines.push('## Transitive Reduction and Closure'); + lines.push(''); + + const reduced = transitiveReduction(edges); + const redundantCount = edges.length - reduced.length; + lines.push('### Transitive Reduction'); + lines.push(''); + lines.push(`**Redundant edges:** ${redundantCount} of ${edges.length}`); + lines.push(''); + if (redundantCount > 0) { + const reducedSet = new Set(reduced.map((e) => `${e.from}→${e.to}`)); + const removedEdges = edges.filter((e) => !reducedSet.has(`${e.from}→${e.to}`)); + for (const e of removedEdges) { + lines.push(`- \`${e.from}\` → \`${e.to}\` (redundant)`); + } + lines.push(''); + } + + const closure = transitiveClosure(edges); + const impliedCount = closure.length - edges.length; + lines.push('### Transitive Closure'); + lines.push(''); + lines.push(`**Implied dependencies:** ${impliedCount}`); + lines.push(''); + if (impliedCount > 0) { + const originalSet = new Set(edges.map((e) => `${e.from}→${e.to}`)); + const impliedEdges = closure.filter((e) => !originalSet.has(`${e.from}→${e.to}`)); + const displayLimit = Math.min(impliedEdges.length, 20); + for (let i = 0; i < displayLimit; i++) { + const e = impliedEdges[i]!; + lines.push(`- \`${e.from}\` → \`${e.to}\``); + } + if (impliedEdges.length > 20) { + lines.push(`- ... 
and ${impliedEdges.length - 20} more`); + } + lines.push(''); + } + + // --- Ancestry and Impact --- + lines.push('## Ancestry and Impact Analysis'); + lines.push(''); + + lines.push('### Reverse Reachability (Top Blockers)'); + lines.push(''); + const topBlockers = computeTopBlockers(summaries, edges, 15); + if (topBlockers.length > 0) { + lines.push('| Task | Title | Direct | Transitive |'); + lines.push('|------|-------|--------|------------|'); + for (const b of topBlockers) { + const task = tasks.get(b.id); + const title = task?.title ?? b.id; + lines.push(`| \`${b.id}\` | ${title} | ${b.directCount} | ${b.transitiveCount} |`); + } + lines.push(''); + } else { + lines.push('No active blockers.'); + lines.push(''); + } + + lines.push('### Provenance'); + lines.push(''); + const { frontier } = computeFrontier(summaries, edges); + const prov = computeProvenance(frontier, edges); + if (prov.size > 0) { + lines.push('| Frontier Task | Title | Root Ancestors |'); + lines.push('|---------------|-------|----------------|'); + for (const [id, roots] of prov) { + const task = tasks.get(id); + const title = task?.title ?? id; + const rootStr = roots.map((r) => `\`${r}\``).join(', '); + lines.push(`| \`${id}\` | ${title} | ${rootStr} |`); + } + lines.push(''); + } else { + lines.push('No frontier tasks.'); + lines.push(''); + } + + // --- Logical Grouping --- + lines.push('## Logical Grouping (Campaigns)'); + lines.push(''); + + const campaignStats = new Map(); + for (const [, task] of tasks) { + const key = task.campaign ?? '(none)'; + const stats = campaignStats.get(key) ?? 
{ total: 0, done: 0, hours: 0, deps: 0 }; + stats.total++; + if (task.status === 'DONE') stats.done++; + stats.hours += task.hours; + stats.deps += task.deps.length; + campaignStats.set(key, stats); + } + + lines.push('| Campaign | Tasks | Done | % | Hours | Deps |'); + lines.push('|----------|-------|------|---|-------|------|'); + const sortedCampaigns = [...campaignStats.entries()].sort((a, b) => a[0].localeCompare(b[0])); + for (const [campaign, stats] of sortedCampaigns) { + const pct = stats.total > 0 ? ((stats.done / stats.total) * 100).toFixed(0) : '0'; + const label = campaign === '(none)' ? 'Unassigned' : campaign.replace(/^(campaign|milestone):/, ''); + lines.push(`| ${label} | ${stats.total} | ${stats.done} | ${pct}% | ${stats.hours} | ${stats.deps} |`); + } + lines.push(''); + + // --- Anti-chains --- + lines.push('## Anti-chains (Parallel Waves)'); + lines.push(''); + const chains = computeAntiChains(sorted, edges, summaries); + if (chains.length > 0) { + lines.push('| Wave | Parallel Tasks | Count | Total Hours |'); + lines.push('|------|----------------|-------|-------------|'); + for (let i = 0; i < chains.length; i++) { + const wave = chains[i]!; + const waveHours = wave.reduce((sum, id) => { + const t = summaries.find((s) => s.id === id); + return sum + (t?.hours ?? 0); + }, 0); + const display = wave.map((id) => `\`${id.replace('task:', '')}\``).join(', '); + lines.push(`| ${i} | ${display} | ${wave.length} | ${waveHours} |`); + } + lines.push(''); + } else { + lines.push('No active anti-chains (all tasks DONE or no tasks).'); + lines.push(''); + } + + // --- Status summary --- + lines.push('## Status Summary'); + lines.push(''); + const statusCounts = new Map(); + for (const [, task] of tasks) { + statusCounts.set(task.status, (statusCounts.get(task.status) ?? 
0) + 1); + } + lines.push('| Status | Count |'); + lines.push('|--------|-------|'); + for (const status of ['DONE', 'IN_PROGRESS', 'PLANNED', 'BACKLOG', 'GRAVEYARD']) { + const count = statusCounts.get(status) ?? 0; + if (count > 0) lines.push(`| ${status} | ${count} |`); + } + lines.push(''); + + return lines.join('\n'); +} + +// --------------------------------------------------------------------------- +// Main +// --------------------------------------------------------------------------- + +async function main(): Promise { + console.log('Loading WARP graph...'); + const { tasks, campaigns } = await loadGraph(); + console.log(`Loaded ${tasks.size} tasks, ${campaigns.size} campaigns`); + + const { summaries, edges, sorted } = buildAnalysisInputs(tasks); + + // Compute highlights + const { frontier } = computeFrontier(summaries, edges); + const frontierSet = new Set(frontier); + const cp = computeCriticalPath(sorted, summaries, edges); + const criticalSet = new Set(cp.path); + const topBlockers = computeTopBlockers(summaries, edges, 10); + const blockerSet = new Set(topBlockers.map((b) => b.id)); + + const baseOpts: Omit = { + highlightFrontier: frontierSet, + highlightPath: criticalSet, + highlightBlockers: blockerSet, + }; + + // Create output dirs + mkdirSync(OUTPUT_DIR, { recursive: true }); + + // 1. Full DAG + console.log('\nGenerating full DAG...'); + generatePair(tasks, OUTPUT_DIR, 'all', { ...baseOpts, title: 'XYPH Work DAG — All Quests' }); + + // 2. Per-campaign + const campaignIds = new Set(); + for (const [, task] of tasks) { + if (task.campaign) campaignIds.add(task.campaign); + } + + for (const campaignId of [...campaignIds].sort()) { + const campaignName = campaigns.get(campaignId) ?? 
campaignId.replace(/^(campaign|milestone):/, ''); + const safeName = campaignId.replace(/^(campaign|milestone):/, ''); + const dir = join(OUTPUT_DIR, `${safeName}`); + mkdirSync(dir, { recursive: true }); + + console.log(`\nGenerating ${safeName}...`); + generatePair(tasks, dir, safeName, { + ...baseOpts, + title: `XYPH — ${campaignName}`, + filter: (t) => t.campaign === campaignId, + }); + } + + // 3. Backlog view + console.log('\nGenerating backlog...'); + generatePair(tasks, OUTPUT_DIR, 'backlog', { + ...baseOpts, + title: 'XYPH — Backlog / Planned', + filter: (t) => t.status === 'BACKLOG' || t.status === 'PLANNED', + }); + + // 4. Graveyard view + console.log('\nGenerating graveyard...'); + generatePair(tasks, OUTPUT_DIR, 'graveyard', { + ...baseOpts, + title: 'XYPH — Graveyard', + filter: (t) => t.status === 'GRAVEYARD', + }); + + // 5. Analysis document + console.log('\nGenerating work.md...'); + const workMd = generateWorkMd(tasks, summaries, edges, sorted); + writeFileSync(join(OUTPUT_DIR, 'work.md'), workMd); + console.log(` MD: ${join(OUTPUT_DIR, 'work.md')}`); + + console.log('\nDone!'); +} + +main().catch((err) => { + console.error(`[FATAL] ${err instanceof Error ? err.message : String(err)}`); + process.exit(1); +}); diff --git a/src/domain/services/DagAnalysis.ts b/src/domain/services/DagAnalysis.ts new file mode 100644 index 0000000..4a83ec0 --- /dev/null +++ b/src/domain/services/DagAnalysis.ts @@ -0,0 +1,481 @@ +/** + * DagAnalysis — Pure functions for DAG structure analysis. + * + * Complements DepAnalysis.ts (frontier, critical path, top blockers) with + * structural analysis: leveling, width, scheduling, transitive reduction/ + * closure, anti-chains, reverse reachability, and provenance. + * + * All functions are pure: (sorted, edges, tasks) → result. + * Graph traversals (topo sort, BFS, reachability) are handled by git-warp + * natively — these functions operate on the extracted DepEdge[] data. 
+ */
+
+import type { TaskSummary, DepEdge } from './DepAnalysis.js';
+
+// ---------------------------------------------------------------------------
+// Level assignment
+// ---------------------------------------------------------------------------
+
+/**
+ * Assigns each task to its longest-path level from roots.
+ * Level = max(level of prerequisites) + 1. Roots are level 0.
+ */
+export function computeLevels(
+  sorted: string[],
+  edges: DepEdge[],
+): Map<string, number> {
+  const levels = new Map<string, number>();
+
+  // Build deps map: task → [prerequisites]
+  const depsOf = new Map<string, string[]>();
+  for (const edge of edges) {
+    const arr = depsOf.get(edge.from) ?? [];
+    arr.push(edge.to);
+    depsOf.set(edge.from, arr);
+  }
+
+  for (const node of sorted) {
+    const deps = depsOf.get(node) ?? [];
+    if (deps.length === 0) {
+      levels.set(node, 0);
+    } else {
+      let maxDepLevel = 0;
+      for (const dep of deps) {
+        const depLevel = levels.get(dep) ?? 0;
+        if (depLevel + 1 > maxDepLevel) {
+          maxDepLevel = depLevel + 1;
+        }
+      }
+      levels.set(node, maxDepLevel);
+    }
+  }
+
+  return levels;
+}
+
+// ---------------------------------------------------------------------------
+// DAG width
+// ---------------------------------------------------------------------------
+
+/**
+ * Returns the maximum number of tasks at any single level (max parallelism).
+ */
+export function dagWidth(
+  levels: Map<string, number>,
+): { width: number; widestLevel: number } {
+  if (levels.size === 0) {
+    return { width: 0, widestLevel: -1 };
+  }
+
+  const counts = new Map<number, number>();
+  for (const level of levels.values()) {
+    counts.set(level, (counts.get(level) ??
0) + 1); + } + + let width = 0; + let widestLevel = -1; + for (const [level, count] of counts) { + if (count > width) { + width = count; + widestLevel = level; + } + } + + return { width, widestLevel }; +} + +// --------------------------------------------------------------------------- +// Worker scheduling +// --------------------------------------------------------------------------- + +export interface TaskSlot { + id: string; + start: number; + hours: number; +} + +export interface WorkerSchedule { + workerId: number; + tasks: TaskSlot[]; +} + +/** + * Greedy list-scheduling: assigns tasks in topological order to the worker + * that becomes free earliest, respecting dependency constraints. + */ +export function scheduleWorkers( + sorted: string[], + tasks: TaskSummary[], + edges: DepEdge[], + workers: number, +): { schedule: WorkerSchedule[]; makespan: number } { + if (sorted.length === 0) { + return { schedule: [], makespan: 0 }; + } + + // Build hours map (DONE tasks weigh 0 — already completed) + const hoursMap = new Map(); + for (const t of tasks) { + hoursMap.set(t.id, t.status === 'DONE' ? 0 : t.hours); + } + + // Build deps map: task → [prerequisites] + const depsOf = new Map(); + for (const edge of edges) { + const arr = depsOf.get(edge.from) ?? []; + arr.push(edge.to); + depsOf.set(edge.from, arr); + } + + // Track when each task finishes + const finishTime = new Map(); + + // Worker availability: earliest time each worker is free + const workerFree = new Array(workers).fill(0); + const workerTasks = new Array(workers); + for (let i = 0; i < workers; i++) { + workerTasks[i] = []; + } + + for (const taskId of sorted) { + const hours = hoursMap.get(taskId) ?? 1; + + // Earliest start = max finish time of all prerequisites + let earliest = 0; + for (const dep of depsOf.get(taskId) ?? []) { + const depFinish = finishTime.get(dep) ?? 
0; + if (depFinish > earliest) earliest = depFinish; + } + + // Find the worker that is free earliest (but not before `earliest`) + let bestWorker = 0; + let bestStart = Math.max(workerFree[0] ?? 0, earliest); + for (let w = 1; w < workers; w++) { + const start = Math.max(workerFree[w] ?? 0, earliest); + if (start < bestStart) { + bestStart = start; + bestWorker = w; + } + } + + const slot: TaskSlot = { id: taskId, start: bestStart, hours }; + const wt = workerTasks[bestWorker]; + if (wt) wt.push(slot); + const endTime = bestStart + hours; + workerFree[bestWorker] = endTime; + finishTime.set(taskId, endTime); + } + + // Build schedule (only include workers that got tasks) + const schedule: WorkerSchedule[] = []; + for (let w = 0; w < workers; w++) { + const wTasks = workerTasks[w]; + if (wTasks && wTasks.length > 0) { + schedule.push({ workerId: w, tasks: wTasks }); + } + } + + const makespan = Math.max(...workerFree); + return { schedule, makespan }; +} + +// --------------------------------------------------------------------------- +// Transitive reduction +// --------------------------------------------------------------------------- + +/** + * Removes redundant edges: A→C is redundant if a longer path A→...→C exists. + * Uses BFS per edge to check reachability without that edge. + */ +export function transitiveReduction( + edges: DepEdge[], +): DepEdge[] { + if (edges.length === 0) return []; + + // Build adjacency: from → [to] (dependency direction) + const adj = new Map>(); + for (const edge of edges) { + const set = adj.get(edge.from) ?? 
new Set(); + set.add(edge.to); + adj.set(edge.from, set); + } + + // For each edge (from → to), check if `from` can reach `to` via other edges + const result: DepEdge[] = []; + for (const edge of edges) { + // Temporarily remove this edge + const neighbors = adj.get(edge.from); + if (!neighbors) { + result.push(edge); + continue; + } + neighbors.delete(edge.to); + + // BFS from edge.from to see if edge.to is still reachable + const reachable = bfsReachable(edge.from, edge.to, adj); + if (!reachable) { + result.push(edge); // edge is essential + } + + // Restore edge + neighbors.add(edge.to); + } + + return result; +} + +function bfsReachable( + start: string, + target: string, + adj: Map>, +): boolean { + const visited = new Set(); + const queue = [start]; + visited.add(start); + + while (queue.length > 0) { + const current = queue.shift(); + if (current === undefined) break; + for (const neighbor of adj.get(current) ?? []) { + if (neighbor === target) return true; + if (!visited.has(neighbor)) { + visited.add(neighbor); + queue.push(neighbor); + } + } + } + + return false; +} + +// --------------------------------------------------------------------------- +// Transitive closure +// --------------------------------------------------------------------------- + +/** + * Adds all implied edges: if A depends on B and B depends on C, adds A→C. + * Returns original edges plus all transitive edges (deduped). + */ +export function transitiveClosure( + edges: DepEdge[], +): DepEdge[] { + if (edges.length === 0) return []; + + // Build adjacency: from → Set + const adj = new Map>(); + for (const edge of edges) { + const set = adj.get(edge.from) ?? 
new Set(); + set.add(edge.to); + adj.set(edge.from, set); + } + + // For each node, BFS to find all reachable nodes and add edges + const allEdges = new Set(); + for (const edge of edges) { + allEdges.add(`${edge.from}→${edge.to}`); + } + + const result: DepEdge[] = [...edges]; + + // Derive node set from edges + const nodes = new Set(); + for (const edge of edges) { + nodes.add(edge.from); + nodes.add(edge.to); + } + + for (const node of nodes) { + // BFS from node following dependency direction + const visited = new Set(); + const queue: string[] = []; + + // Seed with direct deps + for (const dep of adj.get(node) ?? []) { + if (!visited.has(dep)) { + visited.add(dep); + queue.push(dep); + } + } + + while (queue.length > 0) { + const current = queue.shift(); + if (current === undefined) break; + for (const dep of adj.get(current) ?? []) { + if (!visited.has(dep)) { + visited.add(dep); + queue.push(dep); + } + } + } + + // Add transitive edges + for (const reachable of visited) { + const key = `${node}→${reachable}`; + if (!allEdges.has(key)) { + allEdges.add(key); + result.push({ from: node, to: reachable }); + } + } + } + + return result; +} + +// --------------------------------------------------------------------------- +// Anti-chain decomposition (MECE parallel waves) +// --------------------------------------------------------------------------- + +/** + * Groups non-DONE tasks into parallel waves based on dependency levels. + * Each wave is an anti-chain: tasks within a wave have no dependencies + * on each other and can run in parallel. 
+ */ +export function computeAntiChains( + sorted: string[], + edges: DepEdge[], + tasks: TaskSummary[], +): string[][] { + if (sorted.length === 0) return []; + + const doneSet = new Set(tasks.filter((t) => t.status === 'DONE').map((t) => t.id)); + const activeSorted = sorted.filter((id) => !doneSet.has(id)); + + if (activeSorted.length === 0) return []; + + // Filter edges to only include active tasks + const activeSet = new Set(activeSorted); + const activeEdges = edges.filter( + (e) => activeSet.has(e.from) && activeSet.has(e.to), + ); + + // Compute levels on active subgraph + const levels = computeLevels(activeSorted, activeEdges); + + // Group by level + const byLevel = new Map(); + for (const [id, level] of levels) { + const arr = byLevel.get(level) ?? []; + arr.push(id); + byLevel.set(level, arr); + } + + // Sort levels and build result + const sortedLevels = [...byLevel.keys()].sort((a, b) => a - b); + return sortedLevels.map((level) => { + const wave = byLevel.get(level) ?? []; + wave.sort(); + return wave; + }); +} + +// --------------------------------------------------------------------------- +// Reverse reachability +// --------------------------------------------------------------------------- + +/** + * Returns all tasks that transitively depend on the given task. + * Uses BFS over the reverse dependency graph. + */ +export function reverseReachability( + taskId: string, + edges: DepEdge[], +): string[] { + // Build reverse map: prerequisite → [dependents] + const dependentsOf = new Map(); + for (const edge of edges) { + const arr = dependentsOf.get(edge.to) ?? []; + arr.push(edge.from); + dependentsOf.set(edge.to, arr); + } + + const visited = new Set(); + const queue = [taskId]; + + while (queue.length > 0) { + const current = queue.shift(); + if (current === undefined) break; + for (const dep of dependentsOf.get(current) ?? 
[]) { + if (!visited.has(dep)) { + visited.add(dep); + queue.push(dep); + } + } + } + + const result = [...visited]; + result.sort(); + return result; +} + +// --------------------------------------------------------------------------- +// Provenance +// --------------------------------------------------------------------------- + +/** + * For each frontier task, traces back through dependencies to find root + * ancestors (tasks with no prerequisites). + */ +export function computeProvenance( + frontier: string[], + edges: DepEdge[], +): Map { + const result = new Map(); + if (frontier.length === 0) return result; + + // Build deps map: task → [prerequisites] + const depsOf = new Map(); + for (const edge of edges) { + const arr = depsOf.get(edge.from) ?? []; + arr.push(edge.to); + depsOf.set(edge.from, arr); + } + + // All nodes that appear in edges (to find roots) + const allNodes = new Set(); + for (const edge of edges) { + allNodes.add(edge.from); + allNodes.add(edge.to); + } + + for (const taskId of frontier) { + // BFS backwards through deps + const visited = new Set(); + const queue: string[] = []; + const roots: string[] = []; + + // Seed with task's own deps + const directDeps = depsOf.get(taskId) ?? []; + if (directDeps.length === 0) { + // This task IS a root + roots.push(taskId); + } else { + for (const dep of directDeps) { + if (!visited.has(dep)) { + visited.add(dep); + queue.push(dep); + } + } + } + + while (queue.length > 0) { + const current = queue.shift(); + if (current === undefined) break; + const currentDeps = depsOf.get(current) ?? 
[]; + if (currentDeps.length === 0) { + roots.push(current); + } else { + for (const dep of currentDeps) { + if (!visited.has(dep)) { + visited.add(dep); + queue.push(dep); + } + } + } + } + + roots.sort(); + result.set(taskId, roots); + } + + return result; +} diff --git a/test/unit/DagAnalysis.test.ts b/test/unit/DagAnalysis.test.ts new file mode 100644 index 0000000..e2de257 --- /dev/null +++ b/test/unit/DagAnalysis.test.ts @@ -0,0 +1,391 @@ +import { describe, it, expect } from 'vitest'; +import { + computeLevels, + dagWidth, + scheduleWorkers, + transitiveReduction, + transitiveClosure, + computeAntiChains, + reverseReachability, + computeProvenance, +} from '../../src/domain/services/DagAnalysis.js'; +import type { TaskSummary, DepEdge } from '../../src/domain/services/DepAnalysis.js'; + +function makeTasks(...specs: Array<{ id: string; status?: string; hours?: number }>): TaskSummary[] { + return specs.map((s) => ({ + id: s.id, + status: s.status ?? 'PLANNED', + hours: s.hours ?? 
1, + })); +} + +// --------------------------------------------------------------------------- +// Diamond graph: A → B, A → C, B → D, C → D +// Edge semantics: B depends-on A means { from: 'B', to: 'A' } +// Topological order: [A, B, C, D] (A is root, D is sink) +// --------------------------------------------------------------------------- +const diamondSorted = ['task:A', 'task:B', 'task:C', 'task:D']; +const diamondEdges: DepEdge[] = [ + { from: 'task:B', to: 'task:A' }, + { from: 'task:C', to: 'task:A' }, + { from: 'task:D', to: 'task:B' }, + { from: 'task:D', to: 'task:C' }, +]; +const diamondTasks = makeTasks( + { id: 'task:A', hours: 2 }, + { id: 'task:B', hours: 3 }, + { id: 'task:C', hours: 1 }, + { id: 'task:D', hours: 4 }, +); + +// --------------------------------------------------------------------------- +// Linear chain: A → B → C +// --------------------------------------------------------------------------- +const linearSorted = ['task:A', 'task:B', 'task:C']; +const linearEdges: DepEdge[] = [ + { from: 'task:B', to: 'task:A' }, + { from: 'task:C', to: 'task:B' }, +]; +const linearTasks = makeTasks( + { id: 'task:A', hours: 2 }, + { id: 'task:B', hours: 3 }, + { id: 'task:C', hours: 1 }, +); + +// --------------------------------------------------------------------------- +// computeLevels +// --------------------------------------------------------------------------- +describe('computeLevels', () => { + it('assigns levels in diamond graph: A=0, B=1, C=1, D=2', () => { + const levels = computeLevels(diamondSorted, diamondEdges); + expect(levels.get('task:A')).toBe(0); + expect(levels.get('task:B')).toBe(1); + expect(levels.get('task:C')).toBe(1); + expect(levels.get('task:D')).toBe(2); + }); + + it('assigns levels in linear chain: A=0, B=1, C=2', () => { + const levels = computeLevels(linearSorted, linearEdges); + expect(levels.get('task:A')).toBe(0); + expect(levels.get('task:B')).toBe(1); + expect(levels.get('task:C')).toBe(2); + }); + + 
it('returns empty map for empty graph', () => { + const levels = computeLevels([], []); + expect(levels.size).toBe(0); + }); + + it('assigns level 0 to a single node', () => { + const levels = computeLevels(['task:A'], []); + expect(levels.get('task:A')).toBe(0); + }); + + it('assigns level 0 to isolated nodes', () => { + const levels = computeLevels(['task:A', 'task:B', 'task:C'], []); + expect(levels.get('task:A')).toBe(0); + expect(levels.get('task:B')).toBe(0); + expect(levels.get('task:C')).toBe(0); + }); +}); + +// --------------------------------------------------------------------------- +// dagWidth +// --------------------------------------------------------------------------- +describe('dagWidth', () => { + it('returns width=2 at level 1 for diamond graph', () => { + const levels = computeLevels(diamondSorted, diamondEdges); + const result = dagWidth(levels); + expect(result.width).toBe(2); + expect(result.widestLevel).toBe(1); + }); + + it('returns width=1 for linear chain', () => { + const levels = computeLevels(linearSorted, linearEdges); + const result = dagWidth(levels); + expect(result.width).toBe(1); + }); + + it('returns width=0 for empty graph', () => { + const result = dagWidth(new Map()); + expect(result.width).toBe(0); + expect(result.widestLevel).toBe(-1); + }); + + it('returns width equal to node count for all-isolated nodes', () => { + const levels = new Map([ + ['task:A', 0], + ['task:B', 0], + ['task:C', 0], + ]); + const result = dagWidth(levels); + expect(result.width).toBe(3); + expect(result.widestLevel).toBe(0); + }); +}); + +// --------------------------------------------------------------------------- +// scheduleWorkers +// --------------------------------------------------------------------------- +describe('scheduleWorkers', () => { + it('schedules diamond with 4 workers: makespan = 9', () => { + const result = scheduleWorkers(diamondSorted, diamondTasks, diamondEdges, 4); + // Parallel: A(2), then B(3)+C(1) in parallel → B 
finishes at 5, then D(4) → makespan=9 + expect(result.makespan).toBe(9); + expect(result.schedule.length).toBeLessThanOrEqual(4); + }); + + it('schedules linear chain: makespan equals serial total', () => { + const result = scheduleWorkers(linearSorted, linearTasks, linearEdges, 4); + expect(result.makespan).toBe(6); + }); + + it('returns makespan=0 for empty graph', () => { + const result = scheduleWorkers([], [], [], 4); + expect(result.makespan).toBe(0); + expect(result.schedule).toHaveLength(0); + }); + + it('schedules single task', () => { + const tasks = makeTasks({ id: 'task:A', hours: 5 }); + const result = scheduleWorkers(['task:A'], tasks, [], 2); + expect(result.makespan).toBe(5); + expect(result.schedule).toHaveLength(1); + }); + + it('schedules independent tasks across workers', () => { + const tasks = makeTasks( + { id: 'task:A', hours: 3 }, + { id: 'task:B', hours: 3 }, + { id: 'task:C', hours: 3 }, + ); + const result = scheduleWorkers(['task:A', 'task:B', 'task:C'], tasks, [], 3); + // All independent, 3 workers → makespan = 3 + expect(result.makespan).toBe(3); + expect(result.schedule).toHaveLength(3); + }); + + it('tracks task assignments per worker', () => { + const result = scheduleWorkers(diamondSorted, diamondTasks, diamondEdges, 2); + // Every task must appear in exactly one worker's assignment + const allAssigned = result.schedule.flatMap((w) => w.tasks.map((t) => t.id)); + expect(allAssigned.sort()).toEqual(diamondSorted); + }); + + it('respects dependency ordering within schedule', () => { + const result = scheduleWorkers(diamondSorted, diamondTasks, diamondEdges, 2); + // D must start after B and C finish + const allSlots = result.schedule.flatMap((w) => w.tasks); + const dSlot = allSlots.find((s) => s.id === 'task:D'); + const bSlot = allSlots.find((s) => s.id === 'task:B'); + const cSlot = allSlots.find((s) => s.id === 'task:C'); + expect(dSlot).toBeDefined(); + expect(bSlot).toBeDefined(); + expect(cSlot).toBeDefined(); + 
expect(dSlot!.start).toBeGreaterThanOrEqual(bSlot!.start + bSlot!.hours); + expect(dSlot!.start).toBeGreaterThanOrEqual(cSlot!.start + cSlot!.hours); + }); + + it('treats DONE tasks as weight 0 (no worker time consumed)', () => { + // A is DONE (8h in graph, but should cost 0), B depends on A (1h active) + const tasks = makeTasks( + { id: 'task:A', status: 'DONE', hours: 8 }, + { id: 'task:B', hours: 1 }, + ); + const edges: DepEdge[] = [{ from: 'task:B', to: 'task:A' }]; + const result = scheduleWorkers(['task:A', 'task:B'], tasks, edges, 2); + // DONE tasks weigh 0 → makespan should be 1, not 9 + expect(result.makespan).toBe(1); + }); +}); + +// --------------------------------------------------------------------------- +// transitiveReduction +// --------------------------------------------------------------------------- +describe('transitiveReduction', () => { + it('removes redundant edge in diamond+shortcut', () => { + const edges: DepEdge[] = [ + ...diamondEdges, + { from: 'task:D', to: 'task:A' }, // shortcut — redundant + ]; + const reduced = transitiveReduction(edges); + const hasShortcut = reduced.some((e) => e.from === 'task:D' && e.to === 'task:A'); + expect(hasShortcut).toBe(false); + expect(reduced).toHaveLength(4); + }); + + it('keeps all edges in diamond without shortcuts', () => { + const reduced = transitiveReduction(diamondEdges); + expect(reduced).toHaveLength(4); + }); + + it('returns empty array for empty graph', () => { + expect(transitiveReduction([])).toEqual([]); + }); + + it('keeps edges in linear chain (no shortcuts)', () => { + const reduced = transitiveReduction(linearEdges); + expect(reduced).toHaveLength(2); + }); + + it('removes shortcut in linear chain with skip edge', () => { + const edges: DepEdge[] = [ + ...linearEdges, + { from: 'task:C', to: 'task:A' }, // C→A redundant: C→B→A + ]; + const reduced = transitiveReduction(edges); + expect(reduced).toHaveLength(2); + const hasShortcut = reduced.some((e) => e.from === 'task:C' && 
e.to === 'task:A'); + expect(hasShortcut).toBe(false); + }); +}); + +// --------------------------------------------------------------------------- +// transitiveClosure +// --------------------------------------------------------------------------- +describe('transitiveClosure', () => { + it('adds D→A in diamond graph', () => { + const closure = transitiveClosure(diamondEdges); + const hasTransitive = closure.some((e) => e.from === 'task:D' && e.to === 'task:A'); + expect(hasTransitive).toBe(true); + expect(closure).toHaveLength(5); // 4 original + D→A + }); + + it('adds C→A in linear chain', () => { + const closure = transitiveClosure(linearEdges); + const hasTransitive = closure.some((e) => e.from === 'task:C' && e.to === 'task:A'); + expect(hasTransitive).toBe(true); + expect(closure).toHaveLength(3); // 2 original + C→A + }); + + it('returns empty array for empty graph', () => { + expect(transitiveClosure([])).toEqual([]); + }); + + it('returns original edge when no transitives possible', () => { + const edges: DepEdge[] = [{ from: 'task:B', to: 'task:A' }]; + const closure = transitiveClosure(edges); + expect(closure).toHaveLength(1); + }); +}); + +// --------------------------------------------------------------------------- +// computeAntiChains +// --------------------------------------------------------------------------- +describe('computeAntiChains', () => { + it('produces 3 waves for diamond: [A], [B,C], [D]', () => { + const chains = computeAntiChains(diamondSorted, diamondEdges, diamondTasks); + expect(chains).toHaveLength(3); + expect(chains[0]).toEqual(['task:A']); + expect(chains[1]?.sort()).toEqual(['task:B', 'task:C']); + expect(chains[2]).toEqual(['task:D']); + }); + + it('produces N waves for linear chain', () => { + const chains = computeAntiChains(linearSorted, linearEdges, linearTasks); + expect(chains).toHaveLength(3); + expect(chains[0]).toEqual(['task:A']); + expect(chains[1]).toEqual(['task:B']); + expect(chains[2]).toEqual(['task:C']); 
+ }); + + it('returns empty array for empty graph', () => { + expect(computeAntiChains([], [], [])).toEqual([]); + }); + + it('puts all isolated nodes in one wave', () => { + const tasks = makeTasks( + { id: 'task:A' }, + { id: 'task:B' }, + { id: 'task:C' }, + ); + const chains = computeAntiChains(['task:A', 'task:B', 'task:C'], [], tasks); + expect(chains).toHaveLength(1); + expect(chains[0]?.sort()).toEqual(['task:A', 'task:B', 'task:C']); + }); + + it('excludes DONE tasks from waves', () => { + const tasks = makeTasks( + { id: 'task:A', status: 'DONE' }, + { id: 'task:B' }, + { id: 'task:C' }, + { id: 'task:D' }, + ); + const chains = computeAntiChains(diamondSorted, diamondEdges, tasks); + // A is DONE → excluded. B and C deps all done → wave 0. D blocked → wave 1. + expect(chains).toHaveLength(2); + expect(chains[0]?.sort()).toEqual(['task:B', 'task:C']); + expect(chains[1]).toEqual(['task:D']); + }); +}); + +// --------------------------------------------------------------------------- +// reverseReachability +// --------------------------------------------------------------------------- +describe('reverseReachability', () => { + it('returns all downstream tasks for root in diamond', () => { + const reach = reverseReachability('task:A', diamondEdges); + expect(reach.sort()).toEqual(['task:B', 'task:C', 'task:D']); + }); + + it('returns only direct dependent for leaf-adjacent node', () => { + const reach = reverseReachability('task:B', diamondEdges); + expect(reach).toEqual(['task:D']); + }); + + it('returns empty for leaf node', () => { + expect(reverseReachability('task:D', diamondEdges)).toEqual([]); + }); + + it('returns empty for unknown node', () => { + expect(reverseReachability('task:Z', diamondEdges)).toEqual([]); + }); + + it('returns empty for empty graph', () => { + expect(reverseReachability('task:A', [])).toEqual([]); + }); + + it('returns all downstream in linear chain', () => { + const reach = reverseReachability('task:A', linearEdges); + 
expect(reach.sort()).toEqual(['task:B', 'task:C']); + }); +}); + +// --------------------------------------------------------------------------- +// computeProvenance +// --------------------------------------------------------------------------- +describe('computeProvenance', () => { + it('traces frontier task D back to root A in diamond', () => { + const prov = computeProvenance(['task:D'], diamondEdges); + expect(prov.get('task:D')).toEqual(['task:A']); + }); + + it('traces mid-level tasks to their roots', () => { + const prov = computeProvenance(['task:B', 'task:C'], diamondEdges); + expect(prov.get('task:B')).toEqual(['task:A']); + expect(prov.get('task:C')).toEqual(['task:A']); + }); + + it('returns self as root for root tasks', () => { + const prov = computeProvenance(['task:A'], diamondEdges); + expect(prov.get('task:A')).toEqual(['task:A']); + }); + + it('returns empty map for empty input', () => { + expect(computeProvenance([], diamondEdges).size).toBe(0); + }); + + it('traces through linear chain to root', () => { + const prov = computeProvenance(['task:C'], linearEdges); + expect(prov.get('task:C')).toEqual(['task:A']); + }); + + it('handles multiple roots correctly', () => { + const edges: DepEdge[] = [ + { from: 'task:C', to: 'task:A' }, + { from: 'task:C', to: 'task:B' }, + ]; + const prov = computeProvenance(['task:C'], edges); + expect(prov.get('task:C')?.sort()).toEqual(['task:A', 'task:B']); + }); +});