From d8b115664736e3c7ee9a2e44e5737c11c2021da9 Mon Sep 17 00:00:00 2001 From: Cody Lee Date: Tue, 12 May 2026 18:09:53 -0500 Subject: [PATCH] feat(skills): add native pi extension for pup Adds `dd-pup-pi`, a pi coding-agent extension that exposes pup as first-class LLM tools (logs, metrics, traces, monitors, auth, ...). Install with `pup skills install --platform=pi`. --- docs/COMMANDS.md | 2 +- skills/extensions/dd-pup-pi/README.md | 67 +++ skills/extensions/dd-pup-pi/index.ts | 590 +++++++++++++++++++++++ skills/extensions/dd-pup-pi/package.json | 9 + src/commands/mod.rs | 1 + src/commands/skills.rs | 101 +++- src/main.rs | 73 ++- src/skills.rs | 519 ++++++++++++++++++-- 8 files changed, 1279 insertions(+), 83 deletions(-) create mode 100644 skills/extensions/dd-pup-pi/README.md create mode 100644 skills/extensions/dd-pup-pi/index.ts create mode 100644 skills/extensions/dd-pup-pi/package.json diff --git a/docs/COMMANDS.md b/docs/COMMANDS.md index 2cb23e1f..856395a0 100644 --- a/docs/COMMANDS.md +++ b/docs/COMMANDS.md @@ -77,7 +77,7 @@ pup [options] # Nested commands | code-coverage | branch-summary, commit-summary | src/commands/code_coverage.rs | ✅ | | hamr | connections (get, create) | src/commands/hamr.rs | ✅ | | fleet | agents (list, get, versions, tracers), deployments (list, get, configure, upgrade, cancel), schedules (list, get, create, update, delete, trigger), tracers (list), clusters (list), instrumented-pods (list) | src/commands/fleet.rs | ✅ | -| skills | list, install, path | src/commands/skills.rs | ✅ | +| skills | list, install, path (entry types: skill, agent, extension; `--platform`/`--user` for extensions) | src/commands/skills.rs | ✅ | | runbooks | list, describe, run, import, validate | src/commands/runbooks.rs | ✅ | | workflows | get, create, update, delete, run, instances (list, get, cancel), connections (get, create, update, delete) | src/commands/workflows.rs | ✅ | | investigations | list, get, trigger | src/commands/investigations.rs | ✅ | 
diff --git a/skills/extensions/dd-pup-pi/README.md b/skills/extensions/dd-pup-pi/README.md new file mode 100644 index 00000000..bc56a3a2 --- /dev/null +++ b/skills/extensions/dd-pup-pi/README.md @@ -0,0 +1,67 @@ +# dd-pup — pi extension for the Datadog `pup` CLI + +Exposes the [`pup`](https://github.com/datadog-labs/pup) Datadog CLI as +first-class pi tools, so the LLM can query telemetry and manage Datadog +resources directly. + +## Install + +```bash +# 1. install pup +brew tap datadog-labs/pack +brew install pup +pup auth login + +# 2. install the extension +# Default: project-local when run inside a git repo +# (<repo>/.pi/extensions/dd-pup-pi/), user-global otherwise +# (~/.pi/agent/extensions/dd-pup-pi/). +pup skills install --platform=pi + +# Force user-global install regardless of cwd: +pup skills install --platform=pi --user +``` + +pi auto-discovers the extension on next launch (or via `/reload`). +Override the pup binary path with `DD_PUP_BIN` if needed. + +## Tools registered for the LLM + +| Tool | Purpose | +| --- | --- | +| `pup_run` | Run **any** `pup` subcommand (escape hatch). JSON output enforced. | +| `pup_logs_search` | Search Datadog logs by query + time window. | +| `pup_logs_aggregate` | Counts / distributions / percentiles on logs. | +| `pup_metrics_query` | Time-series metric query (avg/sum/max/min/count). | +| `pup_traces_search` | APM trace search (durations are **nanoseconds**). | +| `pup_monitors_list` | List monitors with tag/name filters. | +| `pup_apm_services` | APM service list / stats per env. | +| `pup_auth_status` | Check or refresh Datadog auth. | + +All telemetry tools default to a 1h window and small limits. On a 401/403 +the extension transparently runs `pup auth refresh` once and retries. + +## Slash commands + +- `/pup <args>` — run pup directly and show output, no LLM round-trip. +- `/pup-auth` — quick menu: status / refresh / login / logout. + +## Status widget + +A footer line shows the Datadog site and token expiry, e.g. 
+ +``` +pup: ✓ datadoghq.com (exp 2026-05-12 16:06:02) +``` + +## Design notes + +- `pup_run` is the workhorse — any sub-domain the focused tools don't cover + (incidents, SLOs, dashboards, downtimes, RUM, security signals, infra + hosts, on-call, …) is still one tool call away. +- JSON output is auto-injected unless the caller passes `--output` themselves, + so results are structured (and surfaced in `details.parsed`). +- Outputs are truncated to ~24 KB of text to keep context cheap. The full + parsed JSON is still attached as tool result `details`. +- Durations in APM/trace queries are documented in the tool descriptions as + **nanoseconds** so the model stops getting that wrong. diff --git a/skills/extensions/dd-pup-pi/index.ts b/skills/extensions/dd-pup-pi/index.ts new file mode 100644 index 00000000..ae696e05 --- /dev/null +++ b/skills/extensions/dd-pup-pi/index.ts @@ -0,0 +1,590 @@ +/** + * dd-pup — Datadog CLI integration for pi + * + * Exposes the `pup` CLI as first-class LLM tools so the model can: + * - query telemetry (logs, metrics, traces, RUM, events, audit logs) + * - inspect APM services, hosts, incidents, SLOs, monitors + * - manage Datadog resources (monitors, downtimes, dashboards, notebooks, ...) + * + * Features + * -------- + * - `pup_run` flexible escape-hatch: run any `pup ...` subcommand + * - `pup_logs_search` opinionated log search (JSON) + * - `pup_logs_aggregate` counts / distributions instead of fetching raw logs + * - `pup_metrics_query` time-series metrics + * - `pup_traces_search` APM trace search (durations are nanoseconds!) + * - `pup_monitors_list` list/filter monitors + * - `pup_apm_services` APM service catalog stats + * - `pup_auth_status` check / refresh auth + * + * Auto-recovery: on 401/403, the extension transparently runs + * `pup auth refresh` once and retries the command. 
+ * + * Slash commands: + * /pup raw passthrough, prints output (no LLM round-trip) + * /pup-auth login / refresh / status helper + * + * Status widget shows authenticated site + token expiry. + */ + +import { Type, type Static } from "typebox"; +import type { + ExtensionAPI, + ExtensionContext, + ToolExecutionResult, +} from "@earendil-works/pi-coding-agent"; + +// --------------------------------------------------------------------------- +// pup runner with auto-refresh on 401/403 +// --------------------------------------------------------------------------- + +interface PupResult { + stdout: string; + stderr: string; + code: number; + killed: boolean; +} + +interface RunOptions { + signal?: AbortSignal; + timeoutMs?: number; + stdin?: string; +} + +const PUP_BIN = process.env.DD_PUP_BIN || "pup"; +const DEFAULT_TIMEOUT_MS = 90_000; +const AUTH_FAILURE_REGEX = + /\b(401|403|unauthorized|forbidden|token (?:has )?expired|please (?:re)?-?authenticate|access token)\b/i; + +async function runPup( + pi: ExtensionAPI, + args: string[], + opts: RunOptions = {}, +): Promise { + const timeout = opts.timeoutMs ?? DEFAULT_TIMEOUT_MS; + const result = await pi.exec(PUP_BIN, args, { + signal: opts.signal, + timeout, + stdin: opts.stdin, + }); + return { + stdout: result.stdout ?? "", + stderr: result.stderr ?? "", + code: result.code ?? 0, + killed: result.killed ?? false, + }; +} + +function looksLikeAuthFailure(r: PupResult): boolean { + if (r.code === 0) return false; + const blob = `${r.stdout}\n${r.stderr}`; + return AUTH_FAILURE_REGEX.test(blob); +} + +/** Run pup, and if it fails on auth, run `pup auth refresh` once then retry. */ +async function runPupWithRetry( + pi: ExtensionAPI, + args: string[], + opts: RunOptions = {}, +): Promise<{ result: PupResult; refreshed: boolean }> { + const first = await runPup(pi, args, opts); + if (!looksLikeAuthFailure(first)) return { result: first, refreshed: false }; + + // Try a silent refresh, then retry once. 
+ const refresh = await runPup(pi, ["auth", "refresh"], { + signal: opts.signal, + timeoutMs: 15_000, + }); + if (refresh.code !== 0) { + // Refresh failed — return the original failure so the LLM sees it. + return { result: first, refreshed: false }; + } + const second = await runPup(pi, args, opts); + return { result: second, refreshed: true }; +} + +// --------------------------------------------------------------------------- +// Output shaping +// --------------------------------------------------------------------------- + +const MAX_TEXT_CHARS = 24_000; + +function truncate(s: string, n = MAX_TEXT_CHARS): string { + if (s.length <= n) return s; + return `${s.slice(0, n)}\n…[truncated ${s.length - n} chars]`; +} + +/** Try to compactly stringify JSON; fall back to raw on failure. */ +function shapeOutput(stdout: string): { text: string; parsed?: unknown } { + const trimmed = stdout.trim(); + if (!trimmed) return { text: "(empty output)" }; + try { + const parsed = JSON.parse(trimmed); + return { text: truncate(JSON.stringify(parsed, null, 2)), parsed }; + } catch { + return { text: truncate(stdout) }; + } +} + +function toResult( + args: string[], + r: PupResult, + refreshed: boolean, + extra?: Record, +): ToolExecutionResult { + const shaped = shapeOutput(r.stdout); + const ok = r.code === 0 && !r.killed; + const header = ok + ? `$ pup ${args.join(" ")}${refreshed ? " (auto-refreshed auth)" : ""}` + : `$ pup ${args.join(" ")} → exit ${r.code}${r.killed ? 
" (killed)" : ""}`; + + let body = shaped.text; + if (!ok && r.stderr.trim()) { + body = `${body}\n\nstderr:\n${truncate(r.stderr)}`; + } else if (ok && r.stderr.trim()) { + // pup writes some informational chatter to stderr + body = `${body}\n\n[stderr]\n${truncate(r.stderr, 2_000)}`; + } + + return { + content: [{ type: "text", text: `${header}\n\n${body}` }], + isError: !ok, + details: { + argv: args, + exitCode: r.code, + killed: r.killed, + refreshed, + parsed: shaped.parsed, + ...extra, + }, + }; +} + +// --------------------------------------------------------------------------- +// Argument-builder helpers +// --------------------------------------------------------------------------- + +function addFlag(argv: string[], flag: string, value: unknown): void { + if (value === undefined || value === null) return; + if (typeof value === "boolean") { + if (value) argv.push(flag); + return; + } + if (Array.isArray(value)) { + for (const v of value) { + if (v === undefined || v === null) continue; + argv.push(flag, String(v)); + } + return; + } + argv.push(flag, String(value)); +} + +// --------------------------------------------------------------------------- +// Tool schemas +// --------------------------------------------------------------------------- + +const RunParams = Type.Object({ + args: Type.Array(Type.String(), { + description: + "Arguments to pass to `pup`, as an array. The leading `pup` is implicit. " + + "Example: [\"monitors\", \"list\", \"--tags\", \"env:prod\", \"--limit\", \"5\"]. " + + "JSON output is enforced unless you pass --output yourself.", + minItems: 1, + }), + timeoutSeconds: Type.Optional( + Type.Number({ + description: "Override default 90s timeout. Max 600.", + minimum: 1, + maximum: 600, + }), + ), +}); +type RunInput = Static; + +const LogsSearchParams = Type.Object({ + query: Type.String({ + description: + 'Datadog log query. 
Examples: "status:error service:payment-api", ' + + '"@http.status_code:5*", "host:i-abc*"', + }), + from: Type.Optional( + Type.String({ + description: "Relative time window (default 1h). Examples: 15m, 1h, 24h, 7d", + }), + ), + to: Type.Optional(Type.String({ description: "End time (default: now)" })), + limit: Type.Optional( + Type.Number({ + description: "Max logs to return (default 50). Keep small.", + minimum: 1, + maximum: 1000, + }), + ), + indexes: Type.Optional( + Type.Array(Type.String(), { + description: "Restrict to specific log indexes", + }), + ), +}); +type LogsSearchInput = Static; + +const LogsAggregateParams = Type.Object({ + query: Type.String({ description: "Datadog log query filter" }), + compute: Type.String({ + description: + "Aggregation, e.g. 'count', 'avg(@duration)', 'pc95(@latency)', 'cardinality(@user.id)'", + }), + groupBy: Type.Optional( + Type.Array(Type.String(), { + description: "Group-by facets, e.g. ['service', 'status']", + }), + ), + from: Type.Optional(Type.String({ description: "Relative window (default 1h)" })), + to: Type.Optional(Type.String()), +}); +type LogsAggregateInput = Static; + +const MetricsQueryParams = Type.Object({ + query: Type.String({ + description: + "Datadog metrics query. Must include an aggregation. " + + 'Example: "avg:system.cpu.user{env:prod} by {host}"', + }), + from: Type.Optional(Type.String({ description: "Relative window, default 1h" })), + to: Type.Optional(Type.String()), +}); +type MetricsQueryInput = Static; + +const TracesSearchParams = Type.Object({ + query: Type.String({ + description: + "APM trace query. Durations are in NANOSECONDS. 
" + + 'Example: "service:api @duration:>1000000000 status:error"', + }), + from: Type.Optional(Type.String({ description: "Relative window, default 1h" })), + to: Type.Optional(Type.String()), + limit: Type.Optional(Type.Number({ minimum: 1, maximum: 500 })), +}); +type TracesSearchInput = Static; + +const MonitorsListParams = Type.Object({ + tags: Type.Optional( + Type.String({ + description: "Comma-separated tag filter, e.g. 'env:prod,team:platform'", + }), + ), + name: Type.Optional( + Type.String({ description: "Name substring filter" }), + ), + limit: Type.Optional(Type.Number({ minimum: 1, maximum: 1000 })), +}); +type MonitorsListInput = Static; + +const ApmServicesParams = Type.Object({ + env: Type.String({ description: "Environment, e.g. 'production'" }), + stats: Type.Optional( + Type.Boolean({ + description: "Return per-service stats instead of plain list (default false)", + }), + ), +}); +type ApmServicesInput = Static; + +const AuthStatusParams = Type.Object({ + refresh: Type.Optional( + Type.Boolean({ + description: "If true, attempts `pup auth refresh` before reporting status", + }), + ), +}); +type AuthStatusInput = Static; + +// --------------------------------------------------------------------------- +// Extension +// --------------------------------------------------------------------------- + +export default function ddPupExtension(pi: ExtensionAPI) { + let lastAuthSummary = "unknown"; + + // ---- helpers -------------------------------------------------------------- + + async function refreshAuthWidget(ctx?: ExtensionContext) { + const r = await runPup(pi, ["auth", "status"], { timeoutMs: 8_000 }); + if (r.code !== 0) { + lastAuthSummary = "unauthenticated"; + } else { + let parsed: any = null; + try { + parsed = JSON.parse(r.stdout); + } catch { + /* ignore */ + } + if (parsed?.authenticated) { + const site = parsed.site || process.env.DD_SITE || "datadoghq.com"; + const exp = parsed.expires_at + ? 
` (exp ${String(parsed.expires_at).replace(/T/, " ").slice(0, 19)})` + : ""; + lastAuthSummary = `✓ ${site}${exp}`; + } else { + lastAuthSummary = "unauthenticated"; + } + } + const widget = ctx?.ui ?? (ctx as any); + if (widget?.setStatus) { + widget.setStatus("dd-pup", `pup: ${lastAuthSummary}`); + } + } + + function ensureJsonOutput(args: string[]): string[] { + // Honor explicit --output already in args. + for (const a of args) { + if (a === "--output" || a.startsWith("--output=")) return args; + } + return [...args, "--output", "json"]; + } + + // ---- tools ---------------------------------------------------------------- + + pi.registerTool({ + name: "pup_run", + label: "pup (raw)", + description: + "Run any `pup` subcommand against Datadog. The leading `pup` is implicit; " + + "pass the rest as `args`. JSON output is enforced unless you set --output. " + + "Use this for anything not covered by the focused pup_* tools (dashboards, " + + "incidents, SLOs, downtimes, RUM, security, infra, on-call, etc.). " + + "Auth is auto-refreshed on 401/403.", + promptSnippet: + "Run any `pup` Datadog CLI subcommand. Use for Datadog telemetry & resource management.", + promptGuidelines: [ + "Use pup_run for Datadog operations the focused pup_* tools don't cover (incidents, SLOs, dashboards, downtimes, RUM, security signals, infra hosts, on-call, etc.).", + "When calling pup_run, never include the leading 'pup' word; pass the subcommand and flags as an args array.", + "For pup_run telemetry queries, always pass --from to bound the time range; start narrow (1h) before widening.", + "Remember APM durations in pup queries are NANOSECONDS (1s = 1000000000). Use pup_logs_aggregate to count instead of fetching raw logs.", + ], + parameters: RunParams, + async execute(_id, params: RunInput, signal): Promise { + const argv = ensureJsonOutput(params.args.slice()); + const { result, refreshed } = await runPupWithRetry(pi, argv, { + signal, + timeoutMs: (params.timeoutSeconds ?? 
90) * 1000, + }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_logs_search", + label: "pup logs search", + description: + "Search Datadog logs with a query string. Returns up to `limit` log events as JSON. " + + "Prefer pup_logs_aggregate when you only need counts/distributions.", + promptSnippet: "Search Datadog logs by query + time window.", + promptGuidelines: [ + "Use pup_logs_search to retrieve example log events; use pup_logs_aggregate for counts.", + "For pup_logs_search keep --limit small (≤50) on first call; widen only if needed.", + ], + parameters: LogsSearchParams, + async execute(_id, params: LogsSearchInput, signal): Promise { + const argv = ["logs", "search", "--query", params.query]; + addFlag(argv, "--from", params.from ?? "1h"); + addFlag(argv, "--to", params.to); + addFlag(argv, "--limit", params.limit ?? 50); + addFlag(argv, "--indexes", params.indexes); + argv.push("--output", "json"); + const { result, refreshed } = await runPupWithRetry(pi, argv, { signal }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_logs_aggregate", + label: "pup logs aggregate", + description: + "Aggregate Datadog logs (count, avg, percentiles, cardinality) with optional " + + "group-by. Use this instead of fetching raw logs whenever the question is " + + "'how many?', 'what's the rate?', 'top N by …?'.", + promptSnippet: "Aggregate Datadog logs (count, percentile, etc.) by facet.", + promptGuidelines: [ + "Use pup_logs_aggregate for log counts / distributions instead of fetching and counting locally.", + ], + parameters: LogsAggregateParams, + async execute(_id, params: LogsAggregateInput, signal): Promise { + const argv = [ + "logs", + "aggregate", + "--query", + params.query, + "--compute", + params.compute, + ]; + addFlag(argv, "--from", params.from ?? 
"1h"); + addFlag(argv, "--to", params.to); + if (params.groupBy?.length) { + for (const g of params.groupBy) argv.push("--group-by", g); + } + argv.push("--output", "json"); + const { result, refreshed } = await runPupWithRetry(pi, argv, { signal }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_metrics_query", + label: "pup metrics query", + description: + "Query a Datadog metric time-series. Query MUST include an aggregation " + + "(avg, sum, max, min, count). Example: avg:system.cpu.user{env:prod} by {host}.", + promptSnippet: "Query Datadog metrics (time-series).", + promptGuidelines: [ + "For pup_metrics_query the query string must start with an aggregation (avg:, sum:, max:, min:, count:).", + ], + parameters: MetricsQueryParams, + async execute(_id, params: MetricsQueryInput, signal): Promise { + const argv = ["metrics", "query", "--query", params.query]; + addFlag(argv, "--from", params.from ?? "1h"); + addFlag(argv, "--to", params.to); + argv.push("--output", "json"); + const { result, refreshed } = await runPupWithRetry(pi, argv, { signal }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_traces_search", + label: "pup traces search", + description: + "Search APM traces. WARNING: @duration is in NANOSECONDS " + + "(1s = 1000000000, 5ms = 5000000).", + promptSnippet: "Search APM traces (nanosecond durations).", + promptGuidelines: [ + "For pup_traces_search, @duration values are in NANOSECONDS — never seconds or ms.", + ], + parameters: TracesSearchParams, + async execute(_id, params: TracesSearchInput, signal): Promise { + const argv = ["traces", "search", `--query=${params.query}`]; + addFlag(argv, "--from", params.from ?? 
"1h"); + addFlag(argv, "--to", params.to); + addFlag(argv, "--limit", params.limit); + argv.push("--output", "json"); + const { result, refreshed } = await runPupWithRetry(pi, argv, { signal }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_monitors_list", + label: "pup monitors list", + description: "List Datadog monitors with optional tag/name filters.", + promptSnippet: "List Datadog monitors with optional filters.", + parameters: MonitorsListParams, + async execute(_id, params: MonitorsListInput, signal): Promise { + const argv = ["monitors", "list"]; + addFlag(argv, "--tags", params.tags); + addFlag(argv, "--name", params.name); + addFlag(argv, "--limit", params.limit ?? 25); + argv.push("--output", "json"); + const { result, refreshed } = await runPupWithRetry(pi, argv, { signal }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_apm_services", + label: "pup apm services", + description: + "List APM services in a given environment, or fetch per-service stats when stats=true.", + promptSnippet: "List APM services or per-service stats for an env.", + parameters: ApmServicesParams, + async execute(_id, params: ApmServicesInput, signal): Promise { + const argv = ["apm", "services", params.stats ? "stats" : "list", "--env", params.env]; + argv.push("--output", "json"); + const { result, refreshed } = await runPupWithRetry(pi, argv, { signal }); + return toResult(argv, result, refreshed); + }, + }); + + pi.registerTool({ + name: "pup_auth_status", + label: "pup auth status", + description: + "Check current Datadog auth status. 
Set refresh=true to first run `pup auth refresh`.", + promptSnippet: "Check / refresh pup (Datadog) auth.", + parameters: AuthStatusParams, + async execute(_id, params: AuthStatusInput, signal): Promise { + if (params.refresh) { + const refreshed = await runPup(pi, ["auth", "refresh"], { signal, timeoutMs: 15_000 }); + if (refreshed.code !== 0) { + return toResult(["auth", "refresh"], refreshed, false); + } + } + const status = await runPup(pi, ["auth", "status", "--output", "json"], { signal }); + // Update widget side-effect + void refreshAuthWidget(); + return toResult(["auth", "status"], status, !!params.refresh); + }, + }); + + // ---- commands ------------------------------------------------------------- + + /** /pup — quick passthrough for the human (no LLM round-trip) */ + pi.registerCommand("pup", { + description: "Run `pup ` directly and print the output", + handler: async (args, ctx) => { + const argv = args.trim().length > 0 ? args.trim().split(/\s+/) : ["--help"]; + ctx.ui.setStatus("dd-pup-run", `running: pup ${argv.join(" ")}`); + const { result } = await runPupWithRetry(pi, argv, { timeoutMs: 60_000 }); + ctx.ui.setStatus("dd-pup-run", ""); + const head = `$ pup ${argv.join(" ")} (exit ${result.code})`; + const out = (result.stdout || result.stderr || "(no output)").trim(); + ctx.ui.notify(`${head}\n\n${truncate(out, 4_000)}`, result.code === 0 ? 
"info" : "error"); + }, + }); + + /** /pup-auth — interactive auth helper */ + pi.registerCommand("pup-auth", { + description: "Datadog auth: status, refresh, login, logout", + handler: async (_args, ctx) => { + const choice = await ctx.ui.select("pup auth", [ + "status", + "refresh", + "login", + "logout", + ]); + if (!choice) return; + + if (choice === "login") { + ctx.ui.notify("Opening browser for `pup auth login` …", "info"); + // login is interactive; spawn but don't block the UI thread on stdin + const res = await runPup(pi, ["auth", "login"], { timeoutMs: 300_000 }); + ctx.ui.notify( + res.code === 0 ? "Login complete." : `Login failed (exit ${res.code})`, + res.code === 0 ? "success" : "error", + ); + } else { + const res = await runPup(pi, ["auth", choice], { timeoutMs: 30_000 }); + ctx.ui.notify( + truncate((res.stdout || res.stderr || "").trim() || `(exit ${res.code})`, 2_000), + res.code === 0 ? "info" : "error", + ); + } + await refreshAuthWidget(ctx); + }, + }); + + // ---- lifecycle ------------------------------------------------------------ + + pi.on("session_start", async (_event, ctx) => { + // Verify pup is installed + const probe = await runPup(pi, ["--version"], { timeoutMs: 5_000 }); + if (probe.code !== 0) { + ctx.ui.setStatus( + "dd-pup", + `pup: NOT FOUND (install: brew tap datadog-labs/pack && brew install pup)`, + ); + return; + } + await refreshAuthWidget(ctx); + }); +} diff --git a/skills/extensions/dd-pup-pi/package.json b/skills/extensions/dd-pup-pi/package.json new file mode 100644 index 00000000..f551803f --- /dev/null +++ b/skills/extensions/dd-pup-pi/package.json @@ -0,0 +1,9 @@ +{ + "name": "dd-pup", + "version": "0.1.0", + "description": "pi extension exposing the Datadog pup CLI as LLM tools", + "private": true, + "pi": { + "extensions": ["./index.ts"] + } +} diff --git a/src/commands/mod.rs b/src/commands/mod.rs index d159d01f..f27fcad1 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -69,6 +69,7 @@ pub mod 
scorecards; pub mod seats; pub mod security; pub mod service_catalog; +#[cfg(not(target_arch = "wasm32"))] pub mod skills; pub mod slos; pub mod software_catalog; diff --git a/src/commands/skills.rs b/src/commands/skills.rs index d0c23676..60a3f582 100644 --- a/src/commands/skills.rs +++ b/src/commands/skills.rs @@ -14,11 +14,21 @@ pub fn list(cfg: &crate::config::Config, entry_type: Option) -> Result<( let items: Vec = entries .iter() .map(|e| { - serde_json::json!({ + let mut v = serde_json::json!({ "name": e.name, "type": e.entry_type, "description": e.description, - }) + }); + if e.entry_type == "extension" { + if let Some(obj) = v.as_object_mut() { + obj.insert("platform".to_string(), serde_json::json!(e.platform)); + obj.insert( + "files".to_string(), + serde_json::json!(e.files.iter().map(|(r, _)| *r).collect::>()), + ); + } + } + v }) .collect(); @@ -26,16 +36,24 @@ pub fn list(cfg: &crate::config::Config, entry_type: Option) -> Result<( Ok(()) } +#[allow(clippy::too_many_arguments)] pub fn install( cfg: &crate::config::Config, name: Option, target_agent: Option, dir: Option, entry_type: Option, + platform: Option, + user_scope: bool, ) -> Result<()> { - let project_root = - skills::find_project_root().unwrap_or_else(|| std::env::current_dir().unwrap_or_default()); + let (project_root, in_project) = skills::project_root_or_cwd(); let agent = skills::resolve_agent(target_agent.as_deref()); + let platform_slug = skills::resolve_platform(platform.as_deref(), &agent); + + // Default scope for extensions: + // - explicit --user wins + // - else: project-local if a project root (.git) was found, else user-global + let extensions_user_scope = user_scope || !in_project; let entries: Vec<_> = skills::SKILLS .iter() @@ -47,6 +65,19 @@ pub fn install( Some(t) => e.entry_type == t.as_str(), None => true, }) + // When --platform is set, scope extensions to that platform. + // Skills and agents are unaffected by --platform. 
+ .filter(|e| { + if e.entry_type != "extension" { + return true; + } + if platform_slug.is_empty() { + // No platform context — only install extensions when the user + // asked for them explicitly (by --type=extension or by name). + return entry_type.as_deref() == Some("extension") || name.is_some(); + } + e.platform == platform_slug + }) .collect(); if let Some(ref n) = name { @@ -55,23 +86,34 @@ pub fn install( } } - let mut installed = 0; + let mut installed_files = 0usize; + let mut installed_entries = 0usize; let mut dirs_used = std::collections::BTreeSet::new(); for entry in &entries { - let (path, fmt) = skills::install_path(entry, &agent, &project_root, dir.as_deref()); - if let Some(parent) = path.parent() { - std::fs::create_dir_all(parent)?; - dirs_used.insert(parent.display().to_string()); + let targets = skills::install_paths( + entry, + &agent, + &platform_slug, + &project_root, + dir.as_deref(), + extensions_user_scope, + )?; + installed_entries += 1; + for (path, content) in targets { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + dirs_used.insert(parent.display().to_string()); + } + std::fs::write(&path, &content)?; + installed_files += 1; } - let content = skills::format_content(entry, &fmt); - std::fs::write(&path, &content)?; - installed += 1; } if cfg.agent_mode { let directories: Vec<_> = dirs_used.into_iter().collect(); let result = serde_json::json!({ - "installed": installed, + "installed": installed_entries, + "files": installed_files, "directories": directories, }); crate::formatter::format_and_print(&result, &cfg.output_format, cfg.agent_mode, None)?; @@ -79,23 +121,42 @@ pub fn install( for d in &dirs_used { println!(" {d}"); } - println!("Installed {} skill(s) and agent(s)", installed); + println!( + "Installed {} entry(ies), {} file(s)", + installed_entries, installed_files + ); } Ok(()) } -pub fn path(target_agent: Option) -> Result<()> { - let project_root = - 
skills::find_project_root().unwrap_or_else(|| std::env::current_dir().unwrap_or_default()); +pub fn path( + target_agent: Option, + platform: Option, + user_scope: bool, +) -> Result<()> { + let (project_root, in_project) = skills::project_root_or_cwd(); let agent = skills::resolve_agent(target_agent.as_deref()); let sd = skills::skills_dir(&agent, &project_root); let ad = skills::agents_dir(&agent, &project_root); if sd == ad { - println!("{}", sd.display()); + println!("skills: {}", sd.display()); } else { - println!("skills: {}", sd.display()); - println!("agents: {}", ad.display()); + println!("skills: {}", sd.display()); + println!("agents: {}", ad.display()); + } + + let platform_slug = skills::resolve_platform(platform.as_deref(), &agent); + if !platform_slug.is_empty() { + let scope = user_scope || !in_project; + if let Some(ed) = skills::extensions_dir(&platform_slug, &project_root, scope) { + println!( + "extensions: {} (platform: {}, scope: {})", + ed.display(), + platform_slug, + if scope { "user" } else { "project" }, + ); + } } Ok(()) } diff --git a/src/main.rs b/src/main.rs index 3ab8a2b1..d4bae220 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,6 +10,7 @@ mod extensions; mod formatter; #[cfg(not(target_arch = "wasm32"))] mod runbooks; +#[cfg(not(target_arch = "wasm32"))] mod skills; #[cfg(not(target_arch = "wasm32"))] mod tunnel; @@ -2311,15 +2312,27 @@ enum Commands { #[command(subcommand)] action: ServiceCatalogActions, }, - /// Manage agent skills for AI coding assistants + /// Manage agent skills, subagents, and extensions for AI coding assistants + /// + /// Install structured workflow guides, domain references, specialized + /// agents, and platform extensions that teach AI coding assistants how to + /// compose pup commands. /// - /// Install structured workflow guides, domain references, and specialized - /// agents that teach AI coding assistants how to compose pup commands. 
+ /// ENTRY TYPES: + /// skill Single-file markdown guide installed under the agent's skills dir + /// agent Domain subagent (Claude Code subagent format or SKILL.md fallback) + /// extension Multi-file bundle for a coding-agent platform (e.g. pi) + /// + /// EXTENSION SCOPE: + /// By default extensions install project-local when run inside a git + /// repository (e.g. /.pi/extensions//), and user-global + /// otherwise (e.g. ~/.pi/agent/extensions//). Pass --user to force + /// user-global. Skills and agents always install project-local. /// /// COMMANDS: - /// list List available skills and agents - /// install Install skills for the detected AI coding assistant - /// path Show where skills would be installed + /// list List available skills, agents, and extensions + /// install Install entries for the detected AI coding assistant + /// path Show where entries would be installed /// /// EXAMPLES: /// pup skills list @@ -2327,7 +2340,11 @@ enum Commands { /// pup skills install dd-pup /// pup skills install --type=agent /// pup skills install --target-agent=cursor + /// pup skills install --platform=pi + /// pup skills install --platform=pi --user /// pup skills path + /// pup skills path --platform=pi + #[cfg(not(target_arch = "wasm32"))] #[command(verbatim_doc_comment)] Skills { #[command(subcommand)] @@ -8799,17 +8816,18 @@ enum AliasActions { } // ---- Skills ---- +#[cfg(not(target_arch = "wasm32"))] #[derive(Subcommand)] enum SkillsActions { - /// List available skills and agents + /// List available skills, agents, and extensions List { - /// Filter by type: skill, agent + /// Filter by type: skill, agent, extension #[arg(long = "type", name = "type")] entry_type: Option, }, /// Install skills for the detected AI coding assistant Install { - /// Install a specific skill or agent by name + /// Install a specific skill, agent, or extension by name name: Option, /// Override detected AI agent (claude-code, cursor, codex, windsurf, gemini-code) #[arg(long = 
"target-agent")] @@ -8817,15 +8835,29 @@ enum SkillsActions { /// Override install directory #[arg(long)] dir: Option, - /// Filter by type: skill, agent + /// Filter by type: skill, agent, extension #[arg(long = "type", name = "type")] entry_type: Option, + /// Extension platform (e.g. pi). Required for extension installs unless + /// the detected agent maps to a platform. + #[arg(long)] + platform: Option, + /// Install extensions to the user-global directory (e.g. ~/.pi/agent/extensions) + /// instead of the project-local one. Has no effect on skills/agents. + #[arg(long = "user")] + user_scope: bool, }, - /// Show where skills would be installed + /// Show where skills/agents/extensions would be installed Path { /// Override detected AI agent #[arg(long = "target-agent")] target_agent: Option, + /// Extension platform (e.g. pi) + #[arg(long)] + platform: Option, + /// Show user-global extension path instead of project-local + #[arg(long = "user")] + user_scope: bool, }, } @@ -13732,6 +13764,7 @@ async fn main_inner() -> anyhow::Result<()> { .await?; } // --- Skills --- + #[cfg(not(target_arch = "wasm32"))] Commands::Skills { action } => match action { SkillsActions::List { entry_type } => commands::skills::list(&cfg, entry_type)?, SkillsActions::Install { @@ -13739,8 +13772,22 @@ async fn main_inner() -> anyhow::Result<()> { target_agent, dir, entry_type, - } => commands::skills::install(&cfg, name, target_agent, dir, entry_type)?, - SkillsActions::Path { target_agent } => commands::skills::path(target_agent)?, + platform, + user_scope, + } => commands::skills::install( + &cfg, + name, + target_agent, + dir, + entry_type, + platform, + user_scope, + )?, + SkillsActions::Path { + target_agent, + platform, + user_scope, + } => commands::skills::path(target_agent, platform, user_scope)?, }, // --- Product Analytics --- Commands::ProductAnalytics { action } => { diff --git a/src/skills.rs b/src/skills.rs index e8411a42..cfc395e6 100644 --- a/src/skills.rs +++ 
b/src/skills.rs @@ -3,10 +3,37 @@ use std::path::{Path, PathBuf}; pub struct SkillEntry { pub name: &'static str, pub description: &'static str, - pub entry_type: &'static str, // "skill" or "agent" + /// One of: "skill", "agent", "extension". + /// - skill / agent: single-file markdown installed under a skills/ dir + /// - extension: multi-file bundle for an AI coding agent platform (e.g. pi) + pub entry_type: &'static str, + /// SKILL.md / agent.md body, or empty for entry_type == "extension". pub content: &'static str, + /// Platform slug for entry_type == "extension". One of: "pi". + /// Empty for skills and agents. + pub platform: &'static str, + /// Files to materialize for entry_type == "extension". + /// Each tuple is `(relative_path_within_extension_dir, file_contents)`. + /// Empty for skills and agents. + pub files: &'static [(&'static str, &'static str)], } +/// Files for the `dd-pup-pi` extension bundle (pi coding agent). +static DD_PUP_PI_FILES: &[(&str, &str)] = &[ + ( + "index.ts", + include_str!("../skills/extensions/dd-pup-pi/index.ts"), + ), + ( + "package.json", + include_str!("../skills/extensions/dd-pup-pi/package.json"), + ), + ( + "README.md", + include_str!("../skills/extensions/dd-pup-pi/README.md"), + ), +]; + pub static SKILLS: &[SkillEntry] = &[ // --- Skills (from agent-skills + claude-plugin) --- SkillEntry { @@ -14,54 +41,72 @@ pub static SKILLS: &[SkillEntry] = &[ description: "Datadog CLI (pup). 
OAuth2 auth with token refresh.", entry_type: "skill", content: include_str!("../skills/dd-pup/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-monitors", description: "Monitor management - create, update, mute, and alerting best practices.", entry_type: "skill", content: include_str!("../skills/dd-monitors/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-logs", description: "Log management - search, pipelines, archives, and cost control.", entry_type: "skill", content: include_str!("../skills/dd-logs/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-apm", description: "APM - traces, services, dependencies, performance analysis.", entry_type: "skill", content: include_str!("../skills/dd-apm/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-debugger", description: "Live Debugger - create, delete, and watch log probes and events.", entry_type: "skill", content: include_str!("../skills/dd-debugger/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-docs", description: "Datadog docs lookup using docs.datadoghq.com/llms.txt.", entry_type: "skill", content: include_str!("../skills/dd-docs/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-code-generation", description: "Use pup CLI or generate code (TypeScript, Python, Java, Go, Rust).", entry_type: "skill", content: include_str!("../skills/dd-code-generation/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-file-issue", description: "File GitHub issues to the right repository (pup CLI or plugin).", entry_type: "skill", content: include_str!("../skills/dd-file-issue/SKILL.md"), + platform: "", + files: &[], }, SkillEntry { name: "dd-symdb", description: "Symbol Database - search service symbols, find probe-able methods.", entry_type: "skill", content: include_str!("../skills/dd-symdb/SKILL.md"), + platform: "", + files: &[], }, // --- Domain Agents (from datadog-api-claude-plugin) --- SkillEntry { 
@@ -69,288 +114,393 @@ pub static SKILLS: &[SkillEntry] = &[ description: "Manage Datadog Agentless Scanning for AWS and Azure resources.", entry_type: "agent", content: include_str!("../agents/agentless-scanning.md"), + platform: "", + files: &[], }, SkillEntry { name: "api-management", description: "Manage API keys and Application keys for authentication.", entry_type: "agent", content: include_str!("../agents/api-management.md"), + platform: "", + files: &[], }, SkillEntry { name: "apm-configuration", description: "Manage APM retention filters and span-based metrics.", entry_type: "agent", content: include_str!("../agents/apm-configuration.md"), + platform: "", + files: &[], }, SkillEntry { name: "app-builder", description: "Manage App Builder applications (low-code internal tools).", entry_type: "agent", content: include_str!("../agents/app-builder.md"), + platform: "", + files: &[], }, SkillEntry { name: "application-security", description: "Manage ASM including WAF rules, threat detection, API protection.", entry_type: "agent", content: include_str!("../agents/application-security.md"), + platform: "", + files: &[], }, SkillEntry { name: "audience-management", description: "Query and segment RUM users and accounts.", entry_type: "agent", content: include_str!("../agents/audience-management.md"), + platform: "", + files: &[], }, SkillEntry { name: "audit-logs", description: "Query and manage Audit Trail events for compliance.", entry_type: "agent", content: include_str!("../agents/audit-logs.md"), + platform: "", + files: &[], }, SkillEntry { name: "aws-integration", description: "Configure AWS integration for monitoring and log collection.", entry_type: "agent", content: include_str!("../agents/aws-integration.md"), + platform: "", + files: &[], }, SkillEntry { name: "azure-integration", description: "Configure Azure integration for monitoring and resources.", entry_type: "agent", content: include_str!("../agents/azure-integration.md"), + platform: "", + 
files: &[], }, SkillEntry { name: "cicd", description: "Manage CI/CD Visibility including tests, pipelines, DORA metrics.", entry_type: "agent", content: include_str!("../agents/cicd.md"), + platform: "", + files: &[], }, SkillEntry { name: "cloud-cost", description: "Manage Cloud Cost Management including multi-cloud config.", entry_type: "agent", content: include_str!("../agents/cloud-cost.md"), + platform: "", + files: &[], }, SkillEntry { name: "cloud-workload-security", description: "Manage CSM Threats and Workload Protection agent rules.", entry_type: "agent", content: include_str!("../agents/cloud-workload-security.md"), + platform: "", + files: &[], }, SkillEntry { name: "container-monitoring", description: "Monitor Kubernetes and containerized environments.", entry_type: "agent", content: include_str!("../agents/container-monitoring.md"), + platform: "", + files: &[], }, SkillEntry { name: "dashboards", description: "Manage dashboards including CRUD and widgets.", entry_type: "agent", content: include_str!("../agents/dashboards.md"), + platform: "", + files: &[], }, SkillEntry { name: "data-deletion", description: "GDPR/data privacy compliance through targeted deletion.", entry_type: "agent", content: include_str!("../agents/data-deletion.md"), + platform: "", + files: &[], }, SkillEntry { name: "data-governance", description: "Access control, data enrichment, data protection.", entry_type: "agent", content: include_str!("../agents/data-governance.md"), + platform: "", + files: &[], }, SkillEntry { name: "database-monitoring", description: "Query and manage DBM data and monitors.", entry_type: "agent", content: include_str!("../agents/database-monitoring.md"), + platform: "", + files: &[], }, SkillEntry { name: "error-tracking", description: "Manage error tracking issues, triage, and assignment.", entry_type: "agent", content: include_str!("../agents/error-tracking.md"), + platform: "", + files: &[], }, SkillEntry { name: "events", description: "Manage 
events including submission, search, filtering.", entry_type: "agent", content: include_str!("../agents/events.md"), + platform: "", + files: &[], }, SkillEntry { name: "fleet-automation", description: "Manage Agent fleet, deployments, upgrades, schedules.", entry_type: "agent", content: include_str!("../agents/fleet-automation.md"), + platform: "", + files: &[], }, SkillEntry { name: "gcp-integration", description: "Configure GCP integration for monitoring and resources.", entry_type: "agent", content: include_str!("../agents/gcp-integration.md"), + platform: "", + files: &[], }, SkillEntry { name: "incident-response", description: "Manage incident lifecycle, teams, and response.", entry_type: "agent", content: include_str!("../agents/incident-response.md"), + platform: "", + files: &[], }, SkillEntry { name: "infrastructure", description: "Query infrastructure hosts, counts, and metadata.", entry_type: "agent", content: include_str!("../agents/infrastructure.md"), + platform: "", + files: &[], }, SkillEntry { name: "log-configuration", description: "Manage log archives, pipelines, indexes, custom destinations.", entry_type: "agent", content: include_str!("../agents/log-configuration.md"), + platform: "", + files: &[], }, SkillEntry { name: "logs", description: "Search and analyze log data with flexible queries.", entry_type: "agent", content: include_str!("../agents/logs.md"), + platform: "", + files: &[], }, SkillEntry { name: "metrics", description: "Query, list, and manage metrics.", entry_type: "agent", content: include_str!("../agents/metrics.md"), + platform: "", + files: &[], }, SkillEntry { name: "monitoring-alerting", description: "Full monitor management, downtimes, and templates.", entry_type: "agent", content: include_str!("../agents/monitoring-alerting.md"), + platform: "", + files: &[], }, SkillEntry { name: "network-performance", description: "Network Performance Monitoring and DNS monitoring.", entry_type: "agent", content: 
include_str!("../agents/network-performance.md"), + platform: "", + files: &[], }, SkillEntry { name: "notebooks", description: "Manage investigation notebooks.", entry_type: "agent", content: include_str!("../agents/notebooks.md"), + platform: "", + files: &[], }, SkillEntry { name: "observability-pipelines", description: "Manage Observability Pipelines for data routing.", entry_type: "agent", content: include_str!("../agents/observability-pipelines.md"), + platform: "", + files: &[], }, SkillEntry { name: "organization-management", description: "Manage organization settings, teams, and users.", entry_type: "agent", content: include_str!("../agents/organization-management.md"), + platform: "", + files: &[], }, SkillEntry { name: "powerpacks", description: "Manage reusable dashboard widget groups.", entry_type: "agent", content: include_str!("../agents/powerpacks.md"), + platform: "", + files: &[], }, SkillEntry { name: "rum-metrics-retention", description: "Manage RUM metrics and retention filters.", entry_type: "agent", content: include_str!("../agents/rum-metrics-retention.md"), + platform: "", + files: &[], }, SkillEntry { name: "rum", description: "Query Real User Monitoring data.", entry_type: "agent", content: include_str!("../agents/rum.md"), + platform: "", + files: &[], }, SkillEntry { name: "saml-configuration", description: "Manage SAML SSO configuration.", entry_type: "agent", content: include_str!("../agents/saml-configuration.md"), + platform: "", + files: &[], }, SkillEntry { name: "scorecards", description: "Manage service quality scorecards.", entry_type: "agent", content: include_str!("../agents/scorecards.md"), + platform: "", + files: &[], }, SkillEntry { name: "security-posture-management", description: "Manage CSPM findings and compliance.", entry_type: "agent", content: include_str!("../agents/security-posture-management.md"), + platform: "", + files: &[], }, SkillEntry { name: "security", description: "Security monitoring signals and 
rules.", entry_type: "agent", content: include_str!("../agents/security.md"), + platform: "", + files: &[], }, SkillEntry { name: "service-catalog", description: "Manage service registry and metadata.", entry_type: "agent", content: include_str!("../agents/service-catalog.md"), + platform: "", + files: &[], }, SkillEntry { name: "slos", description: "Manage Service Level Objectives.", entry_type: "agent", content: include_str!("../agents/slos.md"), + platform: "", + files: &[], }, SkillEntry { name: "spark-pod-autosizing", description: "Manage Spark pod autosizing for Kubernetes.", entry_type: "agent", content: include_str!("../agents/spark-pod-autosizing.md"), + platform: "", + files: &[], }, SkillEntry { name: "static-analysis", description: "Manage static code analysis.", entry_type: "agent", content: include_str!("../agents/static-analysis.md"), + platform: "", + files: &[], }, SkillEntry { name: "synthetics", description: "Manage synthetic monitoring tests.", entry_type: "agent", content: include_str!("../agents/synthetics.md"), + platform: "", + files: &[], }, SkillEntry { name: "third-party-integrations", description: "Manage third-party integrations (PagerDuty, Slack, etc.).", entry_type: "agent", content: include_str!("../agents/third-party-integrations.md"), + platform: "", + files: &[], }, SkillEntry { name: "traces", description: "Query APM traces and spans.", entry_type: "agent", content: include_str!("../agents/traces.md"), + platform: "", + files: &[], }, SkillEntry { name: "usage-metering", description: "Track Datadog usage and billing.", entry_type: "agent", content: include_str!("../agents/usage-metering.md"), + platform: "", + files: &[], }, SkillEntry { name: "user-access-management", description: "Manage users, roles, teams, and permissions.", entry_type: "agent", content: include_str!("../agents/user-access-management.md"), + platform: "", + files: &[], }, SkillEntry { name: "workflows", description: "Manage workflow automations.", entry_type: 
"agent", content: include_str!("../agents/workflows.md"), + platform: "", + files: &[], + }, + // --- Extensions (multi-file bundles for AI coding agent platforms) --- + SkillEntry { + name: "dd-pup-pi", + description: "pi coding agent extension: exposes pup as LLM tools (logs, metrics, traces, monitors, ...).", + entry_type: "extension", + content: "", + platform: "pi", + files: DD_PUP_PI_FILES, }, ]; @@ -361,6 +511,64 @@ pub fn resolve_agent(agent: Option<&str>) -> String { .unwrap_or_else(|| crate::useragent::detect_agent_info().name) } +/// Resolve the extension platform slug. +/// +/// Precedence: +/// 1. explicit `--platform` flag value +/// 2. mapping from the detected/resolved agent name (e.g. `pi-dev` -> `pi`) +/// 3. empty string (caller must decide how to handle) +/// +/// Supported platforms today: `pi`. +pub fn resolve_platform(platform: Option<&str>, agent: &str) -> String { + if let Some(p) = platform { + if !p.is_empty() { + return p.to_string(); + } + } + match agent { + "pi-dev" | "pi" => "pi".to_string(), + _ => String::new(), + } +} + +/// Determine the install directory for an extension on a given platform. +/// +/// When `user_scope` is true, returns the per-user global location +/// (`~/.pi/agent/extensions` for pi). Otherwise returns the project-local +/// location (`/.pi/extensions` for pi). +/// +/// Returns `None` for unsupported platforms, or in user-scope mode when the +/// home directory cannot be resolved (HOME/USERPROFILE unset). The caller is +/// expected to surface this as an error — see [`install_paths`]. +pub fn extensions_dir(platform: &str, project_root: &Path, user_scope: bool) -> Option { + extensions_dir_with_home( + platform, + dirs::home_dir().as_deref(), + project_root, + user_scope, + ) +} + +/// Same as [`extensions_dir`] but takes an explicit `home` directory, so tests +/// don't have to mutate the process-global `HOME` env var. 
/// Same as [`extensions_dir`] but takes an explicit `home` directory, so tests
/// don't have to mutate the process-global `HOME` env var.
///
/// Returns `None` for an unsupported `platform`, or in user scope when `home`
/// is `None`.
pub fn extensions_dir_with_home(
    platform: &str,
    home: Option<&Path>,
    project_root: &Path,
    user_scope: bool,
) -> Option<PathBuf> {
    match platform {
        "pi" => {
            if user_scope {
                // `home?` propagates a missing home directory as None.
                Some(home?.join(".pi").join("agent").join("extensions"))
            } else {
                Some(project_root.join(".pi").join("extensions"))
            }
        }
        // Unknown platform: no install location.
        _ => None,
    }
}
+pub fn install_paths( + entry: &SkillEntry, + agent: &str, + platform: &str, + project_root: &Path, + dir_override: Option<&str>, + user_scope: bool, +) -> anyhow::Result> { + if entry.entry_type != "extension" { + let (path, fmt) = install_path(entry, agent, project_root, dir_override); + return Ok(vec![(path, format_content(entry, &fmt))]); + } + + // entry_type == "extension": materialize each bundled file under the + // platform-appropriate extension directory. + let base = if let Some(d) = dir_override { + PathBuf::from(d).join(entry.name) + } else { + let plat = if platform.is_empty() { + entry.platform + } else { + platform + }; + let root = extensions_dir(plat, project_root, user_scope).ok_or_else(|| { + anyhow::anyhow!( + "unknown or unsupported extension platform: '{}' (entry: {})", + plat, + entry.name, + ) + })?; + root.join(entry.name) + }; + + Ok(entry + .files + .iter() + .map(|(rel, body)| (base.join(rel), (*body).to_string())) + .collect()) +} + #[derive(Debug, PartialEq)] pub enum InstallFormat { SkillMd, @@ -492,6 +753,18 @@ pub fn format_content(entry: &SkillEntry, format: &InstallFormat) -> String { } } +/// Find the project root (nearest ancestor containing `.git`) and fall back to +/// the current working directory if none is found. Returns `(root, found)` +/// where `found` is true iff a project root was actually located — callers can +/// use that to decide between project-local and user-global defaults without +/// re-walking the tree. +pub fn project_root_or_cwd() -> (PathBuf, bool) { + match find_project_root() { + Some(p) => (p, true), + None => (std::env::current_dir().unwrap_or_default(), false), + } +} + /// Find the project root by walking up from cwd looking for .git. 
pub fn find_project_root() -> Option { let mut dir = std::env::current_dir().ok()?; @@ -541,7 +814,7 @@ mod tests { fn test_all_entries_have_valid_type() { for entry in SKILLS { assert!( - entry.entry_type == "skill" || entry.entry_type == "agent", + matches!(entry.entry_type, "skill" | "agent" | "extension"), "invalid type '{}' for {}", entry.entry_type, entry.name @@ -550,13 +823,35 @@ mod tests { } #[test] - fn test_all_entries_have_content() { + fn test_all_entries_have_content_or_files() { for entry in SKILLS { - assert!( - !entry.content.is_empty(), - "empty content for {}", - entry.name - ); + if entry.entry_type == "extension" { + assert!( + !entry.files.is_empty(), + "extension {} has no files", + entry.name + ); + assert!( + !entry.platform.is_empty(), + "extension {} has empty platform", + entry.name + ); + for (rel, body) in entry.files { + assert!(!rel.is_empty(), "empty file path in {}", entry.name); + assert!( + !body.is_empty(), + "empty file body for {}:{}", + entry.name, + rel + ); + } + } else { + assert!( + !entry.content.is_empty(), + "empty content for {}", + entry.name + ); + } } } @@ -645,16 +940,22 @@ mod tests { assert_eq!(agents_dir("cursor", &root), root.join(".cursor/skills")); } + fn entry(name: &'static str, entry_type: &'static str, content: &'static str) -> SkillEntry { + SkillEntry { + name, + description: "test", + entry_type, + content, + platform: "", + files: &[], + } + } + #[test] fn test_install_path_skill_claude_code() { let root = PathBuf::from("/tmp/test-project"); - let entry = SkillEntry { - name: "dd-pup", - description: "test", - entry_type: "skill", - content: "", - }; - let (path, fmt) = install_path(&entry, "claude-code", &root, None); + let e = entry("dd-pup", "skill", ""); + let (path, fmt) = install_path(&e, "claude-code", &root, None); assert_eq!(path, root.join(".claude/skills/dd-pup/SKILL.md")); assert_eq!(fmt, InstallFormat::SkillMd); } @@ -662,13 +963,8 @@ mod tests { #[test] fn 
test_install_path_agent_claude_code() { let root = PathBuf::from("/tmp/test-project"); - let entry = SkillEntry { - name: "logs", - description: "test", - entry_type: "agent", - content: "", - }; - let (path, fmt) = install_path(&entry, "claude-code", &root, None); + let e = entry("logs", "agent", ""); + let (path, fmt) = install_path(&e, "claude-code", &root, None); assert_eq!(path, root.join(".claude/agents/logs.md")); assert_eq!(fmt, InstallFormat::AgentMd); } @@ -676,13 +972,8 @@ mod tests { #[test] fn test_install_path_agent_cursor_as_skill() { let root = PathBuf::from("/tmp/test-project"); - let entry = SkillEntry { - name: "logs", - description: "test", - entry_type: "agent", - content: "", - }; - let (path, fmt) = install_path(&entry, "cursor", &root, None); + let e = entry("logs", "agent", ""); + let (path, fmt) = install_path(&e, "cursor", &root, None); assert_eq!(path, root.join(".cursor/skills/logs/SKILL.md")); assert_eq!(fmt, InstallFormat::SkillMd); } @@ -690,51 +981,181 @@ mod tests { #[test] fn test_install_path_dir_override() { let root = PathBuf::from("/tmp/test-project"); - let entry = SkillEntry { - name: "logs", - description: "test", - entry_type: "agent", - content: "", - }; - let (path, fmt) = install_path(&entry, "claude-code", &root, Some("/tmp/out")); + let e = entry("logs", "agent", ""); + let (path, fmt) = install_path(&e, "claude-code", &root, Some("/tmp/out")); assert_eq!(path, PathBuf::from("/tmp/out/logs/SKILL.md")); assert_eq!(fmt, InstallFormat::SkillMd); } #[test] fn test_format_as_skill_md_adds_name() { - let entry = SkillEntry { - name: "test-agent", + let e = SkillEntry { description: "Test agent", - entry_type: "agent", content: "---\ndescription: Test agent\n---\n\n# Test\n", + ..entry("test-agent", "agent", "") }; - let result = format_as_skill_md(&entry); + let result = format_as_skill_md(&e); assert!(result.contains("name: test-agent")); assert!(result.contains("description: Test agent")); } #[test] fn 
test_format_preserves_existing_name() { - let entry = SkillEntry { - name: "test-skill", + let e = SkillEntry { description: "Test skill", - entry_type: "skill", content: "---\nname: test-skill\ndescription: Test skill\n---\n\n# Test\n", + ..entry("test-skill", "skill", "") }; - assert_eq!(format_as_skill_md(&entry), entry.content); + assert_eq!(format_as_skill_md(&e), e.content); } #[test] fn test_format_no_frontmatter() { - let entry = SkillEntry { - name: "bare", + let e = SkillEntry { description: "Bare content", - entry_type: "agent", content: "# No Frontmatter\n\nJust content.\n", + ..entry("bare", "agent", "") }; - let result = format_as_skill_md(&entry); + let result = format_as_skill_md(&e); assert!(result.starts_with("---\nname: bare\n")); assert!(result.contains("# No Frontmatter")); } + + // ---- extension helpers --------------------------------------------------- + + #[test] + fn test_resolve_platform_explicit_wins() { + assert_eq!(resolve_platform(Some("pi"), "claude-code"), "pi"); + assert_eq!(resolve_platform(Some("pi"), ""), "pi"); + } + + #[test] + fn test_resolve_platform_from_agent() { + assert_eq!(resolve_platform(None, "pi"), "pi"); + assert_eq!(resolve_platform(None, "pi-dev"), "pi"); + } + + #[test] + fn test_resolve_platform_empty_for_unknown() { + assert_eq!(resolve_platform(None, "claude-code"), ""); + assert_eq!(resolve_platform(Some(""), "claude-code"), ""); + } + + #[test] + fn test_extensions_dir_pi_project_scope() { + let root = PathBuf::from("/tmp/proj"); + assert_eq!( + extensions_dir("pi", &root, false), + Some(root.join(".pi/extensions")) + ); + } + + #[test] + fn test_extensions_dir_pi_user_scope_with_home() { + // Use the injectable helper so we don't mutate the process-global HOME + // env var (set_var is `unsafe` and races with parallel test threads). 
+ let home = PathBuf::from("/tmp/fake-home"); + assert_eq!( + extensions_dir_with_home("pi", Some(&home), &PathBuf::from("/unused"), true), + Some(PathBuf::from("/tmp/fake-home/.pi/agent/extensions")) + ); + } + + #[test] + fn test_extensions_dir_pi_user_scope_without_home_returns_none() { + assert_eq!( + extensions_dir_with_home("pi", None, &PathBuf::from("/unused"), true), + None, + ); + } + + #[test] + fn test_extensions_dir_unknown_platform_returns_none() { + let root = PathBuf::from("/tmp/proj"); + assert_eq!(extensions_dir("unknown", &root, false), None); + assert_eq!(extensions_dir("", &root, false), None); + } + + #[test] + fn test_install_paths_skill_single_file() { + let root = PathBuf::from("/tmp/proj"); + let e = entry("dd-pup", "skill", "body"); + let paths = install_paths(&e, "claude-code", "", &root, None, false).unwrap(); + assert_eq!(paths.len(), 1); + assert_eq!(paths[0].0, root.join(".claude/skills/dd-pup/SKILL.md")); + } + + #[test] + fn test_install_paths_extension_expands_files() { + static FILES: &[(&str, &str)] = &[("index.ts", "// js"), ("package.json", "{}")]; + let e = SkillEntry { + platform: "pi", + files: FILES, + ..entry("dd-pup-pi", "extension", "") + }; + let root = PathBuf::from("/tmp/proj"); + let paths = install_paths(&e, "claude-code", "pi", &root, None, false).unwrap(); + assert_eq!(paths.len(), 2); + assert_eq!(paths[0].0, root.join(".pi/extensions/dd-pup-pi/index.ts")); + assert_eq!(paths[0].1, "// js"); + assert_eq!( + paths[1].0, + root.join(".pi/extensions/dd-pup-pi/package.json") + ); + } + + #[test] + fn test_install_paths_extension_dir_override() { + static FILES: &[(&str, &str)] = &[("index.ts", "// js")]; + let e = SkillEntry { + platform: "pi", + files: FILES, + ..entry("dd-pup-pi", "extension", "") + }; + let paths = install_paths( + &e, + "claude-code", + "pi", + &PathBuf::from("/unused"), + Some("/tmp/out"), + false, + ) + .unwrap(); + assert_eq!(paths.len(), 1); + assert_eq!(paths[0].0, 
PathBuf::from("/tmp/out/dd-pup-pi/index.ts")); + } + + #[test] + fn test_install_paths_extension_unknown_platform_errors() { + static FILES: &[(&str, &str)] = &[("index.ts", "// js")]; + let e = SkillEntry { + platform: "bogus", + files: FILES, + ..entry("dd-pup-bogus", "extension", "") + }; + let err = install_paths( + &e, + "claude-code", + "bogus", + &PathBuf::from("/tmp/proj"), + None, + false, + ) + .unwrap_err(); + assert!(err.to_string().contains("bogus")); + } + + #[test] + fn test_dd_pup_pi_entry_registered() { + let e = SKILLS + .iter() + .find(|e| e.name == "dd-pup-pi") + .expect("dd-pup-pi must be registered"); + assert_eq!(e.entry_type, "extension"); + assert_eq!(e.platform, "pi"); + let names: Vec<&str> = e.files.iter().map(|(p, _)| *p).collect(); + assert!(names.contains(&"index.ts")); + assert!(names.contains(&"package.json")); + assert!(names.contains(&"README.md")); + } }