Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 65 additions & 5 deletions src/cli.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,11 +38,28 @@ import {
type ConnectManifest,
type RemoveOptions,
} from "./cli/remove-plan.js";
import { renderSplash } from "./cli/splash.js";
import { isFirstRun, readPrefs, resetPrefs, writePrefs } from "./cli/preferences.js";
import { runOnboarding } from "./cli/onboarding.js";
import { setBootVerbose } from "./logger.js";
import { VERSION } from "./version.js";

// ESM has no built-in __dirname; derive it for path lookups below.
const __dirname = dirname(fileURLToPath(import.meta.url));
// CLI arguments, minus the node binary and the script path.
const args = process.argv.slice(2);
const IS_WINDOWS = platform() === "win32";
// Verbosity comes from the flag or from the environment so wrapper
// scripts (npx shims, CI) can force it without touching argv.
// NOTE: the old single-line `--verbose`/`-v`-only form was superseded
// by this env-aware version; keeping both would redeclare the const.
const IS_VERBOSE =
  args.includes("--verbose") ||
  args.includes("-v") ||
  process.env["AGENTMEMORY_VERBOSE"] === "1" ||
  process.env["AGENTMEMORY_VERBOSE"] === "true";

// Propagate the resolved verbosity to the worker's boot logger so the
// 25-line `[agentmemory] X registered` stream is either dropped or
// printed verbatim. Without this the worker's default (env-only) would
// disagree with the CLI flag.
setBootVerbose(IS_VERBOSE);

const IS_RESET = args.includes("--reset");

// Pinned iii-engine version. The unpinned `install.iii.dev/iii/main/install.sh`
// script tracks `latest`, which made every fresh agentmemory install pull
Expand Down Expand Up @@ -123,7 +140,8 @@ Commands:

Options:
--help, -h Show this help
--verbose, -v Show engine stderr and diagnostic info on startup
--verbose, -v Show engine stderr, boot log, and diagnostic info
--reset Wipe ~/.agentmemory/preferences.json and re-run onboarding
--tools all|core Tool visibility (default: core = 7 tools)
--no-engine Skip auto-starting iii-engine
--port <N> Override REST port (default: 3111)
Expand Down Expand Up @@ -710,23 +728,61 @@ function portInUseDiagnostic(port: number): string {
: ` lsof -i :${port} # or: ss -tlnp | grep :${port}`;
}

/**
 * Poll the readiness probe every 250ms until it reports ready or the
 * deadline passes. Resolves `false` on timeout instead of throwing so
 * callers can simply skip the ready hint.
 */
async function waitForAgentmemoryReady(timeoutMs: number): Promise<boolean> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    if (await isAgentmemoryReady()) return true;
    await new Promise<void>((resolve) => setTimeout(resolve, 250));
  }
  return false;
}

/**
 * Print the one-line "ready" status. Deliberately bypasses p.outro:
 * clack would decorate the line as an end-of-flow marker, but this
 * reads as a status, not a farewell.
 */
function printReadyHint(): void {
  const status = `Memory ready on :${getRestPort()} · viewer on ${getViewerUrl()} · try: agentmemory demo`;
  process.stdout.write(`\n${status}\n`);
}

async function main() {
p.intro("agentmemory");
// `--reset` wipes preferences before anything else so the onboarding
// flow below always runs fresh.
if (IS_RESET) {
resetPrefs();
}

const firstRun = isFirstRun();
const prefs = readPrefs();
// Show the splash on the first run, on --reset, or whenever the user
// hasn't yet opted out via the schema (we set `skipSplash: true`
// after onboarding completes). Verbose runs always splash since the
// user explicitly asked for the chatty experience.
if (firstRun || IS_RESET || IS_VERBOSE || !prefs.skipSplash) {
renderSplash(VERSION);
}

if (firstRun || IS_RESET) {
await runOnboarding();
}

if (skipEngine) {
p.log.info("Skipping engine check (--no-engine)");
if (IS_VERBOSE) p.log.info("Skipping engine check (--no-engine)");
await import("./index.js");
if (await waitForAgentmemoryReady(15000)) printReadyHint();
return;
}

if (await isEngineRunning()) {
p.log.success("iii-engine is running");
if (IS_VERBOSE) p.log.success("iii-engine is running");
const attachedBin =
whichBinary("iii") ?? fallbackIiiPaths().find((p) => existsSync(p)) ?? null;
warnIfEngineVersionMismatch(attachedBin);
adoptRunningEngine();
maybeEmitNpxHint();
await import("./index.js");
if (await waitForAgentmemoryReady(15000)) printReadyHint();
return;
}

Expand Down Expand Up @@ -796,6 +852,10 @@ async function main() {
s.stop("iii-engine is ready");
maybeEmitNpxHint();
await import("./index.js");
if (await waitForAgentmemoryReady(15000)) printReadyHint();
// Mark splash as something to skip on subsequent runs. This is a
// no-op if onboarding already flipped the flag (idempotent merge).
writePrefs({ skipSplash: true });
}

async function apiFetch<T = unknown>(base: string, path: string, timeoutMs = 5000): Promise<T | null> {
Expand Down
202 changes: 202 additions & 0 deletions src/cli/onboarding.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
// First-run interactive onboarding flow.
//
// Wakes up only when `isFirstRun()` is true (preferences are missing or
// have never recorded a `firstRunAt`) or when the user passes
// `--reset`. The flow asks for:
//
// 1. Which agents will be wired to agentmemory (multi-select). Each
// option carries a small glyph that we reuse in /status output so
// the user recognises them later. The label mirrors README row 1
// (native plugins) and row 2 (MCP-only).
// 2. Which LLM provider to use for compress / consolidate / graph.
// "skip — BM25-only mode" is a real first-class option; lots of
// users want agentmemory purely as a hybrid keyword + vector
// memory layer without granting LLM API keys.
//
// We then write `~/.agentmemory/preferences.json` and seed
// `~/.agentmemory/.env` with a commented-out `*_API_KEY=` line for the
// chosen provider. This matches the existing `agentmemory init` flow
// closely so users who skip onboarding still get the same file via
// `agentmemory init`.

import { copyFile, mkdir } from "node:fs/promises";
import { constants as fsConstants, existsSync, writeFileSync } from "node:fs";
import { homedir } from "node:os";
import { dirname, join } from "node:path";
import { fileURLToPath } from "node:url";
import * as p from "@clack/prompts";
import { writePrefs } from "./preferences.js";

// ESM has no __dirname; derive it for the .env.example lookups below.
const __dirname = dirname(fileURLToPath(import.meta.url));

// Native plugin row — these agents ship an agentmemory plugin or
// first-party integration. Glyphs match SkillKit's published set
// where they overlap; the rest fall back to the generic `◇`.
// NOTE(review): glyphs are reused in /status output per the header
// comment — keep them stable once shipped.
const NATIVE_AGENTS: { value: string; label: string; glyph: string }[] = [
  { value: "claude-code", label: "Claude Code", glyph: "⟁" },
  { value: "codex", label: "Codex", glyph: "◎" },
  { value: "openhuman", label: "OpenHuman", glyph: "◇" },
  { value: "openclaw", label: "OpenClaw", glyph: "◇" },
  { value: "hermes", label: "Hermes", glyph: "◇" },
  { value: "pi", label: "Pi", glyph: "◇" },
  { value: "cursor", label: "Cursor", glyph: "◫" },
  { value: "gemini-cli", label: "Gemini CLI", glyph: "✦" },
];

// MCP-only row — these agents use the MCP server we ship rather than
// a native plugin.
const MCP_AGENTS: { value: string; label: string; glyph: string }[] = [
  { value: "opencode", label: "OpenCode", glyph: "⬡" },
  { value: "cline", label: "Cline", glyph: "◇" },
  { value: "goose", label: "Goose", glyph: "◇" },
  { value: "kilo", label: "Kilo", glyph: "◇" },
  { value: "aider", label: "Aider", glyph: "◇" },
  { value: "claude-desktop", label: "Claude Desktop", glyph: "⟁" },
  { value: "windsurf", label: "Windsurf", glyph: "◇" },
  { value: "roo", label: "Roo", glyph: "◇" },
];

// LLM providers offered for compress/consolidate. `envKey` is the
// variable seedEnvFile() writes (commented out) into ~/.agentmemory/.env;
// null (`skip`) means BM25-only mode with no key seeded.
const PROVIDERS: { value: string; label: string; envKey: string | null }[] = [
  { value: "anthropic", label: "Anthropic — claude", envKey: "ANTHROPIC_API_KEY" },
  { value: "openai", label: "OpenAI — gpt", envKey: "OPENAI_API_KEY" },
  { value: "gemini", label: "Google — gemini", envKey: "GEMINI_API_KEY" },
  { value: "openrouter", label: "OpenRouter — multi-model", envKey: "OPENROUTER_API_KEY" },
  { value: "minimax", label: "MiniMax — minimax-m1", envKey: "MINIMAX_API_KEY" },
  { value: "skip", label: "Skip — BM25-only mode (no LLM key)", envKey: null },
];

/**
 * Flatten both agent tables into clack multiselect options: the
 * native-plugin row first, then the MCP-only row, each label prefixed
 * with the agent's glyph.
 */
function buildAgentOptions(): { value: string; label: string; hint?: string }[] {
  const toOption =
    (hint: string) =>
    (agent: { value: string; label: string; glyph: string }) => ({
      value: agent.value,
      label: `${agent.glyph} ${agent.label}`,
      hint,
    });
  return [
    ...NATIVE_AGENTS.map(toOption("native plugin")),
    ...MCP_AGENTS.map(toOption("MCP server")),
  ];
}

/**
 * Locate the bundled `.env.example`, probing the same candidate
 * locations as cli.ts's findEnvExample so both entry points ship an
 * identical .env skeleton. Duplicated on purpose rather than imported:
 * pulling in cli.ts would drag its top-level side effects into the
 * test runner. Returns null when no template is found.
 */
function findEnvExample(): string | null {
  const searchDirs = [
    join(__dirname, "..", ".."),
    join(__dirname, ".."),
    __dirname,
    process.cwd(),
  ];
  for (const dir of searchDirs) {
    const candidate = join(dir, ".env.example");
    if (existsSync(candidate)) return candidate;
  }
  return null;
}

/**
 * Ensure `~/.agentmemory/.env` exists without ever clobbering a file
 * the user already edited.
 *
 * @param provider chosen provider id, or null for BM25-only mode
 * @returns the target path, or null when copying the template failed
 */
async function seedEnvFile(provider: string | null): Promise<string | null> {
  const target = join(homedir(), ".agentmemory", ".env");
  await mkdir(dirname(target), { recursive: true });

  const template = findEnvExample();
  const alreadySeeded = existsSync(target);

  if (template && !alreadySeeded) {
    // COPYFILE_EXCL guards against a concurrent writer: losing that
    // race (EEXIST) is harmless; any other copy failure is surfaced
    // to the caller as null.
    try {
      await copyFile(template, target, fsConstants.COPYFILE_EXCL);
    } catch (err) {
      if ((err as NodeJS.ErrnoException)?.code !== "EEXIST") {
        return null;
      }
    }
  } else if (!template && !alreadySeeded) {
    // No bundled template found — write a minimal skeleton so users
    // always get a `.env` to edit. NOTE(review): only this fallback
    // path seeds the provider's commented-out key; the copied template
    // is assumed to already contain it — confirm against .env.example.
    const skeleton = [
      "# agentmemory environment — uncomment what you need",
      "# AGENTMEMORY_URL=http://localhost:3111",
      "",
    ];
    const envKey = PROVIDERS.find((x) => x.value === provider)?.envKey;
    if (envKey) {
      skeleton.push(`# ${envKey}=`);
    }
    writeFileSync(target, skeleton.join("\n"), { mode: 0o600 });
  }

  return target;
}

/**
 * What the first-run flow collected.
 */
export interface OnboardingResult {
  // Agent ids in selection order (values from NATIVE_AGENTS/MCP_AGENTS).
  agents: string[];
  // Provider id from PROVIDERS, or null when the user chose "skip"
  // (BM25-only mode).
  provider: string | null;
}

/**
 * Interactive first-run setup: pick the agents to wire up and the LLM
 * provider (or BM25-only mode), then persist preferences and seed the
 * `.env` file. Ctrl-C at any prompt exits cleanly with code 0.
 */
export async function runOnboarding(): Promise<OnboardingResult> {
  // Shared guard for both prompts: on cancel, print the same notice
  // and exit (cancelling setup is not an error, hence exit code 0).
  const orExit = <T>(answer: T | symbol): T => {
    if (p.isCancel(answer)) {
      p.cancel("Setup cancelled. Re-run any time with: agentmemory --reset");
      process.exit(0);
    }
    return answer as T;
  };

  p.note(
    [
      "Welcome to agentmemory.",
      "",
      "Persistent memory for your AI coding agents. We'll pick which",
      "agents to wire up and which provider (if any) handles compression",
      "and consolidation. Either step can be changed later in ~/.agentmemory/.env.",
    ].join("\n"),
    "first-run setup",
  );

  const agentAnswer = orExit(
    await p.multiselect<string>({
      message: "Which agents will use agentmemory? (space to toggle, enter to confirm)",
      options: buildAgentOptions(),
      required: false,
      initialValues: ["claude-code"],
    }),
  );

  const providerAnswer = orExit(
    await p.select<string>({
      message: "Which LLM provider should agentmemory use for compress/consolidate?",
      options: PROVIDERS.map(({ value, label }) => ({ value, label })),
      initialValue: "anthropic",
    }),
  );

  // "skip" is the explicit BM25-only choice; normalise it to null.
  const provider = providerAnswer === "skip" ? null : providerAnswer;
  const agents = agentAnswer ?? [];

  const envPath = await seedEnvFile(provider);

  writePrefs({
    lastAgent: agents[0] ?? null,
    lastAgents: agents,
    lastProvider: provider,
    skipSplash: true,
    firstRunAt: new Date().toISOString(),
  });

  // Build the closing summary: what was written, and what (if anything)
  // the user still has to do by hand.
  const prefsLocation = join(homedir(), ".agentmemory", "preferences.json");
  const summary = [`✓ Saved preferences to ${prefsLocation}`];
  summary.push(
    envPath
      ? `✓ Wrote ${envPath} (edit to add your API key)`
      : `! Could not write ~/.agentmemory/.env — run \`agentmemory init\` after this completes.`,
  );
  if (provider === null) {
    summary.push(" No provider chosen — agentmemory will run in BM25-only mode.");
  } else {
    const envKey = PROVIDERS.find((x) => x.value === provider)?.envKey;
    if (envKey) {
      summary.push(` Uncomment ${envKey}= in that file to enable ${provider}.`);
    }
  }
  p.note(summary.join("\n"), "ready");

  return { agents, provider };
}
Loading