Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,8 @@ The interactive wizard will ask you to:
1. **Choose your personal agent** — `openclaw`, `copaw`, `ironclaw`, `picoclaw`, `zeroclaw`, `nanoclaw`, `nemoclaw`, or `none` (MetaClaw will auto-configure it on start)
2. **Choose your auth method** — `api_key` (direct API) or `oauth_token` (CLI subprocess)
3. **Choose your LLM provider**:
- **api_key**: Kimi, Qwen, OpenAI, Volcano Engine, or custom → enter API base + API key
- **api_key**: Kimi, Qwen, OpenAI, Minimax, Novita, OpenRouter, Volcengine, BytePlus, or custom
- **Volcengine / BytePlus**: choose `standard` or `coding-plan`, then enter the matching API key
- **oauth_token**: Anthropic (Claude Code), OpenAI Codex, or Gemini CLI → paste OAuth token
4. **Enter your model ID** and optionally enable RL training

Expand Down Expand Up @@ -290,7 +291,8 @@ claw_type: openclaw # "openclaw" | "copaw" | "ironclaw" | "picoclaw" | "z

llm:
auth_method: api_key # "api_key" | "oauth_token"
provider: kimi # kimi | qwen | openai | minimax | novita | openrouter | volcengine | custom
provider: kimi # kimi | qwen | openai | minimax | novita | openrouter | volcengine | byteplus | custom
plan_variant: "" # volcengine/byteplus only: "standard" | "coding-plan"
model_id: moonshotai/Kimi-K2.5
api_base: https://api.moonshot.cn/v1
api_key: sk-...
Expand Down
11 changes: 8 additions & 3 deletions assets/README_ZH.md
Original file line number Diff line number Diff line change
Expand Up @@ -147,8 +147,12 @@ metaclaw setup

交互式向导会引导你完成:
1. **选择个人 Agent** — `openclaw`、`copaw`、`ironclaw`、`picoclaw`、`zeroclaw`、`nanoclaw`、`nemoclaw` 或 `none`(`metaclaw start` 时会自动配置)
2. **选择 LLM 提供商** — Kimi、Qwen、OpenAI、Volcano Engine、自定义等
3. **填写 API Key**,并可选择是否启用 RL 训练
2. **选择认证方式** — `api_key`(直连 API)或 `oauth_token`(CLI 子进程)
3. **选择 LLM 提供商**
- **api_key**:Kimi、Qwen、OpenAI、Minimax、Novita、OpenRouter、Volcengine、BytePlus 或自定义
- **Volcengine / BytePlus**:再选择 `standard` 或 `coding-plan` 变体,并填写对应 API Key
- **oauth_token**:Anthropic(Claude Code)、OpenAI Codex 或 Gemini CLI
4. **填写模型 ID**,并可选择是否启用 RL 训练

MetaClaw 的 RL 路径可以显式切换 `tinker`、`mint` 和 `weaver`。推荐默认值是 `auto`;当环境里安装了 MinT 或 Weaver 兼容包时,它仍然可以根据对应风格的凭证或 base URL 自动识别。

Expand Down Expand Up @@ -222,7 +226,8 @@ claw_type: openclaw # "openclaw" | "copaw" | "ironclaw" | "picoclaw" | "z

llm:
auth_method: api_key # "api_key" | "oauth_token"
provider: kimi # kimi | qwen | openai | minimax | novita | openrouter | volcengine | custom
provider: kimi # kimi | qwen | openai | minimax | novita | openrouter | volcengine | byteplus | custom
plan_variant: "" # 仅 volcengine/byteplus 使用:"standard" | "coding-plan"
model_id: moonshotai/Kimi-K2.5
api_base: https://api.moonshot.cn/v1
api_key: sk-...
Expand Down
3 changes: 2 additions & 1 deletion metaclaw/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,8 @@ class MetaClawConfig:
# ------------------------------------------------------------------ #
# LLM for skills_only forwarding #
# ------------------------------------------------------------------ #
# provider: "custom" (OpenAI-compat API) | "anthropic" | "openai-codex" | "gemini"
# provider: API-key providers like "openai", "volcengine", "byteplus", "custom",
# or CLI-backed providers like "anthropic", "openai-codex", "gemini"
llm_provider: str = "custom"
# auth_method: "api_key" (direct API call) | "oauth_token" (CLI subprocess)
llm_auth_method: str = "api_key"
Expand Down
3 changes: 3 additions & 0 deletions metaclaw/config_store.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
"mode": "auto",
"llm": {
"provider": "custom",
"plan_variant": "",
"auth_method": "api_key", # "api_key" | "oauth_token"
"model_id": "",
"api_base": "",
Expand Down Expand Up @@ -341,6 +342,8 @@ def describe(self) -> str:
f"skills.evolution_every_n_turns: {skills.get('evolution_every_n_turns', 10)}",
f"rl.enabled: {rl.get('enabled', False)}",
]
if llm.get("provider") in ("volcengine", "byteplus"):
lines.insert(2, f"llm.plan_variant: {llm.get('plan_variant', 'standard')}")
if rl.get("enabled"):
lines += [
f"rl.model: {rl.get('model', '?')}",
Expand Down
135 changes: 107 additions & 28 deletions metaclaw/setup_wizard.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from pathlib import Path

from .config_store import CONFIG_DIR, ConfigStore
from .config_store import CONFIG_DIR, ConfigStore, _deep_merge

_API_KEY_PROVIDERS = {
"kimi": {
Expand Down Expand Up @@ -34,15 +34,40 @@
"model_id": "",
},
"volcengine": {
"api_base": "https://ark.cn-beijing.volces.com/api/v3",
"model_id": "doubao-seed-2-0-pro-260215",
"auth_label": "Volcengine API Key",
"variants": {
"standard": {
"api_base": "https://ark.cn-beijing.volces.com/api/v3",
"model_id": "doubao-seed-2-0-pro-260215",
},
"coding-plan": {
"api_base": "https://ark.cn-beijing.volces.com/api/coding/v3",
"model_id": "doubao-seed-2.0-code",
},
},
},
"byteplus": {
"auth_label": "BytePlus API Key",
"variants": {
"standard": {
"api_base": "https://ark.ap-southeast.bytepluses.com/api/v3",
"model_id": "seed-2-0-pro-260328",
},
"coding-plan": {
"api_base": "https://ark.ap-southeast.bytepluses.com/api/coding/v3",
"model_id": "dola-seed-2.0-pro",
},
},
},
"custom": {
"api_base": "",
"model_id": "",
},
}

_PLAN_VARIANT_PROVIDERS = {"volcengine", "byteplus"}
_PLAN_VARIANTS = ["standard", "coding-plan"]

_OAUTH_TOKEN_PROVIDERS = {
"anthropic": {
"model_id": "claude-sonnet-4-6",
Expand All @@ -65,6 +90,35 @@
}


def _resolve_api_key_preset(provider: str, plan_variant: str = "") -> dict[str, str]:
    """Return the effective API-key preset for *provider*.

    Providers such as Volcengine/BytePlus define a ``"variants"`` sub-table
    (``standard`` / ``coding-plan``); this flattens the chosen variant into
    the top-level preset.  An unknown or empty *plan_variant* falls back to
    ``"standard"``.

    Always returns a fresh dict so callers can mutate the result without
    corrupting the shared module-level ``_API_KEY_PROVIDERS`` table (the
    original returned the table entry itself on the no-variants path).
    """
    preset = _API_KEY_PROVIDERS[provider]
    variants = preset.get("variants")
    if not isinstance(variants, dict):
        # Copy to avoid handing out an alias of the module-level preset.
        return dict(preset)

    variant = plan_variant if plan_variant in variants else "standard"
    resolved = {k: v for k, v in preset.items() if k != "variants"}
    resolved.update(variants[variant])
    return resolved


def _infer_plan_variant(provider: str, current_llm: dict) -> str:
    """Best-effort guess at the plan variant configured for *provider*.

    Returns "" for providers without variants.  Otherwise prefers an
    explicitly stored ``plan_variant``; failing that, matches the stored
    ``api_base`` against each variant's preset; defaults to "standard".
    """
    if provider not in _PLAN_VARIANT_PROVIDERS:
        return ""

    stored = current_llm.get("plan_variant", "")
    if stored in _PLAN_VARIANTS:
        return stored

    # Infer from the api_base recorded in the existing config, if it
    # matches one of the known variant presets.
    base = current_llm.get("api_base", "")
    inferred = next(
        (
            candidate
            for candidate in _PLAN_VARIANTS
            if _resolve_api_key_preset(provider, candidate).get("api_base", "") == base
        ),
        None,
    )
    return inferred if inferred is not None else "standard"


def _prompt(msg: str, default: str = "", hide: bool = False) -> str:
import getpass
if default:
Expand Down Expand Up @@ -144,6 +198,7 @@ def run(self):
)

provider = ""
plan_variant = ""
api_base = ""
api_key = ""
model_id = ""
Expand Down Expand Up @@ -210,11 +265,24 @@ def run(self):
current_provider = "custom"

provider = _prompt_choice(
"Provider",
"LLM provider",
list(_API_KEY_PROVIDERS.keys()),
default=current_provider,
)
preset = _API_KEY_PROVIDERS[provider]
if provider in _PLAN_VARIANT_PROVIDERS:
default_plan_variant = _infer_plan_variant(provider, current_llm)
plan_variant = _prompt_choice(
"Plan variant",
_PLAN_VARIANTS,
default=default_plan_variant,
)

preset = _resolve_api_key_preset(provider, plan_variant)
current_plan_variant = _infer_plan_variant(current_provider, current_llm)
same_variant = (
provider not in _PLAN_VARIANT_PROVIDERS
or current_plan_variant == plan_variant
)

if provider == "custom":
api_base = _prompt(
Expand All @@ -227,10 +295,14 @@ def run(self):

model_id = _prompt(
"Model ID",
default=current_llm.get("model_id") or preset["model_id"],
default=(
current_llm.get("model_id")
if current_provider == provider and same_variant and current_llm.get("model_id")
else preset["model_id"]
),
)
api_key = _prompt(
"API key",
preset.get("auth_label", "API key"),
default=current_llm.get("api_key", ""),
hide=True,
)
Expand Down Expand Up @@ -413,28 +485,35 @@ def run(self):
scheduler_config = {"enabled": False, "calendar": {"enabled": False}}

# ---- Write config ----
data = {
"mode": mode,
"llm": {
"provider": provider,
"auth_method": auth_method,
"model_id": model_id,
"api_base": api_base,
"api_key": api_key,
data = _deep_merge(
existing,
{
"mode": mode,
"llm": {
"provider": provider,
"plan_variant": plan_variant,
"auth_method": auth_method,
"model_id": model_id,
"api_base": api_base,
"api_key": api_key,
},
"proxy": {
"port": proxy_port,
"host": current_proxy.get("host", "0.0.0.0"),
},
"skills": {
"enabled": skills_enabled,
"dir": skills_dir,
"retrieval_mode": current_skills.get("retrieval_mode", "template"),
"top_k": current_skills.get("top_k", 6),
"task_specific_top_k": current_skills.get("task_specific_top_k", 10),
"auto_evolve": auto_evolve,
"evolution_every_n_turns": current_skills.get("evolution_every_n_turns", 10),
},
"rl": rl_config,
"scheduler": scheduler_config,
},
"proxy": {"port": proxy_port, "host": "0.0.0.0"},
"skills": {
"enabled": skills_enabled,
"dir": skills_dir,
"retrieval_mode": current_skills.get("retrieval_mode", "template"),
"top_k": current_skills.get("top_k", 6),
"task_specific_top_k": current_skills.get("task_specific_top_k", 10),
"auto_evolve": auto_evolve,
"evolution_every_n_turns": current_skills.get("evolution_every_n_turns", 10),
},
"rl": rl_config,
"scheduler": scheduler_config,
}
)

cs.save(data)
Path(skills_dir).expanduser().mkdir(parents=True, exist_ok=True)
Expand Down
56 changes: 56 additions & 0 deletions tests/test_setup_wizard.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@ def test_setup_wizard_preserves_existing_proxy_settings(monkeypatch, tmp_path: P
def fake_prompt_choice(msg, choices, default=""):
if msg == "Operating mode":
return "skills_only"
if msg == "Auth method":
return "api_key"
if msg == "LLM provider":
return "custom"
raise AssertionError(f"Unexpected choice prompt: {msg}")
Expand Down Expand Up @@ -79,3 +81,57 @@ def fake_prompt_int(msg, default=0):
assert saved["proxy"]["host"] == "127.0.0.1"
assert saved["proxy"]["api_key"] == "proxy-key"
assert saved["proxy"]["trusted_local"] is True


def test_setup_wizard_supports_byteplus_coding_plan(monkeypatch, tmp_path: Path):
    """Wizard applies the BytePlus coding-plan preset and persists it to config."""
    config_path = tmp_path / "config.yaml"
    skills_dir = tmp_path / "skills"
    store = ConfigStore(config_file=config_path)

    monkeypatch.setattr("metaclaw.setup_wizard.ConfigStore", lambda: store)

    # Canned answers for each choice prompt the wizard is expected to show.
    choice_answers = {
        "Operating mode": "skills_only",
        "Auth method": "api_key",
        "LLM provider": "byteplus",
        "Plan variant": "coding-plan",
    }

    def fake_prompt_choice(msg, choices, default=""):
        if msg not in choice_answers:
            raise AssertionError(f"Unexpected choice prompt: {msg}")
        return choice_answers[msg]

    def fake_prompt(msg, default="", hide=False):
        # Accepting the default for "Model ID" verifies the preset model
        # is offered as the suggestion.
        if msg == "Model ID":
            return default
        text_answers = {
            "BytePlus API Key": "bp-key",
            "Skills directory": str(skills_dir),
        }
        if msg not in text_answers:
            raise AssertionError(f"Unexpected text prompt: {msg}")
        return text_answers[msg]

    def fake_prompt_bool(msg, default=False):
        if msg in (
            "Enable skill injection",
            "Auto-summarize skills after each conversation",
        ):
            return True
        raise AssertionError(f"Unexpected bool prompt: {msg}")

    def fake_prompt_int(msg, default=0):
        if msg != "Proxy port":
            raise AssertionError(f"Unexpected int prompt: {msg}")
        return 30000

    for attr, fake in (
        ("_prompt_choice", fake_prompt_choice),
        ("_prompt", fake_prompt),
        ("_prompt_bool", fake_prompt_bool),
        ("_prompt_int", fake_prompt_int),
    ):
        monkeypatch.setattr(f"metaclaw.setup_wizard.{attr}", fake)

    SetupWizard().run()

    saved = store.load()
    llm = saved["llm"]
    assert llm["provider"] == "byteplus"
    assert llm["plan_variant"] == "coding-plan"
    assert llm["model_id"] == "dola-seed-2.0-pro"
    assert llm["api_base"] == "https://ark.ap-southeast.bytepluses.com/api/coding/v3"
    assert llm["api_key"] == "bp-key"