diff --git a/ROADMAP.md b/ROADMAP.md
index 20e472b52a..c41bece9ae 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -13,6 +13,10 @@ Wave Terminal's AI assistant is already powerful and continues to evolve. Here's
### AI Provider Support
- ✅ OpenAI (including gpt-5 and gpt-5-mini models)
+- ✅ Google Gemini (v0.13)
+- ✅ OpenRouter and custom OpenAI-compatible endpoints (v0.13)
+- ✅ Azure OpenAI (modern and legacy APIs) (v0.13)
+- ✅ Local AI models via Ollama, LM Studio, vLLM, and other OpenAI-compatible servers (v0.13)
### Context & Input
@@ -32,33 +36,28 @@ Wave Terminal's AI assistant is already powerful and continues to evolve. Here's
### AI Configuration & Flexibility
-- 🔷 BYOK (Bring Your Own Key) - Use your own API keys for any supported provider
+- ✅ BYOK (Bring Your Own Key) - Use your own API keys for any supported provider (v0.13)
+- ✅ Local AI agents - Run AI models locally on your machine (v0.13)
- 🔧 Enhanced provider configuration options
- 🔷 Context (add markdown files to give persistent system context)
### Expanded Provider Support
-Top priorities are Claude (for better coding support), and the OpenAI Completions API which will allow us to interface with
-many more local/open models.
-
- 🔷 Anthropic Claude - Full integration with extended thinking and tool use
-- 🔷 OpenAI Completions API - Support for older model formats
-- 🤞 Google Gemini - Complete integration
-- 🤞 Local AI agents - Run AI models locally on your machine
### Advanced AI Tools
#### File Operations
-- 🔧 AI file writing with intelligent diff previews
-- 🔧 Rollback support for AI-made changes
+- ✅ AI file writing with intelligent diff previews
+- ✅ Rollback support for AI-made changes
- 🔷 Multi-file editing workflows
- 🔷 Safe file modification patterns
#### Terminal Command Execution
- 🔧 Execute commands directly from AI
-- 🔧 Intelligent terminal state detection
+- ✅ Intelligent terminal state detection
- 🔧 Command result capture and parsing
### Remote & Advanced Capabilities
diff --git a/cmd/server/main-server.go b/cmd/server/main-server.go
index 7971511159..748acaefbd 100644
--- a/cmd/server/main-server.go
+++ b/cmd/server/main-server.go
@@ -22,6 +22,7 @@ import (
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/remote/conncontroller"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
+ "github.com/wavetermdev/waveterm/pkg/secretstore"
"github.com/wavetermdev/waveterm/pkg/service"
"github.com/wavetermdev/waveterm/pkg/telemetry"
"github.com/wavetermdev/waveterm/pkg/telemetry/telemetrydata"
@@ -224,18 +225,25 @@ func updateTelemetryCounts(lastCounts telemetrydata.TEventProps) telemetrydata.T
customWidgets := fullConfig.CountCustomWidgets()
customAIPresets := fullConfig.CountCustomAIPresets()
customSettings := wconfig.CountCustomSettings()
+ customAIModes := fullConfig.CountCustomAIModes()
props.UserSet = &telemetrydata.TEventUserProps{
SettingsCustomWidgets: customWidgets,
SettingsCustomAIPresets: customAIPresets,
SettingsCustomSettings: customSettings,
+ SettingsCustomAIModes: customAIModes,
+ }
+
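+	// Best-effort: if the secret store can't be read, leave SettingsSecretsCount unset rather than failing.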
+ secretsCount, err := secretstore.CountSecrets()
+ if err == nil {
+ props.UserSet.SettingsSecretsCount = secretsCount
}
if utilfn.CompareAsMarshaledJson(props, lastCounts) {
return lastCounts
}
tevent := telemetrydata.MakeTEvent("app:counts", props)
- err := telemetry.RecordTEvent(ctx, tevent)
+ err = telemetry.RecordTEvent(ctx, tevent)
if err != nil {
log.Printf("error recording counts tevent: %v\n", err)
}
diff --git a/docs/docs/releasenotes.mdx b/docs/docs/releasenotes.mdx
index f14b1252c7..b540906e06 100644
--- a/docs/docs/releasenotes.mdx
+++ b/docs/docs/releasenotes.mdx
@@ -6,6 +6,33 @@ sidebar_position: 200
# Release Notes
+### v0.13.0 — Dec 8, 2025
+
+**Wave v0.13 Brings Local AI Support, BYOK, and Unified Configuration**
+
+Wave v0.13 is a major release that opens up Wave AI to local models, third-party providers, and bring-your-own-key (BYOK) configurations. This release also includes a completely redesigned configuration system and several terminal improvements.
+
+**Local AI & BYOK Support:**
+- **OpenAI-Compatible API** - Wave now supports any provider or local server using the `/v1/chat/completions` endpoint, enabling use of Ollama, LM Studio, vLLM, OpenRouter, and countless other local and hosted models
+- **Google Gemini Integration** - Native support for Google's Gemini models with a dedicated API adapter
+- **Provider Presets** - Simplified configuration with built-in presets for OpenAI, OpenRouter, Google, Azure, and custom endpoints
+- **Multiple AI Modes** - Easily switch between different models and providers with a unified interface
+- See the new [Wave AI Modes documentation](https://docs.waveterm.dev/waveai-modes) for configuration examples and setup guides
+
+**Unified Configuration Widget:**
+- **New Config Interface** - Replaced the basic JSON editor with a dedicated configuration widget accessible from the sidebar
+- **Better Organization** - Browse and edit different configuration types (general settings, AI modes, secrets) with improved validation and error handling
+- **Integrated Secrets Management** - Access Wave's secret store directly from the config widget for secure credential management
+
+**Terminal Improvements:**
+- **Bracketed Paste Mode** - Now enabled by default to improve multi-line paste behavior and compatibility with tools like Claude Code
+- **Windows Paste Fix** - Ctrl+V now works as a standard paste accelerator on Windows
+- **SSH Password Management** - Store SSH connection passwords in Wave's secret store to avoid re-typing credentials
+
+**Other Changes:**
+- Package updates and dependency upgrades
+- Various bug fixes and stability improvements
+
### v0.12.5 — Nov 24, 2025
Quick patch release to fix paste behavior on Linux (prevent raw HTML from getting pasted to the terminal).
diff --git a/docs/docs/secrets.mdx b/docs/docs/secrets.mdx
index ab6f7902bc..e01612c5b8 100644
--- a/docs/docs/secrets.mdx
+++ b/docs/docs/secrets.mdx
@@ -8,7 +8,7 @@ import { VersionBadge } from "@site/src/components/versionbadge";
# Secrets
-
+
Wave Terminal provides a secure way to store sensitive information like passwords, API keys, and tokens. Secrets are stored encrypted in your system's native keychain (macOS Keychain, Windows Credential Manager, or Linux Secret Service), ensuring your sensitive data remains protected.
diff --git a/docs/docs/waveai-modes.mdx b/docs/docs/waveai-modes.mdx
index d4f0c30a69..d8b94ee460 100644
--- a/docs/docs/waveai-modes.mdx
+++ b/docs/docs/waveai-modes.mdx
@@ -4,6 +4,10 @@ id: "waveai-modes"
title: "Wave AI (Local Models + BYOK)"
---
+import { VersionBadge } from "@site/src/components/versionbadge";
+
+<VersionBadge version="v0.13.0" noLeftMargin />
+
Wave AI supports custom AI modes that allow you to use local models, custom API endpoints, and alternative AI providers. This gives you complete control over which models and providers you use with Wave's AI features.
## Configuration Overview
@@ -15,7 +19,7 @@ AI modes are configured in `~/.config/waveterm/waveai.json`.
2. Select "Settings" from the menu
3. Choose "Wave AI Modes" from the settings sidebar
-**Or edit from the command line:**
+**Or launch the editor from the command line:**
```bash
wsh editconfig waveai.json
```
@@ -43,83 +47,51 @@ Wave AI supports the following API types:
- **`openai-responses`**: Uses the `/v1/responses` endpoint (modern API for GPT-5+ models)
- **`google-gemini`**: Google's Gemini API format (automatically set when using `ai:provider: "google"`, not typically used directly)
-## Configuration Structure
+## Global Wave AI Settings
-### Minimal Configuration (with Provider)
+You can configure global Wave AI behavior in your Wave Terminal settings (separate from the mode configurations in `waveai.json`).
-```json
-{
- "mode-key": {
- "display:name": "Qwen (OpenRouter)",
- "ai:provider": "openrouter",
- "ai:model": "qwen/qwen-2.5-coder-32b-instruct"
- }
-}
-```
+### Setting a Default AI Mode
-### Full Configuration (all fields)
+After configuring a local model or custom mode, you can make it the default by setting `waveai:defaultmode` in your Wave Terminal settings.
-```json
-{
- "mode-key": {
- "display:name": "Display Name",
- "display:order": 1,
- "display:icon": "icon-name",
- "display:description": "Full description",
- "ai:provider": "custom",
- "ai:apitype": "openai-chat",
- "ai:model": "model-name",
- "ai:thinkinglevel": "medium",
- "ai:endpoint": "http://localhost:11434/v1/chat/completions",
- "ai:azureapiversion": "v1",
- "ai:apitoken": "your-token",
- "ai:apitokensecretname": "PROVIDER_KEY",
- "ai:azureresourcename": "your-resource",
- "ai:azuredeployment": "your-deployment",
- "ai:capabilities": ["tools", "images", "pdfs"]
- }
-}
-```
+:::important
+Use the **mode key** (the key in your `waveai.json` configuration), not the display name. For example, use `"ollama-llama"` (the key), not `"Ollama - Llama 3.3"` (the display name).
+:::
-### Field Reference
+**Using the settings command:**
+```bash
+wsh setconfig waveai:defaultmode="ollama-llama"
+```
-| Field | Required | Description |
-|-------|----------|-------------|
-| `display:name` | Yes | Name shown in the AI mode selector |
-| `display:order` | No | Sort order in the selector (lower numbers first) |
-| `display:icon` | No | Icon identifier for the mode |
-| `display:description` | No | Full description of the mode |
-| `ai:provider` | No | Provider preset: `openai`, `openrouter`, `google`, `azure`, `azure-legacy`, `custom` |
-| `ai:apitype` | No | API type: `openai-chat`, `openai-responses`, or `google-gemini` (defaults to `openai-chat` if not specified) |
-| `ai:model` | No | Model identifier (required for most providers) |
-| `ai:thinkinglevel` | No | Thinking level: `low`, `medium`, or `high` |
-| `ai:endpoint` | No | *Full* API endpoint URL (auto-set by provider when available) |
-| `ai:azureapiversion` | No | Azure API version (for `azure-legacy` provider, defaults to `2025-04-01-preview`) |
-| `ai:apitoken` | No | API key/token (not recommended - use secrets instead) |
-| `ai:apitokensecretname` | No | Name of secret containing API token (auto-set by provider) |
-| `ai:azureresourcename` | No | Azure resource name (for Azure providers) |
-| `ai:azuredeployment` | No | Azure deployment name (for `azure-legacy` provider) |
-| `ai:capabilities` | No | Array of supported capabilities: `"tools"`, `"images"`, `"pdfs"` |
-| `waveai:cloud` | No | Internal - for Wave Cloud AI configuration only |
-| `waveai:premium` | No | Internal - for Wave Cloud AI configuration only |
+**Or edit settings.json directly:**
+1. Click the settings (gear) icon in the widget bar
+2. Select "Settings" from the menu
+3. Add the `waveai:defaultmode` key to your settings.json:
+```json
+ "waveai:defaultmode": "ollama-llama"
+```
-### AI Capabilities
+This will make the specified mode the default selection when opening Wave AI features.
-The `ai:capabilities` field specifies what features the AI mode supports:
+### Hiding Wave Cloud Modes
-- **`tools`** - Enables AI tool usage for file reading/writing, shell integration, and widget interaction
-- **`images`** - Allows image attachments in chat (model can view uploaded images)
-- **`pdfs`** - Allows PDF file attachments in chat (model can read PDF content)
+If you prefer to use only your local or custom models and want to hide Wave's cloud AI modes from the mode dropdown, set `waveai:showcloudmodes` to `false`:
-**Provider-specific behavior:**
-- **OpenAI and Google providers**: Capabilities are automatically configured based on the model. You don't need to specify them.
-- **OpenRouter, Azure, Azure-Legacy, and Custom providers**: You must manually specify capabilities based on your model's features.
+**Using the settings command:**
+```bash
+wsh setconfig waveai:showcloudmodes=false
+```
-:::warning
-If you don't include `"tools"` in the `ai:capabilities` array, the AI model will not be able to interact with your Wave terminal widgets, read/write files, or execute commands. Most AI modes should include `"tools"` for the best Wave experience.
-:::
+**Or edit settings.json directly:**
+1. Click the settings (gear) icon in the widget bar
+2. Select "Settings" from the menu
+3. Add the `waveai:showcloudmodes` key to your settings.json:
+```json
+ "waveai:showcloudmodes": false
+```
-Most models support `tools` and can benefit from it. Vision-capable models should include `images`. Not all models support PDFs, so only include `pdfs` if your model can process them.
+This will hide Wave's built-in cloud AI modes, showing only your custom configured modes.
## Local Model Examples
@@ -132,7 +104,7 @@ Most models support `tools` and can benefit from it. Vision-capable models shoul
"ollama-llama": {
"display:name": "Ollama - Llama 3.3",
"display:order": 1,
- "display:icon": "llama",
+ "display:icon": "microchip",
"display:description": "Local Llama 3.3 70B model via Ollama",
"ai:apitype": "openai-chat",
"ai:model": "llama3.3:70b",
@@ -420,3 +392,81 @@ If you get "model not found" errors:
- Use `openai-chat` for Ollama, LM Studio, custom endpoints, and most cloud providers
- Use `openai-responses` for newer OpenAI models (GPT-5+) or when your provider specifically requires it
- Provider presets automatically set the correct API type when needed
+
+## Configuration Reference
+
+### Minimal Configuration (with Provider)
+
+```json
+{
+ "mode-key": {
+ "display:name": "Qwen (OpenRouter)",
+ "ai:provider": "openrouter",
+ "ai:model": "qwen/qwen-2.5-coder-32b-instruct"
+ }
+}
+```
+
+### Full Configuration (all fields)
+
+```json
+{
+ "mode-key": {
+ "display:name": "Display Name",
+ "display:order": 1,
+ "display:icon": "icon-name",
+ "display:description": "Full description",
+ "ai:provider": "custom",
+ "ai:apitype": "openai-chat",
+ "ai:model": "model-name",
+ "ai:thinkinglevel": "medium",
+ "ai:endpoint": "http://localhost:11434/v1/chat/completions",
+ "ai:azureapiversion": "v1",
+ "ai:apitoken": "your-token",
+ "ai:apitokensecretname": "PROVIDER_KEY",
+ "ai:azureresourcename": "your-resource",
+ "ai:azuredeployment": "your-deployment",
+ "ai:capabilities": ["tools", "images", "pdfs"]
+ }
+}
+```
+
+### Field Reference
+
+| Field | Required | Description |
+|-------|----------|-------------|
+| `display:name` | Yes | Name shown in the AI mode selector |
+| `display:order` | No | Sort order in the selector (lower numbers first) |
+| `display:icon` | No | Icon identifier for the mode; any [FontAwesome icon](https://fontawesome.com/search) name, without the "fa-" prefix. Defaults to "sparkles" |
+| `display:description` | No | Full description of the mode |
+| `ai:provider` | No | Provider preset: `openai`, `openrouter`, `google`, `azure`, `azure-legacy`, `custom` |
+| `ai:apitype` | No | API type: `openai-chat`, `openai-responses`, or `google-gemini` (defaults to `openai-chat` if not specified) |
+| `ai:model` | No | Model identifier (required for most providers) |
+| `ai:thinkinglevel` | No | Thinking level: `low`, `medium`, or `high` |
+| `ai:endpoint` | No | *Full* API endpoint URL (auto-set by provider when available) |
+| `ai:azureapiversion` | No | Azure API version (for `azure-legacy` provider, defaults to `2025-04-01-preview`) |
+| `ai:apitoken` | No | API key/token (not recommended - use secrets instead) |
+| `ai:apitokensecretname` | No | Name of secret containing API token (auto-set by provider) |
+| `ai:azureresourcename` | No | Azure resource name (for Azure providers) |
+| `ai:azuredeployment` | No | Azure deployment name (for `azure-legacy` provider) |
+| `ai:capabilities` | No | Array of supported capabilities: `"tools"`, `"images"`, `"pdfs"` |
+| `waveai:cloud` | No | Internal - for Wave Cloud AI configuration only |
+| `waveai:premium` | No | Internal - for Wave Cloud AI configuration only |
+
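+Rather than embedding `ai:apitoken` directly in the file, you can point a mode at a stored secret via `ai:apitokensecretname`. A sketch (the secret name, endpoint, and model are placeholders for values you create yourself):
+
+```json
+{
+  "my-provider": {
+    "display:name": "My Provider",
+    "ai:provider": "custom",
+    "ai:model": "model-name",
+    "ai:endpoint": "https://api.example.com/v1/chat/completions",
+    "ai:apitokensecretname": "MY_PROVIDER_KEY"
+  }
+}
+```
+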
+### AI Capabilities
+
+The `ai:capabilities` field specifies what features the AI mode supports:
+
+- **`tools`** - Enables AI tool usage for file reading/writing, shell integration, and widget interaction
+- **`images`** - Allows image attachments in chat (model can view uploaded images)
+- **`pdfs`** - Allows PDF file attachments in chat (model can read PDF content)
+
+**Provider-specific behavior:**
+- **OpenAI and Google providers**: Capabilities are automatically configured based on the model. You don't need to specify them.
+- **OpenRouter, Azure, Azure-Legacy, and Custom providers**: You must manually specify capabilities based on your model's features.
+
+:::warning
+If you don't include `"tools"` in the `ai:capabilities` array, the AI model will not be able to interact with your Wave terminal widgets, read/write files, or execute commands. Most AI modes should include `"tools"` for the best Wave experience.
+:::
+
+Most models support `tools` and can benefit from it. Vision-capable models should include `images`. Not all models support PDFs, so only include `pdfs` if your model can process them.
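+
+For instance, a custom-endpoint mode for a vision-capable local model might declare its capabilities like this (a sketch; the model name and endpoint are placeholders for whatever your server actually serves):
+
+```json
+{
+  "local-vision": {
+    "display:name": "Local Vision Model",
+    "ai:provider": "custom",
+    "ai:apitype": "openai-chat",
+    "ai:model": "your-vision-model",
+    "ai:endpoint": "http://localhost:11434/v1/chat/completions",
+    "ai:capabilities": ["tools", "images"]
+  }
+}
+```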
diff --git a/docs/docs/waveai.mdx b/docs/docs/waveai.mdx
index 1d027177ef..ab9259d5a0 100644
--- a/docs/docs/waveai.mdx
+++ b/docs/docs/waveai.mdx
@@ -74,15 +74,32 @@ Supports text files, images, PDFs, and directories. Use `-n` for new chat, `-s`
File system operations require explicit approval. You control all file access.
:::
+## Local Models & BYOK
+
+Wave AI supports using your own AI models and API keys:
+
+- **Local Models**: Run AI models locally with [Ollama](https://ollama.ai), [LM Studio](https://lmstudio.ai), [vLLM](https://docs.vllm.ai), and other OpenAI-compatible servers
+- **BYOK (Bring Your Own Key)**: Use your own API keys with OpenAI, OpenRouter, Google AI (Gemini), Azure OpenAI, and other cloud providers
+- **Multiple Modes**: Configure and switch between multiple AI providers and models
+- **Privacy**: Keep your data local or use your preferred cloud provider
+
+See the [**Local Models & BYOK guide**](./waveai-modes.mdx) for complete configuration instructions, examples, and troubleshooting.
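+
+As a quick taste, a minimal `waveai.json` entry for a local Ollama model looks roughly like this (a sketch assuming Ollama's default port; see the guide above for complete, tested configurations):
+
+```json
+{
+  "ollama-llama": {
+    "display:name": "Ollama - Llama 3.3",
+    "ai:apitype": "openai-chat",
+    "ai:model": "llama3.3:70b",
+    "ai:endpoint": "http://localhost:11434/v1/chat/completions",
+    "ai:capabilities": ["tools"]
+  }
+}
+```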
+
## Privacy
+**Default Wave AI Service:**
- Messages are proxied through the Wave Cloud AI service (powered by OpenAI's APIs). Please refer to OpenAI's privacy policy for details on how they handle your data.
- Wave does not store your chats, attachments, or use them for training
- Usage counters included in anonymous telemetry
- File access requires explicit approval
+**Local Models & BYOK:**
+- When using local models, your chat data never leaves your machine
+- When using BYOK with cloud providers, requests are sent directly to your chosen provider
+- Refer to your provider's privacy policy for details on how they handle your data
+
:::info Under Active Development
-Wave AI is in active beta with included AI credits while we refine the experience. BYOK will be available once we've stabilized core features and gathered feedback on what works best. Share feedback in our [Discord](https://discord.gg/XfvZ334gwU).
+Wave AI is in active beta with included AI credits while we refine the experience. Share feedback in our [Discord](https://discord.gg/XfvZ334gwU).
**Coming Soon:**
- **Remote File Access**: Read files on SSH-connected systems
diff --git a/docs/src/components/versionbadge.css b/docs/src/components/versionbadge.css
index 4883d04aa0..63ac0b3771 100644
--- a/docs/src/components/versionbadge.css
+++ b/docs/src/components/versionbadge.css
@@ -12,7 +12,11 @@
white-space: nowrap;
}
+.version-badge.no-left-margin {
+ margin-left: 0;
+}
+
[data-theme="dark"] .version-badge {
background-color: var(--ifm-color-primary-dark);
color: var(--ifm-background-color);
-}
\ No newline at end of file
+}
diff --git a/docs/src/components/versionbadge.tsx b/docs/src/components/versionbadge.tsx
index 36903ce8bd..58c616440c 100644
--- a/docs/src/components/versionbadge.tsx
+++ b/docs/src/components/versionbadge.tsx
@@ -2,8 +2,9 @@ import "./versionbadge.css";
interface VersionBadgeProps {
version: string;
+ noLeftMargin?: boolean;
}
-export function VersionBadge({ version }: VersionBadgeProps) {
-    return <span className="version-badge">{version}</span>;
+export function VersionBadge({ version, noLeftMargin }: VersionBadgeProps) {
+    return <span className={noLeftMargin ? "version-badge no-left-margin" : "version-badge"}>{version}</span>;
}
\ No newline at end of file
diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx
index 1878af2d13..d8aa67ccac 100644
--- a/frontend/app/aipanel/aimode.tsx
+++ b/frontend/app/aipanel/aimode.tsx
@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
import { atoms, getSettingsKeyAtom } from "@/app/store/global";
+import { RpcApi } from "@/app/store/wshclientapi";
+import { TabRpcClient } from "@/app/store/wshrpcutil";
import { cn, fireAndForget, makeIconClass } from "@/util/util";
import { useAtomValue } from "jotai";
import { memo, useRef, useState } from "react";
@@ -175,6 +177,16 @@ export const AIModeDropdown = memo(({ compatibilityMode = false }: AIModeDropdow
const handleConfigureClick = () => {
fireAndForget(async () => {
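+            // Fire-and-forget telemetry: record that mode configuration was opened from the dropdown.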
+ RpcApi.RecordTEventCommand(
+ TabRpcClient,
+ {
+ event: "action:other",
+ props: {
+ "action:type": "waveai:configuremodes:contextmenu",
+ },
+ },
+ { noresponse: true }
+ );
await model.openWaveAIConfig();
setIsOpen(false);
});
diff --git a/frontend/app/aipanel/aipanel.tsx b/frontend/app/aipanel/aipanel.tsx
index 062fc2f559..cd4d8a745a 100644
--- a/frontend/app/aipanel/aipanel.tsx
+++ b/frontend/app/aipanel/aipanel.tsx
@@ -21,6 +21,7 @@ import { AIPanelHeader } from "./aipanelheader";
import { AIPanelInput } from "./aipanelinput";
import { AIPanelMessages } from "./aipanelmessages";
import { AIRateLimitStrip } from "./airatelimitstrip";
+import { BYOKAnnouncement } from "./byokannouncement";
import { TelemetryRequiredMessage } from "./telemetryrequired";
import { WaveAIModel } from "./waveai-model";
@@ -85,7 +86,7 @@ const AIWelcomeMessage = memo(() => {
return (
+ Wave AI now supports bring-your-own-key (BYOK) with OpenAI, Google Gemini, Azure, and
+ OpenRouter, plus local models via Ollama, LM Studio, and other OpenAI-compatible providers.
+
- Wave AI is your new terminal assistant with full context. It can read your terminal
- output, analyze widgets, access files, and help you solve problems faster.
+ Wave AI is your terminal assistant with full context. It can read your terminal
+ output, analyze widgets, read and write files, and help you solve
+ problems faster.
-
- Wave AI is in active beta with included AI credits while we refine the experience.
- We're actively improving it and would love your feedback in{" "}
+
+ New in v0.13: Wave AI now
+ supports local models and bring-your-own-key! Use Ollama, LM Studio, vLLM,
+ OpenRouter, or any OpenAI-compatible provider.
+
+
+ Wave AI is in beta with included AI credits while we refine the experience. We're
+ actively improving it and would love your feedback in{" "}
Discord
@@ -151,9 +157,9 @@ const UpgradeOnboardingModal_v0_12_0 = () => {
-
+
-
+
Thanks for being an early Wave adopter! ⭐
A GitHub star shows your support for Wave (and open-source) and helps us reach more
@@ -186,7 +192,7 @@ const UpgradeOnboardingModal_v0_12_0 = () => {
}
const paddingClass = isCompact ? "!py-3 !px-[30px]" : "!p-[30px]";
- const widthClass = pageName === "features" ? "w-[800px]" : "w-[560px]";
+ const widthClass = pageName === "features" ? "w-[800px]" : "w-[600px]";
return (
@@ -196,6 +202,6 @@ const UpgradeOnboardingModal_v0_12_0 = () => {
);
};
-UpgradeOnboardingModal_v0_12_0.displayName = "UpgradeOnboardingModal_v0_12_0";
+UpgradeOnboardingMinor.displayName = "UpgradeOnboardingMinor";
-export { UpgradeOnboardingModal_v0_12_0 };
\ No newline at end of file
+export { UpgradeOnboardingMinor };
diff --git a/frontend/app/onboarding/onboarding-upgrade-patch.tsx b/frontend/app/onboarding/onboarding-upgrade-patch.tsx
index 3e767fe3dc..ee1d7795db 100644
--- a/frontend/app/onboarding/onboarding-upgrade-patch.tsx
+++ b/frontend/app/onboarding/onboarding-upgrade-patch.tsx
@@ -17,6 +17,7 @@ import { debounce } from "throttle-debounce";
import { UpgradeOnboardingModal_v0_12_1_Content } from "./onboarding-upgrade-v0121";
import { UpgradeOnboardingModal_v0_12_2_Content } from "./onboarding-upgrade-v0122";
import { UpgradeOnboardingModal_v0_12_3_Content } from "./onboarding-upgrade-v0123";
+import { UpgradeOnboardingModal_v0_13_0_Content } from "./onboarding-upgrade-v0130";
interface VersionConfig {
version: string;
@@ -41,6 +42,12 @@ const versions: VersionConfig[] = [
version: "v0.12.5",
content: () => ,
prevText: "Prev (v0.12.2)",
+ nextText: "Next (v0.13.0)",
+ },
+ {
+ version: "v0.13.0",
+        content: () => <UpgradeOnboardingModal_v0_13_0_Content />,
+ prevText: "Prev (v0.12.5)",
},
];
diff --git a/frontend/app/onboarding/onboarding-upgrade-v0130.tsx b/frontend/app/onboarding/onboarding-upgrade-v0130.tsx
new file mode 100644
index 0000000000..57ce06403e
--- /dev/null
+++ b/frontend/app/onboarding/onboarding-upgrade-v0130.tsx
@@ -0,0 +1,93 @@
+// Copyright 2025, Command Line Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+const UpgradeOnboardingModal_v0_13_0_Content = () => {
+ return (
+
+
+
+ Wave v0.13 brings local AI support, bring-your-own-key (BYOK), a redesigned configuration system,
+ and improved terminal functionality.
+
+
+
+
+
+
+
+
+
+                Local AI & BYOK
+
+
+
+ OpenAI-Compatible API - Connect to Ollama, LM Studio, vLLM, OpenRouter,
+ and other local or hosted models
+
+
+ Google Gemini - Native support for Gemini models
+
+
+ Provider Presets - Built-in configs for OpenAI, OpenRouter, Google,
+ Azure, and custom endpoints
+
+
+ Multiple AI Modes - Easily switch between models and providers
+
+
+
+
+
+
+
+
+
+
+
+
+                Configuration Widget
+
+
+
+ New Config Interface - Dedicated widget accessible from the sidebar
+
+
+ Better Organization - Browse and edit settings with improved validation
+ and error handling
+
+
+ Integrated Secrets - Manage API keys and credentials from the config
+ widget
+
+
+
+
+
+
+
+
+
+
+
+
+                Terminal Updates
+
+
+
+ Bracketed Paste Mode - Enabled by default for better multi-line paste
+ behavior
+
+
+ Windows Paste Fix - Ctrl+V now works as standard paste on Windows
+
+
+ SSH Password Storage - Store SSH passwords in Wave's secret store
+
- We collect minimal anonymous{" "}
-
- telemetry data
- {" "}
- to help us understand how people are using Wave (
+ Anonymous usage data helps us improve features you use.
+ {
>
Privacy Policy
- ).