Skip to content

Commit 38f7c1b

Browse files
author
Kevin Hopper
committed
Add Meta AI (Llama) as BYOAI provider, fix glass theme message popover
- Add "meta" provider alias (OpenAI-compatible, base URL auto-configured)
- Add Meta AI to Settings dropdown and AI Profiles support
- Fix Llama API empty arguments rejection (pad with first schema property)
- Trim whitespace on profile base URLs to prevent silent failures
- Add Meta AI section to BYOAI docs (models, key format, rate limits)
- Fix glass theme transparency on Messages panel popover
1 parent 5ab141a commit 38f7c1b

7 files changed

Lines changed: 80 additions & 20 deletions

File tree

.env.example

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ CROW_FILES_PATH=/home
102102
# ── AI PROVIDER (BYOAI — Bring Your Own AI) ──────────────
103103

104104
# Configure an AI provider to enable the built-in AI Chat in the Crow's Nest.
105-
# Supports: openai, anthropic, google, ollama, openrouter
105+
# Supports: openai, anthropic, google, ollama, openrouter, meta
106106
# AI_PROVIDER=openai
107107
# AI_API_KEY=sk-...
108108
# AI_MODEL=gpt-4o

docs/guide/ai-providers.md

Lines changed: 30 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ No gateway restart needed — the config is hot-reloaded.
5050
| Google Gemini | `google` | `gemini-2.5-flash` | Yes | [Get key](https://aistudio.google.com/app/apikey) |
5151
| Ollama | `ollama` | `llama3.1` | No | Fully local, no API key needed |
5252
| OpenRouter | `openrouter` | `openai/gpt-4o` | Yes | [Get key](https://openrouter.ai/keys) — access 100+ models |
53+
| Meta AI (Llama) | `meta` | `Llama-4-Maverick-17B-128E-Instruct-FP8` | Yes | [Get key](https://llama.com/) — Llama 4 & 3.3 models |
5354
| DashScope Coding | `openai` | `qwen3.5-plus` | Yes | [Get key](https://dashscope.console.aliyun.com/apiKey) — Qwen, GLM, Kimi, MiniMax ([guide](/guide/dashscope-coding)) |
5455
| Z.AI Coding | `openai` | `glm-5` | Yes | [Get key](https://z.ai) — GLM models ([guide](/guide/zai-coding)) |
5556

@@ -107,6 +108,33 @@ AI_MODEL=openai/gpt-4o
107108

108109
OpenRouter gives you access to 100+ models from multiple providers through a single API key. Great for trying different models without signing up for each provider separately. Many models have free tiers.
109110

111+
### Meta AI (Llama)
112+
113+
```env
114+
AI_PROVIDER=meta
115+
AI_API_KEY=LLM|...
116+
AI_MODEL=Llama-4-Maverick-17B-128E-Instruct-FP8
117+
```
118+
119+
Meta's Llama API provides direct access to Llama models. Available models:
120+
121+
| Model | RPM | TPM |
122+
|---|---|---|
123+
| `Llama-4-Maverick-17B-128E-Instruct-FP8` | 10 | 250,000 |
124+
| `Llama-4-Scout-17B-16E-Instruct-FP8` | 10 | 250,000 |
125+
| `Llama-3.3-70B-Instruct` | 10 | 250,000 |
126+
| `Llama-3.3-8B-Instruct` | 10 | 250,000 |
127+
128+
The API is OpenAI-compatible — no custom base URL needed.
129+
130+
::: tip API Key Format
131+
Meta API keys start with `LLM|` (e.g., `LLM|953656...|8vKG-...`). Get one at [llama.com](https://llama.com/).
132+
:::
133+
134+
::: warning Semantic Search
135+
Meta's API does not support embeddings. Semantic search is not available when using Meta as your AI provider — Crow falls back to keyword search (FTS5) automatically.
136+
:::
137+
110138
### DashScope Coding Plan (Alibaba Cloud)
111139

112140
```env
@@ -146,7 +174,7 @@ This works with vLLM, LM Studio, text-generation-webui, and other OpenAI-compati
146174

147175
| Variable | Required | Description |
148176
|---|---|---|
149-
| `AI_PROVIDER` | Yes | Provider name: `openai`, `anthropic`, `google`, `ollama`, `openrouter` |
177+
| `AI_PROVIDER` | Yes | Provider name: `openai`, `anthropic`, `google`, `ollama`, `openrouter`, `meta` |
150178
| `AI_API_KEY` | Depends | API key (not needed for Ollama) |
151179
| `AI_MODEL` | No | Model name (uses provider default if blank) |
152180
| `AI_BASE_URL` | No | Custom API endpoint (for Ollama, OpenRouter, or self-hosted) |
@@ -250,7 +278,7 @@ LocalAI provides an OpenAI-compatible API running entirely on your hardware —
250278
Set `AI_PROVIDER` in Settings or `.env`. At minimum you need the provider name.
251279

252280
### "API key is invalid (401)"
253-
Double-check your `AI_API_KEY`. For Anthropic, keys start with `sk-ant-`. For OpenAI, `sk-`. For Google, `AIza`.
281+
Double-check your `AI_API_KEY`. For Anthropic, keys start with `sk-ant-`. For OpenAI, `sk-`. For Google, `AIza`. For Meta, keys start with `LLM|`.
254282

255283
### "Model not found (404)"
256284
The model name is provider-specific. Check the provider's docs for available models. For Ollama, run `ollama pull <model>` first.

servers/gateway/ai/adapters/openai.js

Lines changed: 34 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,23 @@ function mcpToolsToOpenAI(tools) {
2626

2727
/**
2828
* Convert chat messages to OpenAI message format.
29+
* @param {Array} messages
30+
* @param {Array} [tools] - Tool schemas, used to fix up empty arguments for
31+
* providers (e.g. Meta Llama) that reject "{}" as "parameters missing".
2932
*/
30-
function toOpenAIMessages(messages) {
33+
function toOpenAIMessages(messages, tools) {
34+
// Build a map of tool name → first schema property for empty-args fixup
35+
const toolFirstProp = new Map();
36+
if (tools) {
37+
for (const t of tools) {
38+
const props = t.inputSchema?.properties;
39+
if (props) {
40+
const firstKey = Object.keys(props)[0];
41+
if (firstKey) toolFirstProp.set(t.name, firstKey);
42+
}
43+
}
44+
}
45+
3146
return messages.map((m) => {
3247
if (m.role === "tool") {
3348
return {
@@ -41,14 +56,21 @@ function toOpenAIMessages(messages) {
4156
return {
4257
role: "assistant",
4358
content: m.content || null,
44-
tool_calls: toolCalls.map((tc) => ({
45-
id: tc.id,
46-
type: "function",
47-
function: {
48-
name: tc.name,
49-
arguments: typeof tc.arguments === "string" ? tc.arguments : JSON.stringify(tc.arguments),
50-
},
51-
})),
59+
tool_calls: toolCalls.map((tc) => {
60+
let args = typeof tc.arguments === "string" ? tc.arguments : JSON.stringify(tc.arguments);
61+
// Fix empty arguments for providers that reject "{}" (e.g. Meta Llama API)
62+
if (args === "{}" || args === "") {
63+
const firstProp = toolFirstProp.get(tc.name);
64+
if (firstProp) {
65+
args = JSON.stringify({ [firstProp]: "" });
66+
}
67+
}
68+
return {
69+
id: tc.id,
70+
type: "function",
71+
function: { name: tc.name, arguments: args },
72+
};
73+
}),
5274
};
5375
}
5476
return { role: m.role, content: m.content || "" };
@@ -67,15 +89,15 @@ export default function createOpenAIAdapter(config) {
6789
const temperature = options.temperature ?? 0.7;
6890
const maxTokens = options.maxTokens || 4096;
6991

92+
const openaiTools = mcpToolsToOpenAI(tools);
93+
7094
const body = {
7195
model,
72-
messages: toOpenAIMessages(messages),
96+
messages: toOpenAIMessages(messages, tools),
7397
temperature,
7498
max_tokens: maxTokens,
7599
stream: true,
76100
};
77-
78-
const openaiTools = mcpToolsToOpenAI(tools);
79101
if (openaiTools) {
80102
body.tools = openaiTools;
81103
}

servers/gateway/ai/provider.js

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
* - anthropic — Anthropic Messages API
1111
* - google — Google Gemini API
1212
* - ollama — Ollama native /api/chat endpoint
13+
* - meta — Meta Llama API (OpenAI-compatible)
1314
*/
1415

1516
import { readEnvFile, resolveEnvPath } from "../env-manager.js";
@@ -91,6 +92,7 @@ export async function createProviderAdapter() {
9192
// Resolve provider to adapter loader (support aliases)
9293
let adapterKey = provider;
9394
if (provider === "openrouter") adapterKey = "openai";
95+
if (provider === "meta") adapterKey = "openai";
9496

9597
const loader = ADAPTER_LOADERS[adapterKey];
9698
if (!loader) {
@@ -122,6 +124,9 @@ export async function createProviderAdapter() {
122124
if (provider === "openrouter" && !baseUrl) {
123125
adapterConfig.baseUrl = "https://openrouter.ai/api/v1";
124126
}
127+
if (provider === "meta" && !baseUrl) {
128+
adapterConfig.baseUrl = "https://api.llama.com/compat/v1/";
129+
}
125130

126131
const adapter = createAdapter(adapterConfig);
127132
return { adapter, config };
@@ -151,7 +156,7 @@ export async function getAiProfiles(db, { includeKeys = false } = {}) {
151156
* Returns { adapter, config } — same shape as createProviderAdapter().
152157
*/
153158
export async function createAdapterFromProfile(profile, model) {
154-
const adapterKey = profile.provider === "openrouter" ? "openai" : profile.provider;
159+
const adapterKey = ["openrouter", "meta"].includes(profile.provider) ? "openai" : profile.provider;
155160
const loader = ADAPTER_LOADERS[adapterKey];
156161
if (!loader) {
157162
throw Object.assign(new Error(`Unknown provider: ${profile.provider}`), { code: "invalid_provider" });
@@ -173,6 +178,9 @@ export async function createAdapterFromProfile(profile, model) {
173178
if (profile.provider === "openrouter" && !profile.baseUrl) {
174179
adapterConfig.baseUrl = "https://openrouter.ai/api/v1";
175180
}
181+
if (profile.provider === "meta" && !profile.baseUrl) {
182+
adapterConfig.baseUrl = "https://api.llama.com/compat/v1/";
183+
}
176184

177185
const adapter = mod.default(adapterConfig);
178186
return { adapter, config: { provider: profile.provider, model: resolvedModel, baseUrl: profile.baseUrl } };

servers/gateway/dashboard/settings/sections/ai-profiles.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ export default {
196196
profiles[idx].name = profile_name;
197197
profiles[idx].provider = profile_provider;
198198
if (profile_api_key) profiles[idx].apiKey = profile_api_key;
199-
profiles[idx].baseUrl = profile_base_url || "";
199+
profiles[idx].baseUrl = (profile_base_url || "").trim();
200200
profiles[idx].models = models;
201201
profiles[idx].defaultModel = defaultModel;
202202
} else {
@@ -207,7 +207,7 @@ export default {
207207
name: profile_name,
208208
provider: profile_provider,
209209
apiKey: profile_api_key || "",
210-
baseUrl: profile_base_url || "",
210+
baseUrl: (profile_base_url || "").trim(),
211211
models,
212212
defaultModel,
213213
});

servers/gateway/dashboard/settings/sections/ai-provider.js

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ export default {
3737
{ id: "google", name: "Google Gemini", defaultModel: "gemini-2.5-flash" },
3838
{ id: "ollama", name: "Ollama (local)", defaultModel: "llama3.1" },
3939
{ id: "openrouter", name: "OpenRouter", defaultModel: "openai/gpt-4o" },
40+
{ id: "meta", name: "Meta AI (Llama)", defaultModel: "Llama-4-Maverick-17B-128E-Instruct-FP8" },
4041
];
4142

4243
const currentProvider = aiProviderConfig?.provider || "";
@@ -88,7 +89,7 @@ export default {
8889
var p = document.getElementById('ai-provider').value;
8990
var urlField = document.getElementById('ai-base-url-field');
9091
urlField.style.display = (p === 'ollama' || p === 'openrouter' || p === '') ? 'block' : 'none';
91-
var defaults = {openai:'gpt-4o',anthropic:'claude-sonnet-4-20250514',google:'gemini-2.5-flash',ollama:'llama3.1',openrouter:'openai/gpt-4o'};
92+
var defaults = {openai:'gpt-4o',anthropic:'claude-sonnet-4-20250514',google:'gemini-2.5-flash',ollama:'llama3.1',openrouter:'openai/gpt-4o',meta:'Llama-4-Maverick-17B-128E-Instruct-FP8'};
9293
document.getElementById('ai-model').placeholder = defaults[p] || 'Model name';
9394
}
9495
async function saveAiProvider() {

servers/gateway/dashboard/shared/layout.js

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -729,7 +729,8 @@ function dashboardCss() {
729729
.theme-glass .header-dropdown,
730730
.theme-glass .crow-dropdown,
731731
.theme-glass #modal-content,
732-
.theme-glass #crow-player-bar {
732+
.theme-glass #crow-player-bar,
733+
.theme-glass .msg-popover {
733734
background: var(--crow-bg-popup);
734735
backdrop-filter: var(--crow-glass-blur);
735736
-webkit-backdrop-filter: var(--crow-glass-blur);

0 commit comments

Comments
 (0)