Commit 3fe556d
feat: add MiniMax as a native LLM provider
Add first-class support for MiniMax models (MiniMax-M2.5 and MiniMax-M2.5-highspeed) as a native provider, following the same OpenAI-compatible client pattern used by Groq and Cerebras.

- Create MiniMaxClient with an OpenAI SDK wrapper pointing to https://api.minimax.io/v1
- Handle MiniMax's temperature constraint (must be in (0.0, 1.0])
- Support both modern (minimax/model) and legacy (minimax-model) formats
- Add MINIMAX_API_KEY to .env.example
- Update model docs with MiniMax provider tab and configuration examples
1 parent 6b19b8c commit 3fe556d

5 files changed

Lines changed: 358 additions & 1 deletion
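For context, the OpenAI-compatible pattern this commit relies on is a stock OpenAI SDK client pointed at MiniMax's endpoint. A minimal sketch, using only the base URL, env var, and model name that appear in this diff (the prompt itself is illustrative):

import OpenAI from "openai";

// Point the standard OpenAI client at MiniMax's OpenAI-compatible endpoint.
const client = new OpenAI({
  baseURL: "https://api.minimax.io/v1",
  apiKey: process.env.MINIMAX_API_KEY,
});

async function main() {
  const completion = await client.chat.completions.create({
    model: "MiniMax-M2.5",
    messages: [{ role: "user", content: "Say hello." }],
    temperature: 0.7, // MiniMax accepts temperature in (0.0, 1.0]; zero is rejected
  });
  console.log(completion.choices[0]?.message?.content);
}

main();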

.env.example

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
 OPENAI_API_KEY=""
 CEREBRAS_API_KEY=""
 GROQ_API_KEY=""
+MINIMAX_API_KEY=""
 BROWSERBASE_API_KEY=""
 BRAINTRUST_API_KEY=""
 ANTHROPIC_API_KEY=""

packages/core/lib/v3/llm/LLMProvider.ts

Lines changed: 22 additions & 0 deletions
@@ -16,6 +16,7 @@ import { CerebrasClient } from "./CerebrasClient.js";
 import { GoogleClient } from "./GoogleClient.js";
 import { GroqClient } from "./GroqClient.js";
 import { LLMClient } from "./LLMClient.js";
+import { MiniMaxClient } from "./MiniMaxClient.js";
 import { OpenAIClient } from "./OpenAIClient.js";
 import { openai, createOpenAI } from "@ai-sdk/openai";
 import { bedrock, createAmazonBedrock } from "@ai-sdk/amazon-bedrock";
@@ -97,6 +98,8 @@ const modelToProviderMap: { [key in AvailableModel]: ModelProvider } = {
   "gemini-2.0-flash": "google",
   "gemini-2.5-flash-preview-04-17": "google",
   "gemini-2.5-pro-preview-03-25": "google",
+  "minimax-MiniMax-M2.5": "minimax",
+  "minimax-MiniMax-M2.5-highspeed": "minimax",
 };

 export function getAISDKLanguageModel(
@@ -147,6 +150,16 @@ export class LLMProvider {
     const firstSlashIndex = modelName.indexOf("/");
     const subProvider = modelName.substring(0, firstSlashIndex);
     const subModelName = modelName.substring(firstSlashIndex + 1);
+
+    // Handle MiniMax natively (no AI SDK provider available)
+    if (subProvider === "minimax") {
+      return new MiniMaxClient({
+        logger: this.logger,
+        modelName: modelName as AvailableModel,
+        clientOptions,
+      });
+    }
+
     if (
       subProvider === "vertex" &&
       !options?.disableAPI &&
@@ -211,6 +224,12 @@
         modelName: availableModel,
         clientOptions,
       });
+      case "minimax":
+        return new MiniMaxClient({
+          logger: this.logger,
+          modelName: availableModel,
+          clientOptions,
+        });
       default:
         // This default case handles unknown providers that exist in modelToProviderMap
         // but aren't implemented in the switch. This is an internal consistency issue.
@@ -224,6 +243,9 @@
     if (modelName.includes("/")) {
       const firstSlashIndex = modelName.indexOf("/");
       const subProvider = modelName.substring(0, firstSlashIndex);
+      if (subProvider === "minimax") {
+        return "minimax";
+      }
       if (AISDKProviders[subProvider]) {
         return "aisdk";
       }
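In short, the resolution order added here is: slash-format minimax/ names are intercepted before the AI SDK provider lookup, while legacy minimax-* names resolve through modelToProviderMap. A condensed, self-contained sketch of that routing (illustrative only; the real code returns client instances, not provider strings):

// Condensed sketch of the provider resolution added in this commit.
const legacyModelToProvider: Record<string, string> = {
  "minimax-MiniMax-M2.5": "minimax",
  "minimax-MiniMax-M2.5-highspeed": "minimax",
};

function resolveProvider(modelName: string): string | undefined {
  if (modelName.includes("/")) {
    const subProvider = modelName.substring(0, modelName.indexOf("/"));
    // MiniMax is handled natively; other slash-format names go to the AI SDK.
    return subProvider === "minimax" ? "minimax" : "aisdk";
  }
  // Legacy names, e.g. "minimax-MiniMax-M2.5", resolve through the map.
  return legacyModelToProvider[modelName];
}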
packages/core/lib/v3/llm/MiniMaxClient.ts

Lines changed: 302 additions & 0 deletions
@@ -0,0 +1,302 @@
+import type { ClientOptions } from "openai";
+import OpenAI from "openai";
+import { LogLine } from "../types/public/logs.js";
+import { AvailableModel } from "../types/public/model.js";
+import {
+  ChatMessage,
+  CreateChatCompletionOptions,
+  LLMClient,
+  LLMResponse,
+} from "./LLMClient.js";
+import { CreateChatCompletionResponseError } from "../types/public/sdkErrors.js";
+import { toJsonSchema } from "../zodCompat.js";
+
+export class MiniMaxClient extends LLMClient {
+  public type = "minimax" as const;
+  private client: OpenAI;
+  declare public clientOptions: ClientOptions;
+  public hasVision = false;
+
+  constructor({
+    modelName,
+    clientOptions,
+    userProvidedInstructions,
+  }: {
+    logger: (message: LogLine) => void;
+    modelName: AvailableModel;
+    clientOptions?: ClientOptions;
+    userProvidedInstructions?: string;
+  }) {
+    super(modelName, userProvidedInstructions);
+
+    // Create OpenAI client with the base URL set to MiniMax API
+    this.client = new OpenAI({
+      baseURL: "https://api.minimax.io/v1",
+      apiKey: clientOptions?.apiKey || process.env.MINIMAX_API_KEY,
+      ...clientOptions,
+    });
+
+    this.modelName = modelName;
+    this.clientOptions = clientOptions;
+  }
+
+  /**
+   * Extract the actual model name to send to the MiniMax API.
+   * Handles both modern format (minimax/MiniMax-M2.5) and
+   * deprecated format (minimax-MiniMax-M2.5).
+   */
+  private getApiModelName(): string {
+    if (this.modelName.includes("/")) {
+      return this.modelName.substring(this.modelName.indexOf("/") + 1);
+    }
+    if (this.modelName.startsWith("minimax-")) {
+      return this.modelName.substring("minimax-".length);
+    }
+    return this.modelName;
+  }
+
+  async createChatCompletion<T = LLMResponse>({
+    options,
+    retries,
+    logger,
+  }: CreateChatCompletionOptions): Promise<T> {
+    const optionsWithoutImage = { ...options };
+    delete optionsWithoutImage.image;
+
+    logger({
+      category: "minimax",
+      message: "creating chat completion",
+      level: 2,
+      auxiliary: {
+        options: {
+          value: JSON.stringify(optionsWithoutImage),
+          type: "object",
+        },
+      },
+    });
+
+    // Format messages for MiniMax API (using OpenAI format)
+    const formattedMessages = options.messages.map((msg: ChatMessage) => {
+      const baseMessage = {
+        content:
+          typeof msg.content === "string"
+            ? msg.content
+            : Array.isArray(msg.content) &&
+                msg.content.length > 0 &&
+                "text" in msg.content[0]
+              ? msg.content[0].text
+              : "",
+      };
+
+      if (msg.role === "system") {
+        return { ...baseMessage, role: "system" as const };
+      } else if (msg.role === "assistant") {
+        return { ...baseMessage, role: "assistant" as const };
+      } else {
+        return { ...baseMessage, role: "user" as const };
+      }
+    });
+
+    // Format tools if provided
+    let tools = options.tools?.map((tool) => ({
+      type: "function" as const,
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: {
+          type: "object",
+          properties: tool.parameters.properties,
+          required: tool.parameters.required,
+        },
+      },
+    }));
+
+    // Add response model as a tool if provided
+    if (options.response_model) {
+      const jsonSchema = toJsonSchema(options.response_model.schema) as {
+        properties?: Record<string, unknown>;
+        required?: string[];
+      };
+      const schemaProperties = jsonSchema.properties || {};
+      const schemaRequired = jsonSchema.required || [];
+
+      const responseTool = {
+        type: "function" as const,
+        function: {
+          name: "print_extracted_data",
+          description:
+            "Prints the extracted data based on the provided schema.",
+          parameters: {
+            type: "object",
+            properties: schemaProperties,
+            required: schemaRequired,
+          },
+        },
+      };
+
+      tools = tools ? [...tools, responseTool] : [responseTool];
+    }
+
+    try {
+      // MiniMax requires temperature in (0.0, 1.0] - zero is not allowed
+      let temperature = options.temperature || 0.7;
+      if (temperature <= 0) {
+        temperature = 0.01;
+      } else if (temperature > 1) {
+        temperature = 1.0;
+      }
+
+      const apiModelName = this.getApiModelName();
+
+      // Use OpenAI client with MiniMax API
+      const apiResponse = await this.client.chat.completions.create({
+        model: apiModelName,
+        messages: [
+          ...formattedMessages,
+          // Add explicit instruction to return JSON if we have a response model
+          ...(options.response_model
+            ? [
+                {
+                  role: "system" as const,
+                  content: `IMPORTANT: Your response must be valid JSON that matches this schema: ${JSON.stringify(
+                    options.response_model.schema,
+                  )}`,
+                },
+              ]
+            : []),
+        ],
+        temperature,
+        max_tokens: options.maxOutputTokens,
+        tools: tools,
+        tool_choice: options.tool_choice || "auto",
+      });
+
+      // Format the response to match the expected LLMResponse format
+      const response: LLMResponse = {
+        id: apiResponse.id,
+        object: "chat.completion",
+        created: Date.now(),
+        model: apiModelName,
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: "assistant",
+              content: apiResponse.choices[0]?.message?.content || null,
+              tool_calls: apiResponse.choices[0]?.message?.tool_calls || [],
+            },
+            finish_reason: apiResponse.choices[0]?.finish_reason || "stop",
+          },
+        ],
+        usage: {
+          prompt_tokens: apiResponse.usage?.prompt_tokens || 0,
+          completion_tokens: apiResponse.usage?.completion_tokens || 0,
+          total_tokens: apiResponse.usage?.total_tokens || 0,
+        },
+      };
+
+      logger({
+        category: "minimax",
+        message: "response",
+        level: 2,
+        auxiliary: {
+          response: {
+            value: JSON.stringify(response),
+            type: "object",
+          },
+          requestId: {
+            value: options.requestId,
+            type: "string",
+          },
+        },
+      });
+
+      // If there's no response model, return the entire response object
+      if (!options.response_model) {
+        return response as T;
+      }
+
+      // Otherwise, try parsing the JSON from the tool call or content
+      const toolCall = response.choices[0]?.message?.tool_calls?.[0];
+      if (toolCall?.function?.arguments) {
+        try {
+          const result = JSON.parse(toolCall.function.arguments);
+          const finalResponse = {
+            data: result,
+            usage: response.usage,
+          };
+          return finalResponse as T;
+        } catch (e) {
+          logger({
+            category: "minimax",
+            message: "failed to parse tool call arguments as JSON, retrying",
+            level: 0,
+            auxiliary: {
+              error: {
+                value: e.message,
+                type: "string",
+              },
+            },
+          });
+        }
+      }
+
+      // If we have content but no tool calls, try to parse the content as JSON
+      const content = response.choices[0]?.message?.content;
+      if (content) {
+        try {
+          // Try to extract JSON from the content
+          const jsonMatch = content.match(/\{[\s\S]*\}/);
+          if (jsonMatch) {
+            const result = JSON.parse(jsonMatch[0]);
+            const finalResponse = {
+              data: result,
+              usage: response.usage,
+            };
+            return finalResponse as T;
+          }
+        } catch (e) {
+          logger({
+            category: "minimax",
+            message: "failed to parse content as JSON",
+            level: 0,
+            auxiliary: {
+              error: {
+                value: e.message,
+                type: "string",
+              },
+            },
+          });
+        }
+      }
+
+      // If we still haven't found valid JSON and have retries left, try again
+      if (!retries || retries < 5) {
+        return this.createChatCompletion({
+          options,
+          logger,
+          retries: (retries ?? 0) + 1,
+        });
+      }
+
+      throw new CreateChatCompletionResponseError("Invalid response schema");
+    } catch (error) {
+      logger({
+        category: "minimax",
+        message: "error creating chat completion",
+        level: 0,
+        auxiliary: {
+          error: {
+            value: error.message,
+            type: "string",
+          },
+          requestId: {
+            value: options.requestId,
+            type: "string",
+          },
+        },
+      });
+      throw error;
+    }
+  }
+}
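As a quick reference for the two naming schemes this client accepts, the standalone function below mirrors getApiModelName's logic (an illustration, not part of the commit):

// Mirrors MiniMaxClient.getApiModelName() for illustration.
function toApiModelName(modelName: string): string {
  if (modelName.includes("/")) {
    return modelName.substring(modelName.indexOf("/") + 1);
  }
  if (modelName.startsWith("minimax-")) {
    return modelName.substring("minimax-".length);
  }
  return modelName;
}

toApiModelName("minimax/MiniMax-M2.5"); // "MiniMax-M2.5" (modern format)
toApiModelName("minimax-MiniMax-M2.5-highspeed"); // "MiniMax-M2.5-highspeed" (legacy format)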

packages/core/lib/v3/types/public/model.ts

Lines changed: 3 additions & 0 deletions
@@ -84,6 +84,8 @@ export type AvailableModel =
   | "gemini-2.0-flash"
   | "gemini-2.5-flash-preview-04-17"
   | "gemini-2.5-pro-preview-03-25"
+  | "minimax-MiniMax-M2.5"
+  | "minimax-MiniMax-M2.5-highspeed"
   | string;

 export type ModelProvider =
@@ -92,6 +94,7 @@ export type ModelProvider =
   | "cerebras"
   | "groq"
   | "google"
+  | "minimax"
   | "aisdk";

 export type ClientOptions = (
