diff --git a/.env.example b/.env.example
index eb08f00ee..918abd411 100644
--- a/.env.example
+++ b/.env.example
@@ -1,6 +1,7 @@
OPENAI_API_KEY=""
CEREBRAS_API_KEY=""
GROQ_API_KEY=""
+MINIMAX_API_KEY=""
BROWSERBASE_API_KEY=""
BRAINTRUST_API_KEY=""
ANTHROPIC_API_KEY=""
diff --git a/packages/core/lib/v3/llm/LLMProvider.ts b/packages/core/lib/v3/llm/LLMProvider.ts
index 11986d3cb..1adb216d9 100644
--- a/packages/core/lib/v3/llm/LLMProvider.ts
+++ b/packages/core/lib/v3/llm/LLMProvider.ts
@@ -16,6 +16,7 @@ import { CerebrasClient } from "./CerebrasClient.js";
import { GoogleClient } from "./GoogleClient.js";
import { GroqClient } from "./GroqClient.js";
import { LLMClient } from "./LLMClient.js";
+import { MiniMaxClient } from "./MiniMaxClient.js";
import { OpenAIClient } from "./OpenAIClient.js";
import { openai, createOpenAI } from "@ai-sdk/openai";
import { bedrock, createAmazonBedrock } from "@ai-sdk/amazon-bedrock";
@@ -97,6 +98,10 @@ const modelToProviderMap: { [key in AvailableModel]: ModelProvider } = {
"gemini-2.0-flash": "google",
"gemini-2.5-flash-preview-04-17": "google",
"gemini-2.5-pro-preview-03-25": "google",
+ "minimax-MiniMax-M2.7": "minimax",
+ "minimax-MiniMax-M2.7-highspeed": "minimax",
+ "minimax-MiniMax-M2.5": "minimax",
+ "minimax-MiniMax-M2.5-highspeed": "minimax",
};
export function getAISDKLanguageModel(
@@ -147,6 +152,16 @@ export class LLMProvider {
const firstSlashIndex = modelName.indexOf("/");
const subProvider = modelName.substring(0, firstSlashIndex);
const subModelName = modelName.substring(firstSlashIndex + 1);
+
+ // Handle MiniMax natively (no AI SDK provider available)
+ if (subProvider === "minimax") {
+ return new MiniMaxClient({
+ logger: this.logger,
+ modelName: modelName as AvailableModel,
+ clientOptions,
+ });
+ }
+
if (
subProvider === "vertex" &&
!options?.disableAPI &&
@@ -211,6 +226,12 @@ export class LLMProvider {
modelName: availableModel,
clientOptions,
});
+ case "minimax":
+ return new MiniMaxClient({
+ logger: this.logger,
+ modelName: availableModel,
+ clientOptions,
+ });
default:
// This default case handles unknown providers that exist in modelToProviderMap
// but aren't implemented in the switch. This is an internal consistency issue.
@@ -224,6 +245,9 @@ export class LLMProvider {
if (modelName.includes("/")) {
const firstSlashIndex = modelName.indexOf("/");
const subProvider = modelName.substring(0, firstSlashIndex);
+ if (subProvider === "minimax") {
+ return "minimax";
+ }
if (AISDKProviders[subProvider]) {
return "aisdk";
}
diff --git a/packages/core/lib/v3/llm/MiniMaxClient.ts b/packages/core/lib/v3/llm/MiniMaxClient.ts
new file mode 100644
index 000000000..ee3475894
--- /dev/null
+++ b/packages/core/lib/v3/llm/MiniMaxClient.ts
@@ -0,0 +1,315 @@
+import type { ClientOptions } from "openai";
+import OpenAI from "openai";
+import { LogLine } from "../types/public/logs.js";
+import { AvailableModel } from "../types/public/model.js";
+import {
+ ChatMessage,
+ CreateChatCompletionOptions,
+ LLMClient,
+ LLMResponse,
+} from "./LLMClient.js";
+import { CreateChatCompletionResponseError } from "../types/public/sdkErrors.js";
+import { toJsonSchema } from "../zodCompat.js";
+
+export class MiniMaxClient extends LLMClient {
+ public type = "minimax" as const;
+ private client: OpenAI;
+ declare public clientOptions: ClientOptions;
+ public hasVision = false;
+
+ constructor({
+ modelName,
+ clientOptions,
+ userProvidedInstructions,
+ }: {
+ logger: (message: LogLine) => void;
+ modelName: AvailableModel;
+ clientOptions?: ClientOptions;
+ userProvidedInstructions?: string;
+ }) {
+ super(modelName, userProvidedInstructions);
+
+ // Create OpenAI client with the base URL set to MiniMax API
+ this.client = new OpenAI({
+ baseURL: "https://api.minimax.io/v1",
+ apiKey: clientOptions?.apiKey || process.env.MINIMAX_API_KEY,
+ ...clientOptions,
+ });
+
+ this.modelName = modelName;
+ this.clientOptions = clientOptions;
+ }
+
+  /**
+   * Strip thinking tags from M2.7 model responses.
+   * M2.7 models include `<think>...</think>` blocks in their output.
+   */
+  private stripThinking(content: string | null): string | null {
+    if (!content) return content;
+    return content.replace(/<think>[\s\S]*?<\/think>\s*/g, "").trim() || null;
+  }
+
+ /**
+ * Extract the actual model name to send to the MiniMax API.
+ * Handles both modern format (minimax/MiniMax-M2.7) and
+ * deprecated format (minimax-MiniMax-M2.7).
+ */
+ private getApiModelName(): string {
+ if (this.modelName.includes("/")) {
+ return this.modelName.substring(this.modelName.indexOf("/") + 1);
+ }
+ if (this.modelName.startsWith("minimax-")) {
+ return this.modelName.substring("minimax-".length);
+ }
+ return this.modelName;
+ }
+
+  async createChatCompletion<T = LLMResponse>({
+    options,
+    retries,
+    logger,
+  }: CreateChatCompletionOptions): Promise<T> {
+ const optionsWithoutImage = { ...options };
+ delete optionsWithoutImage.image;
+
+ logger({
+ category: "minimax",
+ message: "creating chat completion",
+ level: 2,
+ auxiliary: {
+ options: {
+ value: JSON.stringify(optionsWithoutImage),
+ type: "object",
+ },
+ },
+ });
+
+ // Format messages for MiniMax API (using OpenAI format)
+ const formattedMessages = options.messages.map((msg: ChatMessage) => {
+ const baseMessage = {
+ content:
+ typeof msg.content === "string"
+ ? msg.content
+ : Array.isArray(msg.content) &&
+ msg.content.length > 0 &&
+ "text" in msg.content[0]
+ ? msg.content[0].text
+ : "",
+ };
+
+ if (msg.role === "system") {
+ return { ...baseMessage, role: "system" as const };
+ } else if (msg.role === "assistant") {
+ return { ...baseMessage, role: "assistant" as const };
+ } else {
+ return { ...baseMessage, role: "user" as const };
+ }
+ });
+
+ // Format tools if provided
+ let tools = options.tools?.map((tool) => ({
+ type: "function" as const,
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: {
+ type: "object",
+ properties: tool.parameters.properties,
+ required: tool.parameters.required,
+ },
+ },
+ }));
+
+ // Add response model as a tool if provided
+ if (options.response_model) {
+ const jsonSchema = toJsonSchema(options.response_model.schema) as {
+        properties?: Record<string, unknown>;
+ required?: string[];
+ };
+ const schemaProperties = jsonSchema.properties || {};
+ const schemaRequired = jsonSchema.required || [];
+
+ const responseTool = {
+ type: "function" as const,
+ function: {
+ name: "print_extracted_data",
+ description:
+ "Prints the extracted data based on the provided schema.",
+ parameters: {
+ type: "object",
+ properties: schemaProperties,
+ required: schemaRequired,
+ },
+ },
+ };
+
+ tools = tools ? [...tools, responseTool] : [responseTool];
+ }
+
+ try {
+ // MiniMax requires temperature in (0.0, 1.0] - zero is not allowed
+ let temperature = options.temperature || 0.7;
+ if (temperature <= 0) {
+ temperature = 0.01;
+ } else if (temperature > 1) {
+ temperature = 1.0;
+ }
+
+ const apiModelName = this.getApiModelName();
+
+ // Use OpenAI client with MiniMax API
+ const apiResponse = await this.client.chat.completions.create({
+ model: apiModelName,
+ messages: [
+ ...formattedMessages,
+ // Add explicit instruction to return JSON if we have a response model
+ ...(options.response_model
+ ? [
+ {
+ role: "system" as const,
+ content: `IMPORTANT: Your response must be valid JSON that matches this schema: ${JSON.stringify(
+ options.response_model.schema,
+ )}`,
+ },
+ ]
+ : []),
+ ],
+ temperature,
+ max_tokens: options.maxOutputTokens,
+ tools: tools,
+ tool_choice: options.tool_choice || "auto",
+ });
+
+ // Strip thinking tags from M2.7 model responses
+ const rawContent = apiResponse.choices[0]?.message?.content || null;
+ const cleanedContent = this.stripThinking(rawContent);
+
+ // Format the response to match the expected LLMResponse format
+ const response: LLMResponse = {
+ id: apiResponse.id,
+ object: "chat.completion",
+ created: Date.now(),
+ model: apiModelName,
+ choices: [
+ {
+ index: 0,
+ message: {
+ role: "assistant",
+ content: cleanedContent,
+ tool_calls: apiResponse.choices[0]?.message?.tool_calls || [],
+ },
+ finish_reason: apiResponse.choices[0]?.finish_reason || "stop",
+ },
+ ],
+ usage: {
+ prompt_tokens: apiResponse.usage?.prompt_tokens || 0,
+ completion_tokens: apiResponse.usage?.completion_tokens || 0,
+ total_tokens: apiResponse.usage?.total_tokens || 0,
+ },
+ };
+
+ logger({
+ category: "minimax",
+ message: "response",
+ level: 2,
+ auxiliary: {
+ response: {
+ value: JSON.stringify(response),
+ type: "object",
+ },
+ requestId: {
+ value: options.requestId,
+ type: "string",
+ },
+ },
+ });
+
+ // If there's no response model, return the entire response object
+ if (!options.response_model) {
+ return response as T;
+ }
+
+ // Otherwise, try parsing the JSON from the tool call or content
+ const toolCall = response.choices[0]?.message?.tool_calls?.[0];
+ if (toolCall?.function?.arguments) {
+ try {
+ const result = JSON.parse(toolCall.function.arguments);
+ const finalResponse = {
+ data: result,
+ usage: response.usage,
+ };
+ return finalResponse as T;
+ } catch (e) {
+ logger({
+ category: "minimax",
+ message: "failed to parse tool call arguments as JSON, retrying",
+ level: 0,
+ auxiliary: {
+ error: {
+ value: e.message,
+ type: "string",
+ },
+ },
+ });
+ }
+ }
+
+ // If we have content but no tool calls, try to parse the content as JSON
+ const content = response.choices[0]?.message?.content;
+ if (content) {
+ try {
+ // Try to extract JSON from the content
+ const jsonMatch = content.match(/\{[\s\S]*\}/);
+ if (jsonMatch) {
+ const result = JSON.parse(jsonMatch[0]);
+ const finalResponse = {
+ data: result,
+ usage: response.usage,
+ };
+ return finalResponse as T;
+ }
+ } catch (e) {
+ logger({
+ category: "minimax",
+ message: "failed to parse content as JSON",
+ level: 0,
+ auxiliary: {
+ error: {
+ value: e.message,
+ type: "string",
+ },
+ },
+ });
+ }
+ }
+
+ // If we still haven't found valid JSON and have retries left, try again
+ if (!retries || retries < 5) {
+ return this.createChatCompletion({
+ options,
+ logger,
+ retries: (retries ?? 0) + 1,
+ });
+ }
+
+ throw new CreateChatCompletionResponseError("Invalid response schema");
+ } catch (error) {
+ logger({
+ category: "minimax",
+ message: "error creating chat completion",
+ level: 0,
+ auxiliary: {
+ error: {
+ value: error.message,
+ type: "string",
+ },
+ requestId: {
+ value: options.requestId,
+ type: "string",
+ },
+ },
+ });
+ throw error;
+ }
+ }
+}
diff --git a/packages/core/lib/v3/types/public/model.ts b/packages/core/lib/v3/types/public/model.ts
index 97fce86de..c022a52e5 100644
--- a/packages/core/lib/v3/types/public/model.ts
+++ b/packages/core/lib/v3/types/public/model.ts
@@ -84,6 +84,10 @@ export type AvailableModel =
| "gemini-2.0-flash"
| "gemini-2.5-flash-preview-04-17"
| "gemini-2.5-pro-preview-03-25"
+ | "minimax-MiniMax-M2.7"
+ | "minimax-MiniMax-M2.7-highspeed"
+ | "minimax-MiniMax-M2.5"
+ | "minimax-MiniMax-M2.5-highspeed"
| string;
export type ModelProvider =
@@ -92,6 +96,7 @@ export type ModelProvider =
| "cerebras"
| "groq"
| "google"
+ | "minimax"
| "aisdk";
export type ClientOptions = (
diff --git a/packages/docs/v3/configuration/models.mdx b/packages/docs/v3/configuration/models.mdx
index dd829a002..d118fe041 100644
--- a/packages/docs/v3/configuration/models.mdx
+++ b/packages/docs/v3/configuration/models.mdx
@@ -8,7 +8,7 @@ import { V3Banner } from '/snippets/v3-banner.mdx';
-Understand web pages, plan actions, and interact with complex interfaces with Google, OpenAI, Anthropic, xAI, DeepSeek, Perplexity, Azure, Ollama, the [Vercel AI Gateway](https://vercel.com/docs/ai-gateway), or any other LLM model from [the Vercel AI SDK](https://sdk.vercel.ai/providers).
+Understand web pages, plan actions, and interact with complex interfaces with Google, OpenAI, Anthropic, MiniMax, xAI, DeepSeek, Perplexity, Azure, Ollama, the [Vercel AI Gateway](https://vercel.com/docs/ai-gateway), or any other LLM model from [the Vercel AI SDK](https://sdk.vercel.ai/providers).
---
@@ -218,6 +218,28 @@ await stagehand.init();
[View all supported Groq models →](https://console.groq.com/docs/models)
+
+<Tab title="MiniMax">
+<CodeGroup>
+```typescript TypeScript
+import { Stagehand } from "@browserbasehq/stagehand";
+
+const stagehand = new Stagehand({
+  env: "BROWSERBASE",
+  model: "minimax/MiniMax-M2.7"
+  // API key auto-loads from MINIMAX_API_KEY - set in your .env
+});
+
+await stagehand.init();
+```
+
+</CodeGroup>
+
+Available models: `MiniMax-M2.7` (recommended), `MiniMax-M2.7-highspeed` (optimized for speed), `MiniMax-M2.5` (204K context), `MiniMax-M2.5-highspeed` (204K context, optimized for speed).
+
+[View all supported MiniMax models →](https://platform.minimaxi.com/document/Models)
+
+</Tab>
@@ -829,6 +851,14 @@ The following models work without the `provider/` prefix in the model parameter
- `groq-llama-3.3-70b-specdec`
- `moonshotai/kimi-k2-instruct`
+
+<Accordion title="MiniMax">
+
+- `minimax-MiniMax-M2.7`
+- `minimax-MiniMax-M2.7-highspeed`
+- `minimax-MiniMax-M2.5`
+- `minimax-MiniMax-M2.5-highspeed`
+</Accordion>
@@ -856,6 +886,7 @@ The following models work without the `provider/` prefix in the model parameter
| Cerebras | `CEREBRAS_API_KEY` |
| DeepSeek | `DEEPSEEK_API_KEY` |
| Groq | `GROQ_API_KEY` |
+| MiniMax | `MINIMAX_API_KEY` |
| Mistral | `MISTRAL_API_KEY` |
| Ollama | None (local) |
| Perplexity | `PERPLEXITY_API_KEY` |