From bf1772a68c35389314065d22d11ed56ef98269e2 Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Thu, 12 Feb 2026 15:36:08 +0530
Subject: [PATCH 1/9] convert assistant tool call messages to ai sdk format

---
 .../src/openaiToVercelMessages.ts             | 45 ++++++++++++++-----
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/packages/openai-adapters/src/openaiToVercelMessages.ts b/packages/openai-adapters/src/openaiToVercelMessages.ts
index 890d42f0d87..1850e5ed93e 100644
--- a/packages/openai-adapters/src/openaiToVercelMessages.ts
+++ b/packages/openai-adapters/src/openaiToVercelMessages.ts
@@ -13,12 +13,13 @@ export interface VercelCoreMessage {
  * Converts OpenAI messages to Vercel AI SDK CoreMessage format.
  *
  * Key differences:
+ * - OpenAI tool calls: { role: "assistant", tool_calls: [{ id, function: { name, arguments } }] }
+ * - Vercel tool calls: { role: "assistant", content: [{ type: "tool-call", toolCallId, toolName, args }] }
  * - OpenAI tool results: { role: "tool", tool_call_id: "...", content: "string" }
  * - Vercel tool results: { role: "tool", content: [{ type: "tool-result", toolCallId: "...", toolName: "...", result: any }] }
  *
  * IMPORTANT: For multi-turn conversations with tools:
- * - We EXCLUDE assistant messages that have tool_calls because Vercel AI SDK manages tool call state internally
- * - We only include tool results, and Vercel will associate them with its internal tool call tracking
+ * - We include assistant messages with tool_calls converted to Vercel format
  */
 export function convertOpenAIMessagesToVercel(
   messages: ChatCompletionMessageParam[],
@@ -60,19 +61,43 @@ export function convertOpenAIMessagesToVercel(
       break;
 
     case "assistant":
-      // CRITICAL: Skip assistant messages with tool_calls in subsequent turns
-      // Vercel AI SDK manages tool call state internally and doesn't expect us to re-send them
-      // Only include assistant messages WITHOUT tool calls (normal responses)
-      if (!msg.tool_calls || msg.tool_calls.length === 0) {
+      if (msg.tool_calls && msg.tool_calls.length > 0) {
+        const contentParts: any[] = [];
+
+        if (msg.content) {
+          contentParts.push({
+            type: "text",
+            text: msg.content,
+          });
+        }
+
+        for (const tc of msg.tool_calls) {
+          if (tc.type === "function") {
+            let args: unknown;
+            try {
+              args = JSON.parse(tc.function.arguments);
+            } catch {
+              args = tc.function.arguments;
+            }
+            contentParts.push({
+              type: "tool-call",
+              toolCallId: tc.id,
+              toolName: tc.function.name,
+              args,
+            });
+          }
+        }
+
+        vercelMessages.push({
+          role: "assistant",
+          content: contentParts,
+        });
+      } else {
         vercelMessages.push({
           role: "assistant",
           content: msg.content || "",
         });
       }
-      // Note: We skip assistant messages WITH tool_calls because:
-      // 1. They were generated by Vercel AI SDK in a previous turn
-      // 2. Vercel tracks tool call state internally
-      // 3. Re-sending them causes Anthropic API errors about missing tool_use blocks
       break;
 
     case "tool":
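The conversion introduced in PATCH 1 is easiest to see on a concrete message. A sketch (the get_weather tool and its arguments are hypothetical, not part of the patch):

// OpenAI-format assistant message carrying a tool call:
const openaiMessage = {
  role: "assistant",
  content: null,
  tool_calls: [
    {
      id: "call_abc123",
      type: "function",
      function: { name: "get_weather", arguments: '{"city":"Paris"}' },
    },
  ],
};

// convertOpenAIMessagesToVercel([openaiMessage]) now yields:
// [
//   {
//     role: "assistant",
//     content: [
//       {
//         type: "tool-call",
//         toolCallId: "call_abc123",
//         toolName: "get_weather",
//         args: { city: "Paris" },
//       },
//     ],
//   },
// ]
// Arguments that fail JSON.parse are passed through as the raw string.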
From 783fd2a2ce476d428a41c45daae961f16fb8cac5 Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Thu, 12 Feb 2026 15:46:39 +0530
Subject: [PATCH 2/9] add ai sdk implementing basellmapi

---
 packages/openai-adapters/src/apis/AiSdk.ts | 285 +++++++++++++++++++++
 1 file changed, 285 insertions(+)
 create mode 100644 packages/openai-adapters/src/apis/AiSdk.ts

diff --git a/packages/openai-adapters/src/apis/AiSdk.ts b/packages/openai-adapters/src/apis/AiSdk.ts
new file mode 100644
index 00000000000..034433ad364
--- /dev/null
+++ b/packages/openai-adapters/src/apis/AiSdk.ts
@@ -0,0 +1,285 @@
+import { createAnthropic } from "@ai-sdk/anthropic";
+import { createOpenAI } from "@ai-sdk/openai";
+import {
+  ChatCompletion,
+  ChatCompletionChunk,
+  ChatCompletionCreateParamsNonStreaming,
+  ChatCompletionCreateParamsStreaming,
+  Completion,
+  CompletionCreateParamsNonStreaming,
+  CompletionCreateParamsStreaming,
+  CreateEmbeddingResponse,
+  EmbeddingCreateParams,
+  Model,
+} from "openai/resources/index";
+import { AiSdkConfig } from "../types.js";
+import { customFetch, embedding } from "../util.js";
+import {
+  BaseLlmApi,
+  CreateRerankResponse,
+  FimCreateParamsStreaming,
+  RerankCreateParams,
+} from "./base.js";
+
+type AiSdkProviderCreator = (options: {
+  apiKey?: string;
+  baseURL?: string;
+  fetch?: typeof fetch;
+}) => (modelId: string) => any;
+
+const PROVIDER_MAP: Record<string, AiSdkProviderCreator> = {
+  openai: createOpenAI,
+  anthropic: createAnthropic,
+};
+
+export class AiSdkApi implements BaseLlmApi {
+  private provider?: (modelId: string) => any;
+  private config: AiSdkConfig;
+
+  constructor(config: AiSdkConfig) {
+    this.config = config;
+  }
+
+  private initializeProvider() {
+    if (this.provider) {
+      return;
+    }
+
+    const createFn = PROVIDER_MAP[this.config.aiSdkProviderId];
+    if (!createFn) {
+      const supportedProviders = Object.keys(PROVIDER_MAP).join(", ");
+      throw new Error(
+        `Unknown AI SDK provider: "${this.config.aiSdkProviderId}". ` +
+          `Supported providers: ${supportedProviders}. ` +
+          `To use a different provider, install the @ai-sdk/* package and add it to the provider map.`,
+      );
+    }
+
+    const hasRequestOptions =
+      this.config.requestOptions &&
+      (this.config.requestOptions.headers ||
+        this.config.requestOptions.proxy ||
+        this.config.requestOptions.caBundlePath ||
+        this.config.requestOptions.clientCertificate ||
+        this.config.requestOptions.extraBodyProperties);
+
+    this.provider = createFn({
+      apiKey: this.config.apiKey ?? "",
+      baseURL: this.config.apiBase,
+      fetch: hasRequestOptions
+        ? customFetch(this.config.requestOptions)
+        : undefined,
+    });
+  }
+
+  async chatCompletionNonStream(
+    body: ChatCompletionCreateParamsNonStreaming,
+    signal: AbortSignal,
+  ): Promise<ChatCompletion> {
+    this.initializeProvider();
+
+    const { generateText } = await import("ai");
+    const { convertOpenAIMessagesToVercel } = await import(
+      "../openaiToVercelMessages.js"
+    );
+    const { convertToolsToVercelFormat } = await import(
+      "../convertToolsToVercel.js"
+    );
+    const { convertToolChoiceToVercel } = await import(
+      "../convertToolChoiceToVercel.js"
+    );
+
+    const vercelMessages = convertOpenAIMessagesToVercel(body.messages);
+    const systemMsg = vercelMessages.find((msg) => msg.role === "system");
+    const systemText =
+      systemMsg && typeof systemMsg.content === "string"
+        ? systemMsg.content
+        : undefined;
+    const nonSystemMessages = vercelMessages.filter(
+      (msg) => msg.role !== "system",
+    );
+
+    const modelId = this.config.model ?? body.model;
+    const model = this.provider!(modelId);
+    const vercelTools = await convertToolsToVercelFormat(body.tools);
+
+    const result = await generateText({
+      model,
+      system: systemText,
+      messages: nonSystemMessages as any,
+      temperature: body.temperature ?? undefined,
+      maxTokens: body.max_tokens ?? undefined,
+      topP: body.top_p ?? undefined,
+      stopSequences: body.stop
+        ? Array.isArray(body.stop)
+          ? body.stop
+          : [body.stop]
+        : undefined,
+      tools: vercelTools,
+      toolChoice: convertToolChoiceToVercel(body.tool_choice),
+      abortSignal: signal,
+    });
+
+    const toolCalls = result.toolCalls?.map((tc) => ({
+      id: tc.toolCallId,
+      type: "function" as const,
+      function: {
+        name: tc.toolName,
+        arguments: JSON.stringify(tc.args),
+      },
+    }));
+
+    return {
+      id: result.response?.id ?? "",
+      object: "chat.completion",
+      created: Math.floor(Date.now() / 1000),
+      model: modelId,
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: result.text,
+            tool_calls: toolCalls,
+            refusal: null,
+          },
+          finish_reason:
+            result.finishReason === "tool-calls" ? "tool_calls" : "stop",
+          logprobs: null,
+        },
+      ],
+      usage: {
+        prompt_tokens: result.usage.promptTokens,
+        completion_tokens: result.usage.completionTokens,
+        total_tokens: result.usage.totalTokens,
+      },
+    };
+  }
+
+  async *chatCompletionStream(
+    body: ChatCompletionCreateParamsStreaming,
+    signal: AbortSignal,
+  ): AsyncGenerator<ChatCompletionChunk> {
+    this.initializeProvider();
+
+    const { streamText } = await import("ai");
+    const { convertOpenAIMessagesToVercel } = await import(
+      "../openaiToVercelMessages.js"
+    );
+    const { convertToolsToVercelFormat } = await import(
+      "../convertToolsToVercel.js"
+    );
+    const { convertVercelStream } = await import("../vercelStreamConverter.js");
+    const { convertToolChoiceToVercel } = await import(
+      "../convertToolChoiceToVercel.js"
+    );
+
+    const vercelMessages = convertOpenAIMessagesToVercel(body.messages);
+    const systemMsg = vercelMessages.find((msg) => msg.role === "system");
+    const systemText =
+      systemMsg && typeof systemMsg.content === "string"
+        ? systemMsg.content
+        : undefined;
+    const nonSystemMessages = vercelMessages.filter(
+      (msg) => msg.role !== "system",
+    );
+
+    const modelId = this.config.model ?? body.model;
+    const model = this.provider!(modelId);
+    const vercelTools = await convertToolsToVercelFormat(body.tools);
+
+    const result = streamText({
+      model,
+      system: systemText,
+      messages: nonSystemMessages as any,
+      temperature: body.temperature ?? undefined,
+      maxTokens: body.max_tokens ?? undefined,
+      topP: body.top_p ?? undefined,
+      stopSequences: body.stop
+        ? Array.isArray(body.stop)
+          ? body.stop
+          : [body.stop]
+        : undefined,
+      tools: vercelTools,
+      toolChoice: convertToolChoiceToVercel(body.tool_choice),
+      abortSignal: signal,
+    });
+
+    yield* convertVercelStream(result.fullStream as any, { model: modelId });
+  }
+
+  async completionNonStream(
+    _body: CompletionCreateParamsNonStreaming,
+    _signal: AbortSignal,
+  ): Promise<Completion> {
+    throw new Error(
+      "AI SDK provider does not support legacy completions API. Use chat completions instead.",
+    );
+  }
+
+  async *completionStream(
+    _body: CompletionCreateParamsStreaming,
+    _signal: AbortSignal,
+  ): AsyncGenerator<Completion> {
+    throw new Error(
+      "AI SDK provider does not support legacy completions API. Use chat completions instead.",
+    );
+  }
+
+  async *fimStream(
+    _body: FimCreateParamsStreaming,
+    _signal: AbortSignal,
+  ): AsyncGenerator<ChatCompletionChunk> {
+    throw new Error(
+      "AI SDK provider does not support fill-in-the-middle (FIM) completions.",
+    );
+  }
+
+  async embed(body: EmbeddingCreateParams): Promise<CreateEmbeddingResponse> {
+    this.initializeProvider();
+
+    const { embed: aiEmbed, embedMany } = await import("ai");
+
+    const modelId = body.model;
+    const model = this.provider!(modelId);
+
+    const inputs = Array.isArray(body.input) ? body.input : [body.input];
+
+    if (inputs.length === 1) {
+      const result = await aiEmbed({
+        model,
+        value: inputs[0],
+      });
+      return embedding({
+        data: [result.embedding],
+        model: modelId,
+        usage: {
+          prompt_tokens: result.usage?.tokens ?? 0,
+          total_tokens: result.usage?.tokens ?? 0,
+        },
+      });
+    }
+
+    const result = await embedMany({
+      model,
+      values: inputs as string[],
+    });
+
+    return embedding({
+      data: result.embeddings,
+      model: modelId,
+      usage: {
+        prompt_tokens: result.usage?.tokens ?? 0,
+        total_tokens: result.usage?.tokens ?? 0,
+      },
+    });
+  }
+
+  async rerank(_body: RerankCreateParams): Promise<CreateRerankResponse> {
+    throw new Error("AI SDK provider does not support reranking.");
+  }
+
+  async list(): Promise<Model[]> {
+    return [];
+  }
+}
From 73ee3e047a0f0f9eeb518f645a67ab52b9d126e7 Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Thu, 12 Feb 2026 15:52:35 +0530
Subject: [PATCH 3/9] remove usage of aisdkprovider id

---
 extensions/cli/src/config.ts               | 51 +++++++++++-----------
 packages/config-yaml/src/schemas/models.ts |  1 +
 packages/openai-adapters/src/apis/AiSdk.ts | 13 ++++--
 3 files changed, 36 insertions(+), 29 deletions(-)

diff --git a/extensions/cli/src/config.ts b/extensions/cli/src/config.ts
index f26e1de2c64..fc38409ebfe 100644
--- a/extensions/cli/src/config.ts
+++ b/extensions/cli/src/config.ts
@@ -44,7 +44,7 @@ function mergeUserAgentIntoRequestOptions(
 
 /**
  * Creates an LLM API instance from a ModelConfig and auth configuration
- * Handles special logic for continue-proxy provider and constructs the API
+ * Handles special logic for continue-proxy and ai-sdk providers and constructs the API
  */
 export function createLlmApi(
   model: ModelConfig,
@@ -53,30 +53,31 @@ export function createLlmApi(
   const accessToken = getAccessToken(authConfig);
   const organizationId = getOrganizationId(authConfig);
 
-  const config: LLMConfig =
-    model.provider === "continue-proxy"
-      ? {
-          provider: model.provider,
-          requestOptions: mergeUserAgentIntoRequestOptions(
-            model.requestOptions,
-          ),
-          apiBase: model.apiBase,
-          apiKey: accessToken ?? undefined,
-          env: {
-            apiKeyLocation: (model as any).apiKeyLocation,
-            orgScopeId: organizationId ?? null,
-            proxyUrl:
-              (model as { onPremProxyUrl: string | undefined })
-                .onPremProxyUrl ?? (env.apiBase ? env.apiBase : undefined),
-          },
-        }
-      : {
-          provider: model.provider as any,
-          apiKey: model.apiKey,
-          apiBase: model.apiBase,
-          requestOptions: model.requestOptions,
-          env: model.env,
-        };
+  let config: LLMConfig;
+
+  if (model.provider === "continue-proxy") {
+    config = {
+      provider: model.provider,
+      requestOptions: mergeUserAgentIntoRequestOptions(model.requestOptions),
+      apiBase: model.apiBase,
+      apiKey: accessToken ?? undefined,
+      env: {
+        apiKeyLocation: (model as any).apiKeyLocation,
+        orgScopeId: organizationId ?? null,
+        proxyUrl:
+          (model as { onPremProxyUrl: string | undefined }).onPremProxyUrl ??
+          (env.apiBase ? env.apiBase : undefined),
+      },
+    };
+  } else {
+    config = {
+      provider: model.provider as any,
+      apiKey: model.apiKey,
+      apiBase: model.apiBase,
+      requestOptions: model.requestOptions,
+      env: model.env,
+    };
+  }
 
   return constructLlmApi(config) ?? null;
 }

diff --git a/packages/config-yaml/src/schemas/models.ts b/packages/config-yaml/src/schemas/models.ts
index 89856e35c95..0badd35d9df 100644
--- a/packages/config-yaml/src/schemas/models.ts
+++ b/packages/config-yaml/src/schemas/models.ts
@@ -193,6 +193,7 @@ const baseModelFields = {
     .record(z.string(), z.union([z.string(), z.boolean(), z.number()]))
     .optional(),
   autocompleteOptions: autocompleteOptionsSchema.optional(),
+  aiSdkProviderId: z.string().optional(),
 };
 
 export const modelSchema = z.union([

diff --git a/packages/openai-adapters/src/apis/AiSdk.ts b/packages/openai-adapters/src/apis/AiSdk.ts
index 034433ad364..8ad8f72ae62 100644
--- a/packages/openai-adapters/src/apis/AiSdk.ts
+++ b/packages/openai-adapters/src/apis/AiSdk.ts
@@ -35,9 +35,14 @@ const PROVIDER_MAP: Record<string, AiSdkProviderCreator> = {
 export class AiSdkApi implements BaseLlmApi {
   private provider?: (modelId: string) => any;
   private config: AiSdkConfig;
+  private providerId: string;
+  private modelId: string;
 
   constructor(config: AiSdkConfig) {
     this.config = config;
+    const [providerId, ...modelParts] = config.model.split("/");
+    this.providerId = providerId;
+    this.modelId = modelParts.join("/");
   }
 
   private initializeProvider() {
@@ -45,11 +50,11 @@ export class AiSdkApi implements BaseLlmApi {
       return;
     }
 
-    const createFn = PROVIDER_MAP[this.config.aiSdkProviderId];
+    const createFn = PROVIDER_MAP[this.providerId];
     if (!createFn) {
       const supportedProviders = Object.keys(PROVIDER_MAP).join(", ");
       throw new Error(
-        `Unknown AI SDK provider: "${this.config.aiSdkProviderId}". ` +
+        `Unknown AI SDK provider: "${this.providerId}". ` +
           `Supported providers: ${supportedProviders}. ` +
           `To use a different provider, install the @ai-sdk/* package and add it to the provider map.`,
       );
@@ -99,7 +104,7 @@ export class AiSdkApi implements BaseLlmApi {
       (msg) => msg.role !== "system",
     );
 
-    const modelId = this.config.model ?? body.model;
+    const modelId = this.modelId || body.model;
     const model = this.provider!(modelId);
     const vercelTools = await convertToolsToVercelFormat(body.tools);
 
@@ -184,7 +189,7 @@ export class AiSdkApi implements BaseLlmApi {
       (msg) => msg.role !== "system",
     );
 
-    const modelId = this.config.model ?? body.model;
+    const modelId = this.modelId || body.model;
     const model = this.provider!(modelId);
     const vercelTools = await convertToolsToVercelFormat(body.tools);
Use chat completions instead.", + ); + } + + async *fimStream( + _body: FimCreateParamsStreaming, + _signal: AbortSignal, + ): AsyncGenerator { + throw new Error( + "AI SDK provider does not support fill-in-the-middle (FIM) completions.", + ); + } + + async embed(body: EmbeddingCreateParams): Promise { + this.initializeProvider(); + + const { embed: aiEmbed, embedMany } = await import("ai"); + + const modelId = typeof body.model === "string" ? body.model : body.model; + const model = this.provider!(modelId); + + const inputs = Array.isArray(body.input) ? body.input : [body.input]; + + if (inputs.length === 1) { + const result = await aiEmbed({ + model, + value: inputs[0], + }); + return embedding({ + data: [result.embedding], + model: modelId, + usage: { + prompt_tokens: result.usage?.tokens ?? 0, + total_tokens: result.usage?.tokens ?? 0, + }, + }); + } + + const result = await embedMany({ + model, + values: inputs as string[], + }); + + return embedding({ + data: result.embeddings, + model: modelId, + usage: { + prompt_tokens: result.usage?.tokens ?? 0, + total_tokens: result.usage?.tokens ?? 0, + }, + }); + } + + async rerank(_body: RerankCreateParams): Promise { + throw new Error("AI SDK provider does not support reranking."); + } + + async list(): Promise { + return []; + } +} From 73ee3e047a0f0f9eeb518f645a67ab52b9d126e7 Mon Sep 17 00:00:00 2001 From: uinstinct <61635505+uinstinct@users.noreply.github.com> Date: Thu, 12 Feb 2026 15:52:35 +0530 Subject: [PATCH 3/9] remove usage of aisdkprovider id --- extensions/cli/src/config.ts | 51 +++++++++++----------- packages/config-yaml/src/schemas/models.ts | 1 + packages/openai-adapters/src/apis/AiSdk.ts | 13 ++++-- 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/extensions/cli/src/config.ts b/extensions/cli/src/config.ts index f26e1de2c64..fc38409ebfe 100644 --- a/extensions/cli/src/config.ts +++ b/extensions/cli/src/config.ts @@ -44,7 +44,7 @@ function mergeUserAgentIntoRequestOptions( /** * Creates an LLM API instance from a ModelConfig and auth configuration - * Handles special logic for continue-proxy provider and constructs the API + * Handles special logic for continue-proxy and ai-sdk providers and constructs the API */ export function createLlmApi( model: ModelConfig, @@ -53,30 +53,31 @@ export function createLlmApi( const accessToken = getAccessToken(authConfig); const organizationId = getOrganizationId(authConfig); - const config: LLMConfig = - model.provider === "continue-proxy" - ? { - provider: model.provider, - requestOptions: mergeUserAgentIntoRequestOptions( - model.requestOptions, - ), - apiBase: model.apiBase, - apiKey: accessToken ?? undefined, - env: { - apiKeyLocation: (model as any).apiKeyLocation, - orgScopeId: organizationId ?? null, - proxyUrl: - (model as { onPremProxyUrl: string | undefined }) - .onPremProxyUrl ?? (env.apiBase ? env.apiBase : undefined), - }, - } - : { - provider: model.provider as any, - apiKey: model.apiKey, - apiBase: model.apiBase, - requestOptions: model.requestOptions, - env: model.env, - }; + let config: LLMConfig; + + if (model.provider === "continue-proxy") { + config = { + provider: model.provider, + requestOptions: mergeUserAgentIntoRequestOptions(model.requestOptions), + apiBase: model.apiBase, + apiKey: accessToken ?? undefined, + env: { + apiKeyLocation: (model as any).apiKeyLocation, + orgScopeId: organizationId ?? null, + proxyUrl: + (model as { onPremProxyUrl: string | undefined }).onPremProxyUrl ?? + (env.apiBase ? 
From e5e62aac287f9a65b26d0e67acf6aa75298c7a13 Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Thu, 12 Feb 2026 16:08:08 +0530
Subject: [PATCH 5/9] revert unnecessary changes

---
 extensions/cli/src/config.ts | 53 ++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 27 deletions(-)

diff --git a/extensions/cli/src/config.ts b/extensions/cli/src/config.ts
index 5a77778bbd9..161c4306ed3 100644
--- a/extensions/cli/src/config.ts
+++ b/extensions/cli/src/config.ts
@@ -44,7 +44,7 @@ function mergeUserAgentIntoRequestOptions(
 
 /**
  * Creates an LLM API instance from a ModelConfig and auth configuration
- * Handles special logic for continue-proxy and ai-sdk providers and constructs the API
+ * Handles special logic for continue-proxy provider and constructs the API
  */
 export function createLlmApi(
   model: ModelConfig,
@@ -53,32 +53,31 @@ export function createLlmApi(
   const accessToken = getAccessToken(authConfig);
   const organizationId = getOrganizationId(authConfig);
 
-  let config: LLMConfig;
-
-  if (model.provider === "continue-proxy") {
-    config = {
-      provider: model.provider,
-      requestOptions: mergeUserAgentIntoRequestOptions(model.requestOptions),
-      apiBase: model.apiBase,
-      apiKey: accessToken ?? undefined,
-      env: {
-        apiKeyLocation: (model as any).apiKeyLocation,
-        orgScopeId: organizationId ?? null,
-        proxyUrl:
-          (model as { onPremProxyUrl: string | undefined }).onPremProxyUrl ??
-          (env.apiBase ? env.apiBase : undefined),
-      },
-    };
-  } else {
-    config = {
-      provider: model.provider as any,
-      model: model.model,
-      apiKey: model.apiKey,
-      apiBase: model.apiBase,
-      requestOptions: model.requestOptions,
-      env: model.env,
-    };
-  }
+  const config: LLMConfig =
+    model.provider === "continue-proxy"
+      ? {
+          provider: model.provider,
+          requestOptions: mergeUserAgentIntoRequestOptions(
+            model.requestOptions,
+          ),
+          apiBase: model.apiBase,
+          apiKey: accessToken ?? undefined,
+          env: {
+            apiKeyLocation: (model as any).apiKeyLocation,
+            orgScopeId: organizationId ?? null,
+            proxyUrl:
+              (model as { onPremProxyUrl: string | undefined })
+                .onPremProxyUrl ?? (env.apiBase ? env.apiBase : undefined),
+          },
+        }
+      : {
+          provider: model.provider as any,
+          model: model.model,
+          apiKey: model.apiKey,
+          apiBase: model.apiBase,
+          requestOptions: model.requestOptions,
+          env: model.env,
+        };
 
   return constructLlmApi(config) ?? null;
 }
From 1689ac28df6f35ee496213c9436ed667b52e7571 Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Thu, 12 Feb 2026 16:09:00 +0530
Subject: [PATCH 6/9] remove from config yaml

---
 packages/config-yaml/src/schemas/models.ts | 1 -
 1 file changed, 1 deletion(-)

diff --git a/packages/config-yaml/src/schemas/models.ts b/packages/config-yaml/src/schemas/models.ts
index 0badd35d9df..89856e35c95 100644
--- a/packages/config-yaml/src/schemas/models.ts
+++ b/packages/config-yaml/src/schemas/models.ts
@@ -193,7 +193,6 @@ const baseModelFields = {
     .record(z.string(), z.union([z.string(), z.boolean(), z.number()]))
     .optional(),
   autocompleteOptions: autocompleteOptionsSchema.optional(),
-  aiSdkProviderId: z.string().optional(),
 };
 
 export const modelSchema = z.union([
From 2642558ff0351081f4d2ceabb42588115b1ca864 Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Tue, 17 Feb 2026 17:57:48 +0530
Subject: [PATCH 7/9] make schema openai compatible

---
 .../src/convertToolsToVercel.ts               | 54 +++++++++++++++++--
 1 file changed, 51 insertions(+), 3 deletions(-)

diff --git a/packages/openai-adapters/src/convertToolsToVercel.ts b/packages/openai-adapters/src/convertToolsToVercel.ts
index df0811ff042..bd25581f528 100644
--- a/packages/openai-adapters/src/convertToolsToVercel.ts
+++ b/packages/openai-adapters/src/convertToolsToVercel.ts
@@ -4,6 +4,53 @@
 import type { ChatCompletionCreateParams } from "openai/resources/index.js";
 
+/**
+ * Recursively transforms schemas to be OpenAI strict mode compatible:
+ * - Adds additionalProperties: false to all object schemas
+ * - Ensures all properties are listed in the required array
+ */
+function makeOpenAIStrictCompatible(schema: any): any {
+  if (!schema || typeof schema !== "object") {
+    return schema;
+  }
+
+  const result = { ...schema };
+
+  if (result.type === "object") {
+    result.additionalProperties = false;
+
+    if (result.properties) {
+      const propertyKeys = Object.keys(result.properties);
+      result.required = propertyKeys;
+
+      result.properties = Object.fromEntries(
+        Object.entries(result.properties).map(([key, value]) => [
+          key,
+          makeOpenAIStrictCompatible(value),
+        ]),
+      );
+    }
+  }
+
+  if (result.items) {
+    result.items = makeOpenAIStrictCompatible(result.items);
+  }
+
+  if (result.anyOf) {
+    result.anyOf = result.anyOf.map(makeOpenAIStrictCompatible);
+  }
+
+  if (result.oneOf) {
+    result.oneOf = result.oneOf.map(makeOpenAIStrictCompatible);
+  }
+
+  if (result.allOf) {
+    result.allOf = result.allOf.map(makeOpenAIStrictCompatible);
+  }
+
+  return result;
+}
+
 /**
  * Converts OpenAI tool format to Vercel AI SDK format.
  *
@@ -25,11 +72,12 @@ export async function convertToolsToVercelFormat(
   const vercelTools: Record<string, any> = {};
   for (const tool of openaiTools) {
     if (tool.type === "function") {
+      const parameters = makeOpenAIStrictCompatible(
+        tool.function.parameters ?? { type: "object", properties: {} },
+      );
       vercelTools[tool.function.name] = {
         description: tool.function.description,
-        parameters: aiJsonSchema(
-          tool.function.parameters ?? { type: "object", properties: {} },
-        ),
+        parameters: aiJsonSchema(parameters),
       };
     }
   }
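The effect of makeOpenAIStrictCompatible is clearest on a small before/after (a hypothetical weather-tool schema, not taken from the patch):

// Input: a typical tool parameter schema
// {
//   type: "object",
//   properties: { city: { type: "string" }, days: { type: "number" } },
//   required: ["city"],
// }
//
// Output: strict-mode compatible
// {
//   type: "object",
//   additionalProperties: false,
//   required: ["city", "days"],
//   properties: { city: { type: "string" }, days: { type: "number" } },
// }

Note that the original required: ["city"] is overwritten: OpenAI strict mode expects every property to be listed as required, so optionality has to be expressed in the schema types themselves (e.g., a union with null).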
From da5238889520ef167920b34f7dcaf6dc6a1a56ab Mon Sep 17 00:00:00 2001
From: uinstinct <61635505+uinstinct@users.noreply.github.com>
Date: Tue, 17 Feb 2026 18:07:57 +0530
Subject: [PATCH 8/9] use openai compatible over anthropic adapter

---
 packages/openai-adapters/src/apis/AiSdk.ts | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/packages/openai-adapters/src/apis/AiSdk.ts b/packages/openai-adapters/src/apis/AiSdk.ts
index a6cd933eae9..0107b51102c 100644
--- a/packages/openai-adapters/src/apis/AiSdk.ts
+++ b/packages/openai-adapters/src/apis/AiSdk.ts
@@ -1,4 +1,3 @@
-import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenAI } from "@ai-sdk/openai";
 import {
   ChatCompletion,
@@ -29,7 +28,11 @@ type AiSdkProviderCreator = (options: {
 
 const PROVIDER_MAP: Record<string, AiSdkProviderCreator> = {
   openai: createOpenAI,
-  anthropic: createAnthropic,
+  anthropic: (options) =>
+    createOpenAI({
+      ...options,
+      baseURL: options.baseURL ?? "https://api.anthropic.com/v1/",
+    }),
 };
 
 export class AiSdkApi implements BaseLlmApi {
{ type: "object", properties: {} }, - ), + parameters: aiJsonSchema(parameters), }; } } From da5238889520ef167920b34f7dcaf6dc6a1a56ab Mon Sep 17 00:00:00 2001 From: uinstinct <61635505+uinstinct@users.noreply.github.com> Date: Tue, 17 Feb 2026 18:07:57 +0530 Subject: [PATCH 8/9] use openai compatible over anthropic adapter --- packages/openai-adapters/src/apis/AiSdk.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/openai-adapters/src/apis/AiSdk.ts b/packages/openai-adapters/src/apis/AiSdk.ts index a6cd933eae9..0107b51102c 100644 --- a/packages/openai-adapters/src/apis/AiSdk.ts +++ b/packages/openai-adapters/src/apis/AiSdk.ts @@ -1,4 +1,3 @@ -import { createAnthropic } from "@ai-sdk/anthropic"; import { createOpenAI } from "@ai-sdk/openai"; import { ChatCompletion, @@ -29,7 +28,11 @@ type AiSdkProviderCreator = (options: { const PROVIDER_MAP: Record = { openai: createOpenAI, - anthropic: createAnthropic, + anthropic: (options) => + createOpenAI({ + ...options, + baseURL: options.baseURL ?? "https://api.anthropic.com/v1/", + }), }; export class AiSdkApi implements BaseLlmApi { From ec8263d0292caa6534dd472a3250dc28ca97b97c Mon Sep 17 00:00:00 2001 From: uinstinct <61635505+uinstinct@users.noreply.github.com> Date: Tue, 17 Feb 2026 19:21:38 +0530 Subject: [PATCH 9/9] add support for openrouter --- packages/openai-adapters/src/apis/AiSdk.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/openai-adapters/src/apis/AiSdk.ts b/packages/openai-adapters/src/apis/AiSdk.ts index 0107b51102c..25515d18830 100644 --- a/packages/openai-adapters/src/apis/AiSdk.ts +++ b/packages/openai-adapters/src/apis/AiSdk.ts @@ -33,6 +33,11 @@ const PROVIDER_MAP: Record = { ...options, baseURL: options.baseURL ?? "https://api.anthropic.com/v1/", }), + openrouter: (options) => + createOpenAI({ + ...options, + baseURL: options.baseURL ?? "https://openrouter.ai/api/v1/", + }), }; export class AiSdkApi implements BaseLlmApi {