diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 33e167e8d..d161736ea 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -142,6 +142,7 @@ jobs:
         run: yarn workspace @forestadmin/ai-proxy test --testPathPattern='llm.integration'
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
   send-coverage:
     name: Send Coverage
diff --git a/packages/ai-proxy/jest.config.ts b/packages/ai-proxy/jest.config.ts
index 4a5344add..57142cb1a 100644
--- a/packages/ai-proxy/jest.config.ts
+++ b/packages/ai-proxy/jest.config.ts
@@ -1,9 +1,18 @@
 /* eslint-disable import/no-relative-packages */
+import path from 'path';
+
 import jestConfig from '../../jest.config';
 
+// Jest < 30 doesn't resolve wildcard exports in package.json.
+// @anthropic-ai/sdk uses "./lib/*" exports that need this workaround.
+const anthropicSdkDir = path.dirname(require.resolve('@anthropic-ai/sdk'));
+
 export default {
   ...jestConfig,
   collectCoverageFrom: ['<rootDir>/src/**/*.ts', '!<rootDir>/src/examples/**'],
   testMatch: ['<rootDir>/test/**/*.test.ts'],
   setupFiles: ['<rootDir>/test/setup-env.ts'],
+  moduleNameMapper: {
+    '^@anthropic-ai/sdk/(.*)$': `${anthropicSdkDir}/$1`,
+  },
 };
diff --git a/packages/ai-proxy/package.json b/packages/ai-proxy/package.json
index d9bef70f6..28a4d9bf0 100644
--- a/packages/ai-proxy/package.json
+++ b/packages/ai-proxy/package.json
@@ -14,6 +14,7 @@
   "dependencies": {
     "@forestadmin/agent-toolkit": "1.0.0",
     "@forestadmin/datasource-toolkit": "1.50.1",
+    "@langchain/anthropic": "1.3.17",
     "@langchain/community": "1.1.4",
     "@langchain/core": "1.1.15",
     "@langchain/langgraph": "^1.1.0",
diff --git a/packages/ai-proxy/src/anthropic-adapter.ts b/packages/ai-proxy/src/anthropic-adapter.ts
new file mode 100644
index 000000000..a5a9728fd
--- /dev/null
+++ b/packages/ai-proxy/src/anthropic-adapter.ts
@@ -0,0 +1,87 @@
+import type { OpenAIMessage } from './langchain-adapter';
+import type { ChatCompletionTool, ChatCompletionToolChoice } from './provider';
+import type { ChatAnthropic } from '@langchain/anthropic';
+import type { BaseMessage } from '@langchain/core/messages';
+
+import { LangChainAdapter } from './langchain-adapter';
+
+/**
+ * Extended tool_choice type for Anthropic.
+ *
+ * LangChain's AnthropicToolChoice doesn't include `disable_parallel_tool_use`,
+ * but the Anthropic API supports it and LangChain passes objects through directly.
+ */
+type AnthropicToolChoiceWithParallelControl =
+  | 'auto'
+  | 'any'
+  | 'none'
+  | { type: 'tool'; name: string; disable_parallel_tool_use?: boolean }
+  | { type: 'auto' | 'any'; disable_parallel_tool_use: boolean };
+
+export default class AnthropicAdapter {
+  static convertMessages(messages: OpenAIMessage[]): BaseMessage[] {
+    return LangChainAdapter.convertMessages(AnthropicAdapter.mergeSystemMessages(messages));
+  }
+
+  /** The `as string` cast works around LangChain's AnthropicToolChoice missing `disable_parallel_tool_use`. */
+  static bindTools(
+    model: ChatAnthropic,
+    tools: ChatCompletionTool[],
+    {
+      toolChoice,
+      parallelToolCalls,
+    }: { toolChoice?: ChatCompletionToolChoice; parallelToolCalls?: boolean },
+  ): ChatAnthropic {
+    return model.bindTools(tools, {
+      tool_choice: AnthropicAdapter.convertToolChoice({ toolChoice, parallelToolCalls }) as string,
+    }) as ChatAnthropic;
+  }
+
+  /**
+   * Convert OpenAI tool_choice to Anthropic format, applying parallel tool restriction.
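+   * For example, `tool_choice: 'required'` with `parallel_tool_calls: false` becomes
+   * `{ type: 'any', disable_parallel_tool_use: true }` (an illustration of the mapping
+   * implemented below).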
+ * + * Converts to LangChain format first, then applies `disable_parallel_tool_use` + * when `parallelToolCalls` is explicitly `false` (not just falsy — `undefined` means + * no restriction). + */ + private static convertToolChoice({ + toolChoice, + parallelToolCalls, + }: { + toolChoice?: ChatCompletionToolChoice; + parallelToolCalls?: boolean; + } = {}): AnthropicToolChoiceWithParallelControl | undefined { + const base = LangChainAdapter.convertToolChoice(toolChoice); + + if (parallelToolCalls !== false) return base; + + // Anthropic requires object form to set disable_parallel_tool_use + if (base === undefined || base === 'auto') { + return { type: 'auto', disable_parallel_tool_use: true }; + } + + if (base === 'any') { + return { type: 'any', disable_parallel_tool_use: true }; + } + + if (base === 'none') return 'none'; + + return { ...base, disable_parallel_tool_use: true }; + } + + /** + * Merge all system messages into a single one placed first. + * + * Anthropic only allows a single system message at the beginning of the conversation. + */ + private static mergeSystemMessages(messages: OpenAIMessage[]): OpenAIMessage[] { + const systemContents = messages.filter(m => m.role === 'system').map(m => m.content || ''); + + if (systemContents.length <= 1) return messages; + + const merged: OpenAIMessage = { role: 'system', content: systemContents.join('\n\n') }; + const nonSystem = messages.filter(m => m.role !== 'system'); + + return [merged, ...nonSystem]; + } +} diff --git a/packages/ai-proxy/src/errors.ts b/packages/ai-proxy/src/errors.ts index 108a93a72..e47feadcd 100644 --- a/packages/ai-proxy/src/errors.ts +++ b/packages/ai-proxy/src/errors.ts @@ -42,9 +42,12 @@ export class AINotFoundError extends NotFoundError { } export class AIUnprocessableError extends UnprocessableError { - constructor(message: string) { + readonly cause?: Error; + + constructor(message: string, options?: { cause?: Error }) { super(message); this.name = 'AIUnprocessableError'; + if (options?.cause) this.cause = options.cause; } } @@ -55,17 +58,10 @@ export class AINotConfiguredError extends AIError { } } -export class OpenAIUnprocessableError extends AIUnprocessableError { - constructor(message: string) { - super(message); - this.name = 'OpenAIError'; - } -} - export class AIToolUnprocessableError extends AIUnprocessableError { constructor(message: string) { super(message); - this.name = 'AIToolError'; + this.name = 'AIToolUnprocessableError'; } } diff --git a/packages/ai-proxy/src/index.ts b/packages/ai-proxy/src/index.ts index dfa50e46e..5dd913d5d 100644 --- a/packages/ai-proxy/src/index.ts +++ b/packages/ai-proxy/src/index.ts @@ -3,6 +3,7 @@ import type { McpConfiguration } from './mcp-client'; import McpConfigChecker from './mcp-config-checker'; export { createAiProvider } from './create-ai-provider'; +export { default as ProviderDispatcher } from './provider-dispatcher'; export * from './provider-dispatcher'; export * from './remote-tools'; export * from './router'; diff --git a/packages/ai-proxy/src/langchain-adapter.ts b/packages/ai-proxy/src/langchain-adapter.ts new file mode 100644 index 000000000..c79d3fcd7 --- /dev/null +++ b/packages/ai-proxy/src/langchain-adapter.ts @@ -0,0 +1,189 @@ +import type { ChatCompletionResponse, ChatCompletionToolChoice } from './provider'; +import type { BaseMessage } from '@langchain/core/messages'; + +import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages'; +import crypto from 'crypto'; + +import { AIBadRequestError } from 
'./errors'; + +interface OpenAISystemMessage { + role: 'system'; + content: string | null; +} + +interface OpenAIUserMessage { + role: 'user'; + content: string | null; +} + +interface OpenAIAssistantMessage { + role: 'assistant'; + content: string | null; + tool_calls?: Array<{ + id: string; + function: { + name: string; + arguments: string; + }; + }>; +} + +interface OpenAIToolMessage { + role: 'tool'; + content: string | null; + tool_call_id: string; +} + +export type OpenAIMessage = + | OpenAISystemMessage + | OpenAIUserMessage + | OpenAIAssistantMessage + | OpenAIToolMessage; + +export type LangChainToolChoice = + | 'auto' + | 'any' + | 'none' + | { type: 'tool'; name: string } + | undefined; + +/** Handles generic format conversions between OpenAI and LangChain. */ +export class LangChainAdapter { + /** Convert OpenAI-format messages to LangChain messages. */ + static convertMessages(messages: OpenAIMessage[]): BaseMessage[] { + const result: BaseMessage[] = []; + + for (const msg of messages) { + switch (msg.role) { + case 'system': + result.push(new SystemMessage(msg.content || '')); + break; + case 'user': + result.push(new HumanMessage(msg.content || '')); + break; + case 'assistant': + if (msg.tool_calls) { + result.push( + new AIMessage({ + content: msg.content || '', + tool_calls: msg.tool_calls.map(tc => ({ + id: tc.id, + name: tc.function.name, + args: LangChainAdapter.parseToolArguments( + tc.function.name, + tc.function.arguments, + ), + })), + }), + ); + } else { + result.push(new AIMessage(msg.content || '')); + } + + break; + case 'tool': + if (!msg.tool_call_id) { + throw new AIBadRequestError('Tool message is missing required "tool_call_id" field.'); + } + + result.push( + new ToolMessage({ + content: msg.content || '', + tool_call_id: msg.tool_call_id, + }), + ); + break; + default: + throw new AIBadRequestError( + `Unsupported message role '${ + (msg as { role: string }).role + }'. Expected: system, user, assistant, or tool.`, + ); + } + } + + return result; + } + + /** Convert a LangChain AIMessage to an OpenAI-compatible ChatCompletionResponse. */ + static convertResponse(response: AIMessage, modelName: string | null): ChatCompletionResponse { + const toolCalls = response.tool_calls?.map(tc => ({ + id: tc.id || `call_${crypto.randomUUID()}`, + type: 'function' as const, + function: { + name: tc.name, + arguments: JSON.stringify(tc.args), + }, + })); + + const usageMetadata = response.usage_metadata as + | { input_tokens?: number; output_tokens?: number; total_tokens?: number } + | undefined; + + return { + id: response.id || `msg_${crypto.randomUUID()}`, + object: 'chat.completion', + created: Math.floor(Date.now() / 1000), + model: modelName, + choices: [ + { + index: 0, + message: { + role: 'assistant', + content: LangChainAdapter.extractTextContent(response.content), + refusal: null, + tool_calls: toolCalls?.length ? toolCalls : undefined, + }, + finish_reason: toolCalls?.length ? 'tool_calls' : 'stop', + logprobs: null, + }, + ], + usage: { + prompt_tokens: usageMetadata?.input_tokens ?? 0, + completion_tokens: usageMetadata?.output_tokens ?? 0, + total_tokens: usageMetadata?.total_tokens ?? 0, + }, + }; + } + + /** Convert OpenAI tool_choice to LangChain format. 
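+   * Mapping, per the implementation below: 'auto' → 'auto', 'none' → 'none',
+   * 'required' → 'any', and {type: 'function', function: {name}} → {type: 'tool', name}.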
+   */
+  static convertToolChoice(toolChoice: ChatCompletionToolChoice | undefined): LangChainToolChoice {
+    if (!toolChoice) return undefined;
+    if (toolChoice === 'auto') return 'auto';
+    if (toolChoice === 'none') return 'none';
+    if (toolChoice === 'required') return 'any';
+
+    if (typeof toolChoice === 'object' && toolChoice.type === 'function') {
+      return { type: 'tool', name: toolChoice.function.name };
+    }
+
+    throw new AIBadRequestError(
+      `Unsupported tool_choice value. Expected: 'auto', 'none', 'required', or {type: 'function', function: {name: '...'}}.`,
+    );
+  }
+
+  private static extractTextContent(content: AIMessage['content']): string | null {
+    if (typeof content === 'string') return content || null;
+
+    if (Array.isArray(content)) {
+      const text = content
+        .filter(block => block.type === 'text')
+        .map(block => ('text' in block ? block.text : ''))
+        .join('');
+
+      return text || null;
+    }
+
+    return null;
+  }
+
+  private static parseToolArguments(toolName: string, args: string): Record<string, unknown> {
+    try {
+      return JSON.parse(args);
+    } catch {
+      throw new AIBadRequestError(
+        `Invalid JSON in tool_calls arguments for tool '${toolName}': ${args}`,
+      );
+    }
+  }
+}
diff --git a/packages/ai-proxy/src/provider-dispatcher.ts b/packages/ai-proxy/src/provider-dispatcher.ts
index ac324d01f..de285bf60 100644
--- a/packages/ai-proxy/src/provider-dispatcher.ts
+++ b/packages/ai-proxy/src/provider-dispatcher.ts
@@ -1,17 +1,22 @@
+import type { OpenAIMessage } from './langchain-adapter';
 import type { AiConfiguration, ChatCompletionResponse, ChatCompletionTool } from './provider';
 import type { RemoteTools } from './remote-tools';
 import type { DispatchBody } from './schemas/route';
-import type { BaseMessageLike } from '@langchain/core/messages';
+import type { AIMessage, BaseMessageLike } from '@langchain/core/messages';
 
+import { ChatAnthropic } from '@langchain/anthropic';
 import { convertToOpenAIFunction } from '@langchain/core/utils/function_calling';
 import { ChatOpenAI } from '@langchain/openai';
 
-import { AINotConfiguredError, OpenAIUnprocessableError } from './errors';
+import AnthropicAdapter from './anthropic-adapter';
+import { AIBadRequestError, AINotConfiguredError, AIUnprocessableError } from './errors';
+import { LangChainAdapter } from './langchain-adapter';
 
 // Re-export types for consumers
 export type {
   AiConfiguration,
   AiProvider,
+  AnthropicConfiguration,
   BaseAiConfiguration,
   ChatCompletionMessage,
   ChatCompletionResponse,
@@ -21,8 +26,12 @@ export type {
 } from './provider';
 export type { DispatchBody } from './schemas/route';
 
-export class ProviderDispatcher {
-  private readonly chatModel: ChatOpenAI | null = null;
+export default class ProviderDispatcher {
+  private readonly openaiModel: ChatOpenAI | null = null;
+
+  private readonly anthropicModel: ChatAnthropic | null = null;
+
+  private readonly modelName: string | null = null;
 
   private readonly remoteTools: RemoteTools;
 
@@ -31,19 +40,39 @@ export class ProviderDispatcher {
     if (configuration?.provider === 'openai') {
       const { provider, name, ...chatOpenAIOptions } = configuration;
 
-      this.chatModel = new ChatOpenAI({
+      this.openaiModel = new ChatOpenAI({
         maxRetries: 0, // No retries by default - this lib is a passthrough
         ...chatOpenAIOptions,
         __includeRawResponse: true,
       });
+    } else if (configuration?.provider === 'anthropic') {
+      const { provider, name, model, ...clientOptions } = configuration;
+      this.anthropicModel = new ChatAnthropic({
+        maxRetries: 0, // No retries by default - this lib is a passthrough
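+        // The remaining AnthropicInput options (temperature, maxTokens, apiKey, ...) pass through unchanged.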
+        ...clientOptions,
+        model,
+      });
+      this.modelName = model;
+    } else if (configuration) {
+      throw new AIBadRequestError(
+        `Unsupported AI provider '${(configuration as { provider: string }).provider}'.`,
+      );
+    }
   }
 
   async dispatch(body: DispatchBody): Promise<ChatCompletionResponse> {
-    if (!this.chatModel) {
-      throw new AINotConfiguredError();
+    if (this.openaiModel) {
+      return this.dispatchOpenAI(body);
+    }
+
+    if (this.anthropicModel) {
+      return this.dispatchAnthropic(body);
     }
 
+    throw new AINotConfiguredError();
+  }
+
+  private async dispatchOpenAI(body: DispatchBody): Promise<ChatCompletionResponse> {
     const {
       tools,
       messages,
@@ -53,44 +82,102 @@
       tool_choice: toolChoice,
       parallel_tool_calls: parallelToolCalls,
     } = body;
 
     const enrichedTools = this.enrichToolDefinitions(tools);
 
     const model = enrichedTools?.length
-      ? this.chatModel.bindTools(enrichedTools, {
+      ? this.openaiModel.bindTools(enrichedTools, {
          tool_choice: toolChoice,
          parallel_tool_calls: parallelToolCalls,
        })
-      : this.chatModel;
+      : this.openaiModel;
+
+    let response: AIMessage;
 
     try {
-      const response = await model.invoke(messages as BaseMessageLike[]);
+      response = await model.invoke(messages as BaseMessageLike[]);
+    } catch (error) {
+      throw ProviderDispatcher.wrapProviderError(error, 'OpenAI');
+    }
 
-      // eslint-disable-next-line no-underscore-dangle
-      const rawResponse = response.additional_kwargs.__raw_response as ChatCompletionResponse;
+    // eslint-disable-next-line no-underscore-dangle
+    const rawResponse = response.additional_kwargs.__raw_response as ChatCompletionResponse;
 
-      if (!rawResponse) {
-        throw new OpenAIUnprocessableError(
-          'OpenAI response missing raw response data. This may indicate an API change.',
-        );
-      }
+    if (!rawResponse) {
+      throw new AIUnprocessableError(
+        'OpenAI response missing raw response data. This may indicate an API change.',
+      );
+    }
 
-      return rawResponse;
+    return rawResponse;
+  }
+
+  private async dispatchAnthropic(body: DispatchBody): Promise<ChatCompletionResponse> {
+    const {
+      tools,
+      messages,
+      tool_choice: toolChoice,
+      parallel_tool_calls: parallelToolCalls,
+    } = body;
+
+    // Convert messages outside try-catch so input validation errors propagate directly
+    const langChainMessages = AnthropicAdapter.convertMessages(messages as OpenAIMessage[]);
+    const enrichedTools = this.enrichToolDefinitions(tools);
+
+    const model = enrichedTools?.length
+      ? AnthropicAdapter.bindTools(this.anthropicModel, enrichedTools, {
+          toolChoice,
+          parallelToolCalls,
+        })
+      : this.anthropicModel;
+
+    let response: AIMessage;
+
+    try {
+      response = (await model.invoke(langChainMessages)) as AIMessage;
     } catch (error) {
-      if (error instanceof OpenAIUnprocessableError) throw error;
+      throw ProviderDispatcher.wrapProviderError(error, 'Anthropic');
     }
 
-      const err = error as Error & { status?: number };
+    return LangChainAdapter.convertResponse(response, this.modelName);
+  }
 
-      if (err.status === 429) {
-        throw new OpenAIUnprocessableError(`Rate limit exceeded: ${err.message}`);
-      }
+  /**
+   * Wraps provider errors into AI-specific error types.
+   *
+   * TODO: Currently all provider errors are wrapped as AIUnprocessableError,
+   * losing the original HTTP semantics (429 rate limit, 401 auth failure).
+   * To fix this properly we need to:
+   * 1. Add UnauthorizedError and TooManyRequestsError to datasource-toolkit
+   * 2. Add corresponding cases in the agent's error-handling middleware
+   * 3. Create AIProviderError, AIRateLimitError, AIAuthenticationError in ai-proxy
+   *    with baseBusinessErrorName overrides for correct HTTP status mapping
+   */
+  private static wrapProviderError(error: unknown, providerName: string): Error {
+    if (error instanceof AIUnprocessableError) return error;
+    if (error instanceof AIBadRequestError) return error;
+
+    if (!(error instanceof Error)) {
+      return new AIUnprocessableError(`Error while calling ${providerName}: ${String(error)}`);
+    }
 
-      if (err.status === 401) {
-        throw new OpenAIUnprocessableError(`Authentication failed: ${err.message}`);
-      }
+    const { status } = error as Error & { status?: number };
 
-      throw new OpenAIUnprocessableError(`Error while calling OpenAI: ${err.message}`);
+    if (status === 429) {
+      return new AIUnprocessableError(`${providerName} rate limit exceeded: ${error.message}`, {
+        cause: error,
+      });
     }
+
+    if (status === 401) {
+      return new AIUnprocessableError(`${providerName} authentication failed: ${error.message}`, {
+        cause: error,
+      });
+    }
+
+    return new AIUnprocessableError(`Error while calling ${providerName}: ${error.message}`, {
+      cause: error,
+    });
   }
 
-  private enrichToolDefinitions(tools?: ChatCompletionTool[]) {
-    if (!tools || !Array.isArray(tools)) return tools;
+  private enrichToolDefinitions(tools?: ChatCompletionTool[]): ChatCompletionTool[] | undefined {
+    if (!tools) return tools;
 
     const remoteToolSchemas = this.remoteTools.tools.map(remoteTool =>
       convertToOpenAIFunction(remoteTool.base),
diff --git a/packages/ai-proxy/src/provider.ts b/packages/ai-proxy/src/provider.ts
index ba1730c5f..c59bcab3b 100644
--- a/packages/ai-proxy/src/provider.ts
+++ b/packages/ai-proxy/src/provider.ts
@@ -1,4 +1,6 @@
-import type { ChatOpenAIFields, OpenAIChatModelId } from '@langchain/openai';
+import type Anthropic from '@anthropic-ai/sdk';
+import type { AnthropicInput } from '@langchain/anthropic';
+import type { ChatOpenAIFields } from '@langchain/openai';
 import type OpenAI from 'openai';
 
 // OpenAI type aliases
@@ -8,7 +10,7 @@
 export type ChatCompletionTool = OpenAI.Chat.Completions.ChatCompletionTool;
 export type ChatCompletionToolChoice = OpenAI.Chat.Completions.ChatCompletionToolChoiceOption;
 
 // AI Provider types
-export type AiProvider = 'openai';
+export type AiProvider = 'openai' | 'anthropic';
 
 /**
  * Base configuration common to all AI providers.
@@ -24,12 +26,21 @@ export type BaseAiConfiguration = {
  * OpenAI-specific configuration.
  * Extends base with all ChatOpenAI options (temperature, maxTokens, configuration, etc.)
  */
-export type OpenAiConfiguration = BaseAiConfiguration &
+export type OpenAiConfiguration = Omit &
   Omit & {
     provider: 'openai';
-    // OpenAIChatModelId provides autocomplete for known models (gpt-4o, gpt-4-turbo, etc.)
-    // (string & NonNullable<unknown>) allows custom model strings without losing autocomplete
-    model: OpenAIChatModelId | (string & NonNullable<unknown>);
+    model: OpenAI.ChatModel | (string & NonNullable<unknown>);
   };
 
-export type AiConfiguration = OpenAiConfiguration;
+/**
+ * Anthropic-specific configuration.
+ * Extends base with all ChatAnthropic options (temperature, maxTokens, etc.)
+ * Supports both `apiKey` (unified) and `anthropicApiKey` (native) for flexibility.
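+ *
+ * @example
+ * // A minimal sketch; the name, model string, and key below are placeholders.
+ * const config: AnthropicConfiguration = {
+ *   name: 'claude',
+ *   provider: 'anthropic',
+ *   model: 'claude-haiku-4-5-20251001',
+ *   apiKey: process.env.ANTHROPIC_API_KEY,
+ * };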
+ */ +export type AnthropicConfiguration = Omit & + Omit & { + provider: 'anthropic'; + model: Anthropic.Messages.Model; + }; + +export type AiConfiguration = OpenAiConfiguration | AnthropicConfiguration; diff --git a/packages/ai-proxy/src/router.ts b/packages/ai-proxy/src/router.ts index 5812df10b..8424a35bc 100644 --- a/packages/ai-proxy/src/router.ts +++ b/packages/ai-proxy/src/router.ts @@ -7,7 +7,7 @@ import type { z } from 'zod'; import { AIBadRequestError, AIModelNotSupportedError } from './errors'; import McpClient from './mcp-client'; -import { ProviderDispatcher } from './provider-dispatcher'; +import ProviderDispatcher from './provider-dispatcher'; import { RemoteTools } from './remote-tools'; import { routeArgsSchema } from './schemas/route'; import isModelSupportingTools from './supported-models'; @@ -45,7 +45,7 @@ export class Router { private validateConfigurations(): void { for (const config of this.aiConfigurations) { - if (!isModelSupportingTools(config.model)) { + if (!isModelSupportingTools(config.model, config.provider)) { throw new AIModelNotSupportedError(config.model); } } @@ -152,7 +152,7 @@ export class Router { const fallback = this.aiConfigurations[0]; this.logger?.( 'Warn', - `AI configuration '${aiName}' not found. Falling back to '${fallback.name}'.`, + `AI configuration '${aiName}' not found. Falling back to '${fallback.name}' (provider: ${fallback.provider}, model: ${fallback.model})`, ); return fallback; diff --git a/packages/ai-proxy/src/supported-models.ts b/packages/ai-proxy/src/supported-models.ts index 37647b94e..e9d34fd4a 100644 --- a/packages/ai-proxy/src/supported-models.ts +++ b/packages/ai-proxy/src/supported-models.ts @@ -1,14 +1,9 @@ -/** - * OpenAI model prefixes that do NOT support tool calls via the chat completions API. - * - * Uses prefix matching: model === prefix OR model.startsWith(prefix + '-') - * - * Unknown models are allowed by default. - * If a model fails the integration test, add it here. - * - * @see https://platform.openai.com/docs/guides/function-calling - */ -const UNSUPPORTED_MODEL_PREFIXES = [ +import type { AiProvider } from './provider'; + +// ─── OpenAI ────────────────────────────────────────────────────────────────── +// If a model fails the llm.integration test, add it here. + +const OPENAI_UNSUPPORTED_PREFIXES = [ // Legacy models 'gpt-4', // Base gpt-4 doesn't honor tool_choice: required 'text-davinci', @@ -35,11 +30,7 @@ const UNSUPPORTED_MODEL_PREFIXES = [ 'codex', // codex-mini-latest ]; -/** - * OpenAI model patterns that do NOT support tool calls. - * Uses contains matching: model.includes(pattern) - */ -const UNSUPPORTED_MODEL_PATTERNS = [ +const OPENAI_UNSUPPORTED_PATTERNS = [ // Non-chat model variants (can appear in the middle of model names) '-realtime', '-audio', @@ -53,36 +44,49 @@ const UNSUPPORTED_MODEL_PATTERNS = [ '-deep-research', ]; -/** - * Models that DO support tool calls even though they match an unsupported prefix. - * These override the UNSUPPORTED_MODEL_PREFIXES list. - */ -const SUPPORTED_MODEL_OVERRIDES = ['gpt-4-turbo', 'gpt-4o', 'gpt-4.1']; - -/** - * Checks if a model is compatible with Forest Admin AI. - * - * Supported models must handle tool calls and the parallel_tool_calls parameter. 
- */ -export default function isModelSupportingTools(model: string): boolean { - // Check pattern matches first (contains) - these NEVER support tools - const matchesUnsupportedPattern = UNSUPPORTED_MODEL_PATTERNS.some(pattern => - model.includes(pattern), - ); - if (matchesUnsupportedPattern) return false; +const OPENAI_UNSUPPORTED_MODELS = [ + 'us-40-51r-vm-ev3', // Not a chat model (v1/completions only) +]; - // Check unsupported prefixes - const matchesUnsupportedPrefix = UNSUPPORTED_MODEL_PREFIXES.some( +const OPENAI_SUPPORTED_OVERRIDES = ['gpt-4-turbo', 'gpt-4o', 'gpt-4.1']; + +function isOpenAIModelSupported(model: string): boolean { + if (OPENAI_UNSUPPORTED_MODELS.includes(model)) return false; + + const matchesPattern = OPENAI_UNSUPPORTED_PATTERNS.some(p => model.includes(p)); + if (matchesPattern) return false; + + const matchesPrefix = OPENAI_UNSUPPORTED_PREFIXES.some( prefix => model === prefix || model.startsWith(`${prefix}-`), ); - // Check if model is in the supported overrides list - const isSupportedOverride = SUPPORTED_MODEL_OVERRIDES.some( + const isOverride = OPENAI_SUPPORTED_OVERRIDES.some( override => model === override || model.startsWith(`${override}-`), ); - // If it matches an unsupported prefix but is not in overrides, reject it - if (matchesUnsupportedPrefix && !isSupportedOverride) return false; + return !matchesPrefix || isOverride; +} + +// ─── Anthropic ─────────────────────────────────────────────────────────────── +// If a model fails the llm.integration test, add it here. + +const ANTHROPIC_UNSUPPORTED_MODELS = [ + 'claude-3-haiku-20240307', // EOL 2025-03-14 + 'claude-3-5-haiku-20241022', // EOL 2026-02-19 + 'claude-3-5-haiku-latest', // Points to deprecated claude-3-5-haiku-20241022 + 'claude-3-7-sonnet-20250219', // EOL 2026-02-19 + 'claude-opus-4-20250514', // Requires streaming (non-streaming times out) + 'claude-opus-4-1-20250805', // Requires streaming (non-streaming times out) +]; + +function isAnthropicModelSupported(model: string): boolean { + return !ANTHROPIC_UNSUPPORTED_MODELS.includes(model); +} + +// ─── Public API ────────────────────────────────────────────────────────────── + +export default function isModelSupportingTools(model: string, provider?: AiProvider): boolean { + if (provider === 'anthropic') return isAnthropicModelSupported(model); - return true; + return isOpenAIModelSupported(model); } diff --git a/packages/ai-proxy/test/.env-test.example b/packages/ai-proxy/test/.env-test.example index d11920933..3e5aab7d7 100644 --- a/packages/ai-proxy/test/.env-test.example +++ b/packages/ai-proxy/test/.env-test.example @@ -2,3 +2,4 @@ # This file is used for integration tests OPENAI_API_KEY=sk-your-openai-api-key-here +ANTHROPIC_API_KEY=sk-ant-your-anthropic-api-key-here diff --git a/packages/ai-proxy/test/anthropic-adapter.test.ts b/packages/ai-proxy/test/anthropic-adapter.test.ts new file mode 100644 index 000000000..27da1d449 --- /dev/null +++ b/packages/ai-proxy/test/anthropic-adapter.test.ts @@ -0,0 +1,134 @@ +import { HumanMessage, SystemMessage } from '@langchain/core/messages'; + +import AnthropicAdapter from '../src/anthropic-adapter'; + +jest.mock('@langchain/anthropic', () => ({ + ChatAnthropic: jest.fn(), +})); + +describe('AnthropicAdapter', () => { + describe('convertMessages', () => { + it('should merge multiple system messages into one before conversion', () => { + const result = AnthropicAdapter.convertMessages([ + { role: 'system', content: 'You are an AI agent.' 
}, + { role: 'system', content: 'The record belongs to Account.' }, + { role: 'user', content: 'get name' }, + ]); + + expect(result).toHaveLength(2); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[0].content).toBe('You are an AI agent.\n\nThe record belongs to Account.'); + expect(result[1]).toBeInstanceOf(HumanMessage); + }); + + it('should pass through single system message unchanged', () => { + const result = AnthropicAdapter.convertMessages([ + { role: 'system', content: 'You are helpful' }, + { role: 'user', content: 'Hello' }, + ]); + + expect(result).toHaveLength(2); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[0].content).toBe('You are helpful'); + }); + + it('should handle no system messages', () => { + const result = AnthropicAdapter.convertMessages([{ role: 'user', content: 'Hello' }]); + + expect(result).toHaveLength(1); + expect(result[0]).toBeInstanceOf(HumanMessage); + }); + }); + + describe('bindTools', () => { + const tools = [{ type: 'function' as const, function: { name: 'my_tool', parameters: {} } }]; + + function makeModel() { + const bindToolsMock = jest.fn().mockReturnThis(); + + return { bindTools: bindToolsMock } as any; + } + + it('should set disable_parallel_tool_use when parallelToolCalls is false', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { + toolChoice: 'required', + parallelToolCalls: false, + }); + + expect(model.bindTools).toHaveBeenCalledWith(tools, { + tool_choice: { type: 'any', disable_parallel_tool_use: true }, + }); + }); + + it('should default to auto with disable_parallel_tool_use when toolChoice undefined', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { parallelToolCalls: false }); + + expect(model.bindTools).toHaveBeenCalledWith(tools, { + tool_choice: { type: 'auto', disable_parallel_tool_use: true }, + }); + }); + + it('should add disable_parallel_tool_use to specific function', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { + toolChoice: { type: 'function', function: { name: 'specific_tool' } }, + parallelToolCalls: false, + }); + + expect(model.bindTools).toHaveBeenCalledWith(tools, { + tool_choice: { type: 'tool', name: 'specific_tool', disable_parallel_tool_use: true }, + }); + }); + + it('should pass "none" unchanged when parallelToolCalls is false', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { toolChoice: 'none', parallelToolCalls: false }); + + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: 'none' }); + }); + + it('should not add disable_parallel_tool_use when parallelToolCalls is true', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { + toolChoice: 'required', + parallelToolCalls: true, + }); + + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: 'any' }); + }); + + it('should not add disable_parallel_tool_use when parallelToolCalls is undefined', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { toolChoice: 'auto' }); + + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: 'auto' }); + }); + + it('should convert tool_choice without parallel restriction', () => { + const model = makeModel(); + + AnthropicAdapter.bindTools(model, tools, { toolChoice: 'auto' }); + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: 'auto' }); + + model.bindTools.mockClear(); + AnthropicAdapter.bindTools(model, tools, { toolChoice: 
'none' }); + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: 'none' }); + + model.bindTools.mockClear(); + AnthropicAdapter.bindTools(model, tools, { toolChoice: 'required' }); + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: 'any' }); + + model.bindTools.mockClear(); + AnthropicAdapter.bindTools(model, tools, {}); + expect(model.bindTools).toHaveBeenCalledWith(tools, { tool_choice: undefined }); + }); + }); +}); diff --git a/packages/ai-proxy/test/errors.test.ts b/packages/ai-proxy/test/errors.test.ts index 527f3bab6..f3a1569ec 100644 --- a/packages/ai-proxy/test/errors.test.ts +++ b/packages/ai-proxy/test/errors.test.ts @@ -17,7 +17,6 @@ import { McpConflictError, McpConnectionError, McpError, - OpenAIUnprocessableError, } from '../src/errors'; describe('AI Error Hierarchy', () => { @@ -62,16 +61,11 @@ describe('AI Error Hierarchy', () => { expect(error).toBeInstanceOf(UnprocessableError); }); - test('OpenAIUnprocessableError extends UnprocessableError via AIUnprocessableError', () => { - const error = new OpenAIUnprocessableError('test'); - expect(error).toBeInstanceOf(AIUnprocessableError); - expect(error).toBeInstanceOf(UnprocessableError); - }); - test('AIToolUnprocessableError extends UnprocessableError via AIUnprocessableError', () => { const error = new AIToolUnprocessableError('test'); expect(error).toBeInstanceOf(AIUnprocessableError); expect(error).toBeInstanceOf(UnprocessableError); + expect(error.name).toBe('AIToolUnprocessableError'); }); }); diff --git a/packages/ai-proxy/test/langchain-adapter.test.ts b/packages/ai-proxy/test/langchain-adapter.test.ts new file mode 100644 index 000000000..cc7ca89ae --- /dev/null +++ b/packages/ai-proxy/test/langchain-adapter.test.ts @@ -0,0 +1,260 @@ +import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages'; + +import { AIBadRequestError } from '../src/errors'; +import { LangChainAdapter } from '../src/langchain-adapter'; + +describe('LangChainAdapter', () => { + describe('convertMessages', () => { + it('should convert each role to the correct LangChain message type', () => { + const result = LangChainAdapter.convertMessages([ + { role: 'system', content: 'You are helpful' }, + { role: 'user', content: 'Hello' }, + { role: 'assistant', content: 'Hi there' }, + ]); + + expect(result).toEqual([ + expect.any(SystemMessage), + expect.any(HumanMessage), + expect.any(AIMessage), + ]); + expect(result[0].content).toBe('You are helpful'); + expect(result[1].content).toBe('Hello'); + expect(result[2].content).toBe('Hi there'); + }); + + it('should keep multiple system messages as separate SystemMessages', () => { + const result = LangChainAdapter.convertMessages([ + { role: 'system', content: 'First' }, + { role: 'system', content: 'Second' }, + { role: 'user', content: 'Hello' }, + ]); + + expect(result).toHaveLength(3); + expect(result[0]).toBeInstanceOf(SystemMessage); + expect(result[1]).toBeInstanceOf(SystemMessage); + expect(result[0].content).toBe('First'); + expect(result[1].content).toBe('Second'); + }); + + it('should convert assistant tool_calls with parsed JSON arguments', () => { + const result = LangChainAdapter.convertMessages([ + { + role: 'assistant', + content: '', + tool_calls: [ + { + id: 'call_123', + function: { name: 'get_weather', arguments: '{"city":"Paris"}' }, + }, + ], + }, + { role: 'tool', content: 'Sunny', tool_call_id: 'call_123' }, + ]); + + expect(result[0]).toBeInstanceOf(AIMessage); + expect((result[0] as AIMessage).tool_calls).toEqual([ 
+ { id: 'call_123', name: 'get_weather', args: { city: 'Paris' } }, + ]); + expect(result[1]).toBeInstanceOf(ToolMessage); + }); + + it('should handle content: null on assistant messages', () => { + const result = LangChainAdapter.convertMessages([ + { role: 'assistant', content: null }, + { role: 'user', content: 'Hello' }, + ]); + + expect(result[0]).toBeInstanceOf(AIMessage); + expect(result[0].content).toBe(''); + }); + + it('should throw AIBadRequestError for tool message without tool_call_id', () => { + expect(() => + LangChainAdapter.convertMessages([{ role: 'tool', content: 'result' } as any]), + ).toThrow(new AIBadRequestError('Tool message is missing required "tool_call_id" field.')); + }); + + it('should throw AIBadRequestError for unsupported message role', () => { + expect(() => + LangChainAdapter.convertMessages([{ role: 'unknown', content: 'test' }] as any), + ).toThrow( + new AIBadRequestError( + "Unsupported message role 'unknown'. Expected: system, user, assistant, or tool.", + ), + ); + }); + + it('should throw AIBadRequestError for invalid JSON in tool_calls arguments', () => { + expect(() => + LangChainAdapter.convertMessages([ + { + role: 'assistant', + content: '', + tool_calls: [ + { id: 'call_1', function: { name: 'my_tool', arguments: 'not-json' } }, + ], + }, + ]), + ).toThrow( + new AIBadRequestError( + "Invalid JSON in tool_calls arguments for tool 'my_tool': not-json", + ), + ); + }); + }); + + describe('convertResponse', () => { + it('should return a complete OpenAI-compatible response', () => { + const aiMessage = new AIMessage({ content: 'Hello from Claude' }); + Object.assign(aiMessage, { + id: 'msg_123', + usage_metadata: { input_tokens: 10, output_tokens: 20, total_tokens: 30 }, + }); + + const response = LangChainAdapter.convertResponse(aiMessage, 'claude-3-5-sonnet-latest'); + + expect(response).toEqual( + expect.objectContaining({ + id: 'msg_123', + object: 'chat.completion', + model: 'claude-3-5-sonnet-latest', + choices: [ + expect.objectContaining({ + index: 0, + message: expect.objectContaining({ + role: 'assistant', + content: 'Hello from Claude', + }), + finish_reason: 'stop', + }), + ], + usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 }, + }), + ); + }); + + it('should default usage to zeros when usage_metadata is missing', () => { + const response = LangChainAdapter.convertResponse( + new AIMessage({ content: 'Response' }), + 'claude-3-5-sonnet-latest', + ); + + expect(response.usage).toEqual({ + prompt_tokens: 0, + completion_tokens: 0, + total_tokens: 0, + }); + }); + + it('should return null content for empty string responses', () => { + const response = LangChainAdapter.convertResponse( + new AIMessage({ content: '' }), + 'claude-3-5-sonnet-latest', + ); + + expect(response.choices[0].message.content).toBeNull(); + }); + + it('should extract text from array content blocks', () => { + const aiMessage = new AIMessage({ + content: [ + { type: 'text', text: 'Here is the result' }, + { type: 'tool_use', id: 'call_1', name: 'search', input: { q: 'test' } }, + ], + }); + Object.assign(aiMessage, { + tool_calls: [{ id: 'call_1', name: 'search', args: { q: 'test' } }], + }); + + const response = LangChainAdapter.convertResponse(aiMessage, 'claude-3-5-sonnet-latest'); + + expect(response.choices[0].message.content).toBe('Here is the result'); + expect(response.choices[0].message.tool_calls).toHaveLength(1); + }); + + it('should return null content when array has no text blocks', () => { + const aiMessage = new AIMessage({ + 
content: [{ type: 'tool_use', id: 'call_1', name: 'search', input: { q: 'test' } }], + }); + Object.assign(aiMessage, { + tool_calls: [{ id: 'call_1', name: 'search', args: { q: 'test' } }], + }); + + const response = LangChainAdapter.convertResponse(aiMessage, 'claude-3-5-sonnet-latest'); + + expect(response.choices[0].message.content).toBeNull(); + }); + + it('should convert tool_calls to OpenAI format with finish_reason "tool_calls"', () => { + const aiMessage = new AIMessage({ content: '' }); + Object.assign(aiMessage, { + tool_calls: [{ id: 'call_456', name: 'search', args: { query: 'test' } }], + }); + + const response = LangChainAdapter.convertResponse(aiMessage, 'claude-3-5-sonnet-latest'); + + expect(response.choices[0].message.tool_calls).toEqual([ + { + id: 'call_456', + type: 'function', + function: { name: 'search', arguments: '{"query":"test"}' }, + }, + ]); + expect(response.choices[0].finish_reason).toBe('tool_calls'); + }); + + it('should generate a UUID fallback when tool_call has no id', () => { + const aiMessage = new AIMessage({ content: '' }); + Object.assign(aiMessage, { + tool_calls: [{ name: 'search', args: { q: 'test' } }], + }); + + const response = LangChainAdapter.convertResponse(aiMessage, 'claude-3-5-sonnet-latest'); + + expect(response.choices[0].message.tool_calls![0].id).toMatch(/^call_/); + }); + + it('should generate a UUID fallback when response has no id', () => { + const response = LangChainAdapter.convertResponse( + new AIMessage({ content: 'Hello' }), + 'claude-3-5-sonnet-latest', + ); + + expect(response.id).toMatch(/^msg_/); + }); + }); + + describe('convertToolChoice', () => { + it('should pass "auto" through unchanged', () => { + expect(LangChainAdapter.convertToolChoice('auto')).toBe('auto'); + }); + + it('should convert "required" to "any"', () => { + expect(LangChainAdapter.convertToolChoice('required')).toBe('any'); + }); + + it('should pass "none" through unchanged', () => { + expect(LangChainAdapter.convertToolChoice('none')).toBe('none'); + }); + + it('should return undefined when no tool_choice', () => { + expect(LangChainAdapter.convertToolChoice(undefined)).toBeUndefined(); + }); + + it('should convert specific function to { type: "tool", name }', () => { + expect( + LangChainAdapter.convertToolChoice({ + type: 'function', + function: { name: 'specific_tool' }, + }), + ).toEqual({ type: 'tool', name: 'specific_tool' }); + }); + + it('should throw AIBadRequestError for unrecognized tool_choice', () => { + expect(() => LangChainAdapter.convertToolChoice({ type: 'unknown' } as any)).toThrow( + AIBadRequestError, + ); + }); + }); + +}); diff --git a/packages/ai-proxy/test/llm.integration.test.ts b/packages/ai-proxy/test/llm.integration.test.ts index 05e440035..c68efee76 100644 --- a/packages/ai-proxy/test/llm.integration.test.ts +++ b/packages/ai-proxy/test/llm.integration.test.ts @@ -1,14 +1,18 @@ /** - * End-to-end integration tests with real OpenAI API and MCP server. + * End-to-end integration tests with real OpenAI and Anthropic APIs and MCP server. * - * These tests require a valid OPENAI_API_KEY environment variable. - * They are skipped if the key is not present. + * These tests require valid API key environment variables: + * - OPENAI_API_KEY for OpenAI tests + * - ANTHROPIC_API_KEY for Anthropic tests * - * Run with: yarn workspace @forestadmin/ai-proxy test openai.integration + * Tests are skipped if the corresponding key is not present. 
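 * In CI, both keys are provided through repository secrets (see the build.yml change above).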
+ * + * Run with: yarn workspace @forestadmin/ai-proxy test llm.integration */ import type { ChatCompletionResponse } from '../src'; import type { Server } from 'http'; +import Anthropic from '@anthropic-ai/sdk'; // eslint-disable-next-line import/extensions import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; import OpenAI from 'openai'; @@ -18,15 +22,10 @@ import { Router } from '../src'; import runMcpServer from '../src/examples/simple-mcp-server'; import isModelSupportingTools from '../src/supported-models'; -const { OPENAI_API_KEY } = process.env; +const { OPENAI_API_KEY, ANTHROPIC_API_KEY } = process.env; const describeWithOpenAI = OPENAI_API_KEY ? describe : describe.skip; +const describeWithAnthropic = ANTHROPIC_API_KEY ? describe : describe.skip; -/** - * Fetches available models from OpenAI API. - * Returns all models that pass `isModelSupportingTools`. - * - * If a model fails the integration test, update the blacklist in supported-models.ts. - */ async function fetchChatModelsFromOpenAI(): Promise { const openai = new OpenAI({ apiKey: OPENAI_API_KEY }); @@ -48,145 +47,460 @@ async function fetchChatModelsFromOpenAI(): Promise { .sort(); } -describeWithOpenAI('OpenAI Integration (real API)', () => { - const router = new Router({ - aiConfigurations: [ - { - name: 'test-gpt', - provider: 'openai', - model: 'gpt-4o-mini', // Cheapest model with tool support - apiKey: OPENAI_API_KEY, - }, - ], - }); +async function fetchChatModelsFromAnthropic(): Promise { + const anthropic = new Anthropic({ apiKey: ANTHROPIC_API_KEY }); - describe('route: ai-query', () => { - it('should complete a simple chat request', async () => { - const response = (await router.route({ - route: 'ai-query', - body: { - messages: [ - { role: 'system', content: 'You are a helpful assistant. Be very concise.' }, - { role: 'user', content: 'What is 2+2? Reply with just the number.' }, - ], - }, - })) as ChatCompletionResponse; + let models; - expect(response).toMatchObject({ - id: expect.stringMatching(/^chatcmpl-/), - object: 'chat.completion', - model: expect.stringContaining('gpt-4o-mini'), - choices: expect.arrayContaining([ - expect.objectContaining({ - index: 0, - message: expect.objectContaining({ - role: 'assistant', - content: expect.stringContaining('4'), - }), - finish_reason: 'stop', - }), - ]), - usage: expect.objectContaining({ - prompt_tokens: expect.any(Number), - completion_tokens: expect.any(Number), - total_tokens: expect.any(Number), - }), + try { + models = await anthropic.models.list({ limit: 1000 }); + } catch (error) { + throw new Error( + `Failed to fetch models from Anthropic API. ` + + `Ensure ANTHROPIC_API_KEY is valid and network is available. ` + + `Original error: ${error}`, + ); + } + + return models.data + .map(m => m.id) + .filter(id => isModelSupportingTools(id, 'anthropic')) + .sort(); +} + +// ─── Provider contract ─────────────────────────────────────────────────────── +// Every provider must pass the exact same test suite. 
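+// Each entry below supplies the provider-specific pieces the shared suite consumes:
+// a Router configuration, an invalid API key plus the auth-error pattern it should
+// surface, and a helper that lists the provider's models for the tool-support sweep.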
+ +const providers = [ + { + label: 'OpenAI', + describeProvider: describeWithOpenAI, + aiConfig: { + name: 'test', + provider: 'openai' as const, + model: 'gpt-4o-mini', + apiKey: OPENAI_API_KEY, + }, + invalidApiKey: 'sk-invalid-key', + authErrorPattern: /authentication failed|Incorrect API key/i, + fetchModels: fetchChatModelsFromOpenAI, + }, + { + label: 'Anthropic', + describeProvider: describeWithAnthropic, + aiConfig: { + name: 'test', + provider: 'anthropic' as const, + model: 'claude-haiku-4-5-20251001', + apiKey: ANTHROPIC_API_KEY, + }, + invalidApiKey: 'sk-ant-invalid-key', + authErrorPattern: /authentication failed|invalid x-api-key/i, + fetchModels: fetchChatModelsFromAnthropic, + }, +]; + +providers.forEach( + ({ label, describeProvider, aiConfig, invalidApiKey, authErrorPattern, fetchModels }) => { + describeProvider(`${label} Integration (real API)`, () => { + const router = new Router({ + aiConfigurations: [aiConfig], }); - }, 10000); - it('should handle tool calls', async () => { - const response = (await router.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'What is the weather in Paris?' }], - tools: [ - { - type: 'function', - function: { - name: 'get_weather', - description: 'Get the current weather in a given location', - parameters: { - type: 'object', - properties: { - location: { type: 'string', description: 'The city name' }, + describe('route: ai-query', () => { + it('should complete a simple chat request', async () => { + const response = (await router.route({ + route: 'ai-query', + body: { + messages: [ + { role: 'system', content: 'You are a helpful assistant. Be very concise.' }, + { role: 'user', content: 'What is 2+2? Reply with just the number.' }, + ], + }, + })) as ChatCompletionResponse; + + expect(response).toMatchObject({ + object: 'chat.completion', + model: expect.stringContaining(aiConfig.model), + choices: expect.arrayContaining([ + expect.objectContaining({ + index: 0, + message: expect.objectContaining({ + role: 'assistant', + content: expect.stringContaining('4'), + }), + finish_reason: 'stop', + }), + ]), + usage: expect.objectContaining({ + prompt_tokens: expect.any(Number), + completion_tokens: expect.any(Number), + total_tokens: expect.any(Number), + }), + }); + }, 10000); + + it('should handle multiple system messages', async () => { + const response = (await router.route({ + route: 'ai-query', + body: { + messages: [ + { role: 'system', content: 'You are a helpful assistant. Be very concise.' }, + { role: 'system', content: 'The user is asking about math.' }, + { role: 'user', content: 'What is 2+2? Reply with just the number.' }, + ], + }, + })) as ChatCompletionResponse; + + expect(response.choices[0].message.content).toContain('4'); + }, 10000); + + it('should handle tool calls', async () => { + const response = (await router.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'What is the weather in Paris?' 
}], + tools: [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get the current weather in a given location', + parameters: { + type: 'object', + properties: { + location: { type: 'string', description: 'The city name' }, + }, + required: ['location'], + }, }, - required: ['location'], }, - }, + ], + tool_choice: 'auto', }, - ], - tool_choice: 'auto', - }, - })) as ChatCompletionResponse; + })) as ChatCompletionResponse; - expect(response.choices[0].finish_reason).toBe('tool_calls'); - expect(response.choices[0].message.tool_calls).toEqual( - expect.arrayContaining([ - expect.objectContaining({ - type: 'function', - function: expect.objectContaining({ - name: 'get_weather', - arguments: expect.stringContaining('Paris'), - }), - }), - ]), - ); - }, 10000); + expect(response.choices[0].finish_reason).toBe('tool_calls'); + expect(response.choices[0].message.tool_calls).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + type: 'function', + function: expect.objectContaining({ + name: 'get_weather', + arguments: expect.stringContaining('Paris'), + }), + }), + ]), + ); + }, 10000); + + it('should handle tool_choice: required', async () => { + const response = (await router.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'Hello!' }], + tools: [ + { + type: 'function', + function: { + name: 'greet', + description: 'Greet the user', + parameters: { type: 'object', properties: {} }, + }, + }, + ], + tool_choice: 'required', + }, + })) as ChatCompletionResponse; - it('should handle tool_choice: required', async () => { - const response = (await router.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'Hello!' }], - tools: [ - { - type: 'function', - function: { - name: 'greet', - description: 'Greet the user', - parameters: { type: 'object', properties: {} }, - }, + expect(response.choices[0].finish_reason).toBe('tool_calls'); + const toolCall = response.choices[0].message.tool_calls?.[0] as { + function: { name: string }; + }; + expect(toolCall.function.name).toBe('greet'); + }, 10000); + + it('should handle parallel_tool_calls: false', async () => { + const response = (await router.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'Get weather in Paris and London' }], + tools: [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get weather for a city', + parameters: { + type: 'object', + properties: { city: { type: 'string' } }, + required: ['city'], + }, + }, + }, + ], + tool_choice: 'required', + parallel_tool_calls: false, }, - ], - tool_choice: 'required', - }, - })) as ChatCompletionResponse; + })) as ChatCompletionResponse; - expect(response.choices[0].finish_reason).toBe('tool_calls'); - const toolCall = response.choices[0].message.tool_calls?.[0] as { - function: { name: string }; - }; - expect(toolCall.function.name).toBe('greet'); - }, 10000); + // With parallel_tool_calls: false, should only get one tool call + expect(response.choices[0].message.tool_calls).toHaveLength(1); + }, 10000); - it('should handle parallel_tool_calls: false', async () => { - const response = (await router.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'Get weather in Paris and London' }], - tools: [ - { - type: 'function', - function: { - name: 'get_weather', - description: 'Get weather for a city', - parameters: { - type: 'object', - properties: { city: { type: 'string' } }, - required: ['city'], + it('should handle tool_choice with 
specific function name', async () => { + const response = (await router.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'Hello there!' }], + tools: [ + { + type: 'function', + function: { + name: 'greet', + description: 'Greet the user', + parameters: { type: 'object', properties: { name: { type: 'string' } } }, + }, + }, + { + type: 'function', + function: { + name: 'farewell', + description: 'Say goodbye', + parameters: { type: 'object', properties: {} }, + }, }, + ], + // Force specific function to be called + tool_choice: { type: 'function', function: { name: 'greet' } }, + }, + })) as ChatCompletionResponse; + + const toolCalls = response.choices[0].message.tool_calls; + expect(toolCalls).toBeDefined(); + expect(toolCalls).toHaveLength(1); + + const toolCall = toolCalls![0] as { function: { name: string } }; + // Should call 'greet' specifically, not 'farewell' + expect(toolCall.function.name).toBe('greet'); + }, 10000); + + it('should complete multi-turn conversation with tool results', async () => { + const addTool = { + type: 'function' as const, + function: { + name: 'calculate', + description: 'Calculate a math expression', + parameters: { + type: 'object', + properties: { expression: { type: 'string' } }, + required: ['expression'], }, }, - ], - tool_choice: 'required', - parallel_tool_calls: false, - }, - })) as ChatCompletionResponse; + }; - // With parallel_tool_calls: false, should only get one tool call - expect(response.choices[0].message.tool_calls).toHaveLength(1); - }, 10000); + // First turn: get tool call + const response1 = (await router.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'What is 5 + 3?' }], + tools: [addTool], + tool_choice: 'required', + }, + })) as ChatCompletionResponse; + + expect(response1.choices[0].finish_reason).toBe('tool_calls'); + const toolCall = response1.choices[0].message.tool_calls?.[0]; + expect(toolCall).toBeDefined(); + + // Second turn: provide tool result and get final answer + const response2 = (await router.route({ + route: 'ai-query', + body: { + messages: [ + { role: 'user', content: 'What is 5 + 3?' 
}, + response1.choices[0].message, + { + role: 'tool', + tool_call_id: toolCall!.id, + content: '8', + }, + ], + }, + })) as ChatCompletionResponse; + expect(response2.choices[0].finish_reason).toBe('stop'); + expect(response2.choices[0].message.content).toContain('8'); + }, 15000); + }); + + describe('error handling', () => { + it('should throw authentication error with invalid API key', async () => { + const invalidRouter = new Router({ + aiConfigurations: [ + { + name: 'invalid', + provider: aiConfig.provider, + model: aiConfig.model, + apiKey: invalidApiKey, + }, + ], + }); + + await expect( + invalidRouter.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'test' }], + }, + }), + ).rejects.toThrow(authErrorPattern); + }, 10000); + + it('should throw AINotConfiguredError when no AI configuration provided', async () => { + const routerWithoutAI = new Router({}); + + await expect( + routerWithoutAI.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'Hello' }], + }, + }), + ).rejects.toThrow('AI is not configured'); + }); + + it('should throw error for missing messages in body', async () => { + await expect( + router.route({ + route: 'ai-query', + body: {} as any, + }), + ).rejects.toThrow(/messages|required|invalid/i); + }, 10000); + + it('should throw error for empty messages array', async () => { + await expect( + router.route({ + route: 'ai-query', + body: { messages: [] }, + }), + ).rejects.toThrow(/messages|empty|at least one/i); + }, 10000); + + it('should throw error for invalid route', async () => { + await expect( + router.route({ + route: 'invalid-route' as any, + }), + ).rejects.toThrow(/No action to perform|invalid.*route/i); + }); + }); + + describe('Model tool support verification', () => { + let modelsToTest: string[]; + + beforeAll(async () => { + modelsToTest = await fetchModels(); + }); + + it('should have found models from API', () => { + expect(modelsToTest.length).toBeGreaterThan(0); + // eslint-disable-next-line no-console + console.log(`Testing ${modelsToTest.length} ${label} models:`, modelsToTest); + }); + + it('all models should support tool calls', async () => { + const results: { model: string; success: boolean; error?: string }[] = []; + + for (const model of modelsToTest) { + const modelRouter = new Router({ + aiConfigurations: [ + { name: 'test', provider: aiConfig.provider, model, apiKey: aiConfig.apiKey }, + ], + }); + + try { + const response = (await modelRouter.route({ + route: 'ai-query', + body: { + messages: [{ role: 'user', content: 'What is 2+2?' 
}], + tools: [ + { + type: 'function', + function: { + name: 'calculate', + description: 'Calculate a math expression', + parameters: { + type: 'object', + properties: { result: { type: 'number' } }, + }, + }, + }, + ], + tool_choice: 'required', + parallel_tool_calls: false, + }, + })) as ChatCompletionResponse; + + const success = + response.choices[0].finish_reason === 'tool_calls' && + response.choices[0].message.tool_calls !== undefined; + + results.push({ model, success }); + } catch (error) { + const errorMessage = String(error); + + const isInfrastructureError = + errorMessage.includes('rate limit') || + errorMessage.includes('429') || + errorMessage.includes('401') || + errorMessage.includes('Authentication') || + errorMessage.includes('ECONNREFUSED') || + errorMessage.includes('ETIMEDOUT') || + errorMessage.includes('getaddrinfo'); + + if (isInfrastructureError) { + throw new Error(`Infrastructure error testing model ${model}: ${errorMessage}`); + } + + results.push({ model, success: false, error: errorMessage }); + } + } + + const failures = results.filter(r => !r.success); + + if (failures.length > 0) { + const failedModelNames = failures.map(f => f.model).join(', '); + // eslint-disable-next-line no-console + console.error( + `\n❌ ${failures.length} ${label} model(s) failed tool support: ${failedModelNames}\n`, + failures, + ); + } + + expect(failures).toEqual([]); + }, 300000); // 5 minutes for all models + }); + }); + }, +); + +// ─── Router-level tests (provider-independent, run once with OpenAI) ───────── + +describeWithOpenAI('Router integration tests', () => { + const router = new Router({ + aiConfigurations: [ + { + name: 'test-gpt', + provider: 'openai', + model: 'gpt-4o-mini', + apiKey: OPENAI_API_KEY, + }, + ], + }); + + describe('AI configuration selection', () => { it('should select AI configuration by name without fallback warning', async () => { const mockLogger = jest.fn(); const multiConfigRouter = new Router({ @@ -206,7 +520,6 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { })) as ChatCompletionResponse; expect(response.choices[0].message.content).toBeDefined(); - // Verify no fallback warning was logged - this proves 'secondary' was found and selected expect(mockLogger).not.toHaveBeenCalledWith('Warn', expect.stringContaining('not found')); }, 10000); @@ -228,99 +541,11 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { })) as ChatCompletionResponse; expect(response.choices[0].message.content).toBeDefined(); - // Verify fallback warning WAS logged expect(mockLogger).toHaveBeenCalledWith( 'Warn', expect.stringContaining("'non-existent' not found"), ); }, 10000); - - it('should handle tool_choice with specific function name', async () => { - const response = (await router.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'Hello there!' 
}], - tools: [ - { - type: 'function', - function: { - name: 'greet', - description: 'Greet the user', - parameters: { type: 'object', properties: { name: { type: 'string' } } }, - }, - }, - { - type: 'function', - function: { - name: 'farewell', - description: 'Say goodbye', - parameters: { type: 'object', properties: {} }, - }, - }, - ], - // Force specific function to be called - tool_choice: { type: 'function', function: { name: 'greet' } }, - }, - })) as ChatCompletionResponse; - - // When forcing a specific function, OpenAI returns finish_reason: 'stop' but still includes tool_calls - // The key assertion is that the specified function was called - const toolCalls = response.choices[0].message.tool_calls; - expect(toolCalls).toBeDefined(); - expect(toolCalls).toHaveLength(1); - - const toolCall = toolCalls![0] as { function: { name: string } }; - // Should call 'greet' specifically, not 'farewell' - expect(toolCall.function.name).toBe('greet'); - }, 10000); - - it('should complete multi-turn conversation with tool results', async () => { - const addTool = { - type: 'function' as const, - function: { - name: 'calculate', - description: 'Calculate a math expression', - parameters: { - type: 'object', - properties: { expression: { type: 'string' } }, - required: ['expression'], - }, - }, - }; - - // First turn: get tool call - const response1 = (await router.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'What is 5 + 3?' }], - tools: [addTool], - tool_choice: 'required', - }, - })) as ChatCompletionResponse; - - expect(response1.choices[0].finish_reason).toBe('tool_calls'); - const toolCall = response1.choices[0].message.tool_calls?.[0]; - expect(toolCall).toBeDefined(); - - // Second turn: provide tool result and get final answer - const response2 = (await router.route({ - route: 'ai-query', - body: { - messages: [ - { role: 'user', content: 'What is 5 + 3?' 
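// Multi-turn protocol: echo the assistant's tool_call message back, then append a `role: 'tool'` message whose `tool_call_id` links the result to the call.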
}, - response1.choices[0].message, - { - role: 'tool', - tool_call_id: toolCall!.id, - content: '8', - }, - ], - }, - })) as ChatCompletionResponse; - - expect(response2.choices[0].finish_reason).toBe('stop'); - expect(response2.choices[0].message.content).toContain('8'); - }, 15000); }); describe('route: remote-tools', () => { @@ -329,7 +554,6 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { route: 'remote-tools', }); - // No API keys configured, so no tools available expect(response).toEqual([]); }); @@ -347,7 +571,7 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { expect(response).toEqual( expect.arrayContaining([ expect.objectContaining({ - name: 'brave-search', // sanitized name uses hyphen + name: 'brave-search', description: expect.any(String), sourceId: 'brave_search', sourceType: 'server', @@ -369,70 +593,6 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { }); }); - describe('error handling', () => { - it('should throw authentication error with invalid API key', async () => { - const invalidRouter = new Router({ - aiConfigurations: [ - { - name: 'invalid', - provider: 'openai', - model: 'gpt-4o-mini', - apiKey: 'sk-invalid-key', - }, - ], - }); - - await expect( - invalidRouter.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'test' }], - }, - }), - ).rejects.toThrow(/Authentication failed|Incorrect API key/); - }, 10000); - - it('should throw AINotConfiguredError when no AI configuration provided', async () => { - const routerWithoutAI = new Router({}); - - await expect( - routerWithoutAI.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'Hello' }], - }, - }), - ).rejects.toThrow('AI is not configured'); - }); - - it('should throw error for missing messages in body', async () => { - await expect( - router.route({ - route: 'ai-query', - body: {} as any, - }), - ).rejects.toThrow(/messages|required|invalid/i); - }, 10000); - - it('should throw error for empty messages array', async () => { - // OpenAI requires at least one message - await expect( - router.route({ - route: 'ai-query', - body: { messages: [] }, - }), - ).rejects.toThrow(/messages|empty|at least one/i); - }, 10000); - - it('should throw error for invalid route', async () => { - await expect( - router.route({ - route: 'invalid-route' as any, - }), - ).rejects.toThrow(/No action to perform|invalid.*route/i); - }); - }); - describe('MCP Server Integration', () => { const MCP_PORT = 3124; const MCP_TOKEN = 'test-token'; @@ -453,21 +613,13 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { beforeAll(() => { const mcp = new McpServer({ name: 'calculator', version: '1.0.0' }); - mcp.registerTool( - 'add', - { inputSchema: { a: z.number(), b: z.number() } }, - async ({ a, b }) => { - return { content: [{ type: 'text' as const, text: String(a + b) }] }; - }, - ); + mcp.tool('add', { a: z.number(), b: z.number() }, async ({ a, b }) => { + return { content: [{ type: 'text', text: String(a + b) }] }; + }); - mcp.registerTool( - 'multiply', - { inputSchema: { a: z.number(), b: z.number() } }, - async ({ a, b }) => { - return { content: [{ type: 'text' as const, text: String(a * b) }] }; - }, - ); + mcp.tool('multiply', { a: z.number(), b: z.number() }, async ({ a, b }) => { + return { content: [{ type: 'text', text: String(a * b) }] }; + }); mcpServer = runMcpServer(mcp, MCP_PORT, MCP_TOKEN); }); @@ -525,29 +677,25 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { logger: mockLogger, }); - // Configure working 
server + unreachable server const mixedConfig = { configs: { - calculator: mcpConfig.configs.calculator, // working + calculator: mcpConfig.configs.calculator, broken: { - url: 'http://localhost:59999/mcp', // unreachable port + url: 'http://localhost:59999/mcp', type: 'http' as const, }, }, }; - // Should still return tools from the working server const response = (await routerWithLogger.route({ route: 'remote-tools', mcpConfigs: mixedConfig, })) as Array<{ name: string; sourceId: string }>; - // Working server's tools should be available const toolNames = response.map(t => t.name); expect(toolNames).toContain('add'); expect(toolNames).toContain('multiply'); - // Verify the error for 'broken' server was logged expect(mockLogger).toHaveBeenCalledWith( 'Error', expect.stringContaining('broken'), @@ -576,16 +724,13 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { }, }; - // Should return empty array when auth fails (server rejects) const response = (await routerWithLogger.route({ route: 'remote-tools', mcpConfigs: badAuthConfig, })) as Array<{ name: string }>; - // No tools loaded due to auth failure expect(response).toEqual([]); - // Verify the auth error was logged expect(mockLogger).toHaveBeenCalledWith( 'Error', expect.stringContaining('calculator'), @@ -603,7 +748,6 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { }, }; - // ai-query should still work (without MCP tools) const response = (await router.route({ route: 'ai-query', body: { @@ -618,17 +762,15 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { describe('route: invoke-remote-tool (with MCP)', () => { it('should invoke MCP add tool and return result', async () => { - // MCP tools expect arguments directly matching their schema const response = await router.route({ route: 'invoke-remote-tool', query: { 'tool-name': 'add' }, body: { - inputs: { a: 5, b: 3 } as any, // Direct tool arguments + inputs: { a: 5, b: 3 } as any, }, mcpConfigs: mcpConfig, }); - // MCP tool returns the computed result as string expect(response).toBe('8'); }, 10000); @@ -637,7 +779,7 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { route: 'invoke-remote-tool', query: { 'tool-name': 'multiply' }, body: { - inputs: { a: 6, b: 7 } as any, // Direct tool arguments + inputs: { a: 6, b: 7 } as any, }, mcpConfigs: mcpConfig, }); @@ -647,7 +789,7 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { }); describe('route: ai-query (with MCP tools)', () => { - it('should allow OpenAI to call MCP tools', async () => { + it('should allow LLM to call MCP tools', async () => { const response = (await router.route({ route: 'ai-query', body: { @@ -692,9 +834,7 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { expect(args.b).toBe(27); }, 10000); - it('should enrich MCP tool definitions when calling OpenAI', async () => { - // This test verifies that even with minimal tool definition, - // the router enriches it with the full MCP schema + it('should enrich MCP tool definitions', async () => { const response = (await router.route({ route: 'ai-query', body: { @@ -702,7 +842,6 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { tools: [ { type: 'function', - // Minimal definition - router should enrich from MCP function: { name: 'multiply', parameters: {} }, }, ], @@ -718,97 +857,10 @@ describeWithOpenAI('OpenAI Integration (real API)', () => { }; expect(toolCall.function.name).toBe('multiply'); - // The enriched schema allows OpenAI to properly parse the arguments const args = 
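// `function.arguments` is a JSON-encoded string in the OpenAI tool-call format, hence the parse below.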
JSON.parse(toolCall.function.arguments); expect(typeof args.a).toBe('number'); expect(typeof args.b).toBe('number'); }, 10000); }); }); - - describe('Model tool support verification', () => { - let modelsToTest: string[]; - - beforeAll(async () => { - modelsToTest = await fetchChatModelsFromOpenAI(); - }); - - it('should have found chat models from OpenAI API', () => { - expect(modelsToTest.length).toBeGreaterThan(0); - // eslint-disable-next-line no-console - console.log(`Testing ${modelsToTest.length} models:`, modelsToTest); - }); - - it('all chat models should support tool calls', async () => { - const results: { model: string; success: boolean; error?: string }[] = []; - - for (const model of modelsToTest) { - const modelRouter = new Router({ - aiConfigurations: [{ name: 'test', provider: 'openai', model, apiKey: OPENAI_API_KEY }], - }); - - try { - const response = (await modelRouter.route({ - route: 'ai-query', - body: { - messages: [{ role: 'user', content: 'What is 2+2?' }], - tools: [ - { - type: 'function', - function: { - name: 'calculate', - description: 'Calculate a math expression', - parameters: { type: 'object', properties: { result: { type: 'number' } } }, - }, - }, - ], - tool_choice: 'required', - parallel_tool_calls: false, - }, - })) as ChatCompletionResponse; - - const success = - response.choices[0].finish_reason === 'tool_calls' && - response.choices[0].message.tool_calls !== undefined; - - results.push({ model, success }); - } catch (error) { - const errorMessage = String(error); - - // Infrastructure errors should fail the test immediately - const isInfrastructureError = - errorMessage.includes('rate limit') || - errorMessage.includes('429') || - errorMessage.includes('401') || - errorMessage.includes('Authentication') || - errorMessage.includes('ECONNREFUSED') || - errorMessage.includes('ETIMEDOUT') || - errorMessage.includes('getaddrinfo'); - - if (isInfrastructureError) { - throw new Error(`Infrastructure error testing model ${model}: ${errorMessage}`); - } - - results.push({ model, success: false, error: errorMessage }); - } - } - - const failures = results.filter(r => !r.success); - - if (failures.length > 0) { - const failedModelNames = failures.map(f => f.model).join(', '); - // eslint-disable-next-line no-console - console.error( - `\n❌ ${failures.length} model(s) failed: ${failedModelNames}\n\n` + - `To fix this, add the failing model(s) to the blacklist in:\n` + - ` packages/ai-proxy/src/supported-models.ts\n\n` + - `Add to UNSUPPORTED_MODEL_PREFIXES (for prefix match)\n` + - `or UNSUPPORTED_MODEL_PATTERNS (for contains match)\n`, - failures, - ); - } - - expect(failures).toEqual([]); - }, 300000); // 5 minutes for all models - }); }); diff --git a/packages/ai-proxy/test/provider-dispatcher.test.ts b/packages/ai-proxy/test/provider-dispatcher.test.ts index a83102266..d32ac6d00 100644 --- a/packages/ai-proxy/test/provider-dispatcher.test.ts +++ b/packages/ai-proxy/test/provider-dispatcher.test.ts @@ -1,8 +1,16 @@ import type { DispatchBody } from '../src'; +import { AIMessage } from '@langchain/core/messages'; import { convertToOpenAIFunction } from '@langchain/core/utils/function_calling'; +import { ChatOpenAI } from '@langchain/openai'; -import { AINotConfiguredError, ProviderDispatcher, RemoteTools } from '../src'; +import { + AIBadRequestError, + AINotConfiguredError, + AIUnprocessableError, + ProviderDispatcher, + RemoteTools, +} from '../src'; // Mock raw OpenAI response (returned via __includeRawResponse: true) const mockOpenAIResponse = { @@ 
-45,166 +53,165 @@ jest.mock('@langchain/openai', () => ({ })), })); +const anthropicInvokeMock = jest.fn(); +const anthropicBindToolsMock = jest.fn().mockReturnValue({ invoke: anthropicInvokeMock }); + +jest.mock('@langchain/anthropic', () => ({ + ChatAnthropic: jest.fn().mockImplementation(() => ({ + invoke: anthropicInvokeMock, + bindTools: anthropicBindToolsMock, + })), +})); + +function buildBody(overrides: Partial<DispatchBody> = {}): DispatchBody { + return { tools: [], messages: [], ...overrides } as unknown as DispatchBody; +} + +function mockAnthropicResponse( + content: AIMessage['content'] = 'Response', + extra?: Record<string, unknown>, +): AIMessage { + const response = new AIMessage({ content }); + if (extra) Object.assign(response, extra); + anthropicInvokeMock.mockResolvedValueOnce(response); + + return response; +} + describe('ProviderDispatcher', () => { const apiKeys = { AI_REMOTE_TOOL_BRAVE_SEARCH_API_KEY: 'api-key' }; + const openaiConfig = { + name: 'gpt4', + provider: 'openai' as const, + apiKey: 'dev', + model: 'gpt-4o', + }; + + const anthropicConfig = { + name: 'claude', + provider: 'anthropic' as const, + apiKey: 'test-api-key', + model: 'claude-3-5-sonnet-latest', + }; + beforeEach(() => { jest.clearAllMocks(); }); describe('dispatch', () => { - describe('when AI is not configured', () => { - it('should throw AINotConfiguredError', async () => { - const dispatcher = new ProviderDispatcher(null, new RemoteTools(apiKeys)); - await expect(dispatcher.dispatch({} as DispatchBody)).rejects.toThrow(AINotConfiguredError); - await expect(dispatcher.dispatch({} as DispatchBody)).rejects.toThrow( - 'AI is not configured', - ); - }); + it('should throw AINotConfiguredError when no provider is configured', async () => { + const dispatcher = new ProviderDispatcher(null, new RemoteTools(apiKeys)); + + await expect(dispatcher.dispatch(buildBody())).rejects.toThrow(AINotConfiguredError); + await expect(dispatcher.dispatch(buildBody())).rejects.toThrow('AI is not configured'); + }); + + it('should throw AIBadRequestError for unknown provider', () => { + expect( + () => + new ProviderDispatcher( + { provider: 'unknown', name: 'test', model: 'x' } as any, + new RemoteTools(apiKeys), + ), + ).toThrow(new AIBadRequestError("Unsupported AI provider 'unknown'.")); }); }); describe('openai', () => { - describe('when openai is configured', () => { - it('should return the response in OpenAI-compatible format', async () => { - const dispatcher = new ProviderDispatcher( - { - name: 'gpt4', - provider: 'openai', - apiKey: 'dev', - model: 'gpt-4o', - }, - new RemoteTools(apiKeys), - ); - const response = await dispatcher.dispatch({ - tools: [], - messages: [], - } as unknown as DispatchBody); + let dispatcher: ProviderDispatcher; - // Response is the raw OpenAI response (via __includeRawResponse) - expect(response).toEqual(mockOpenAIResponse); - }); + beforeEach(() => { + dispatcher = new ProviderDispatcher(openaiConfig, new RemoteTools(apiKeys)); + }); - describe('when the user tries to override the configuration', () => { - it('should only pass allowed parameters', async () => { - const dispatcher = new ProviderDispatcher( - { - name: 'base', - provider: 'openai', - apiKey: 'dev', - model: 'BASE MODEL', - }, - new RemoteTools(apiKeys), - ); - const messages = [{ role: 'user', content: 'Hello' }]; - await dispatcher.dispatch({ - model: 'OTHER MODEL', - propertyInjection: 'hack', - tools: [], - messages, - tool_choice: 'auto', - } as unknown as DispatchBody); + it('should return the raw OpenAI response', async () => { + 
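// `dispatch` resolves with the provider's raw chat.completion payload (mockOpenAIResponse above, captured via __includeRawResponse) rather than a LangChain message.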
const response = await dispatcher.dispatch(buildBody()); - // When no tools, invoke is called directly with messages - expect(invokeMock).toHaveBeenCalledWith(messages); - }); + expect(response).toEqual(mockOpenAIResponse); + }); + + it('should not forward user-supplied model or arbitrary properties to the LLM', async () => { + const customConfig = { ...openaiConfig, name: 'base', model: 'BASE MODEL' }; + const customDispatcher = new ProviderDispatcher(customConfig, new RemoteTools(apiKeys)); + + await customDispatcher.dispatch( + buildBody({ + model: 'OTHER MODEL', + messages: [{ role: 'user', content: 'Hello' }], + } as unknown as DispatchBody), + ); + + expect(ChatOpenAI).toHaveBeenCalledWith(expect.objectContaining({ model: 'BASE MODEL' })); + expect(ChatOpenAI).not.toHaveBeenCalledWith( + expect.objectContaining({ model: 'OTHER MODEL' }), + ); + }); + + describe('error handling', () => { + it('should wrap generic errors as AIUnprocessableError with cause', async () => { + const original = new Error('OpenAI error'); + invokeMock.mockRejectedValueOnce(original); + + const thrown = await dispatcher.dispatch(buildBody()).catch(e => e); + + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('Error while calling OpenAI: OpenAI error'); + expect(thrown.cause).toBe(original); }); - describe('when the openai client throws an error', () => { - it('should throw an OpenAIUnprocessableError', async () => { - const dispatcher = new ProviderDispatcher( - { - name: 'gpt4', - provider: 'openai', - apiKey: 'dev', - model: 'gpt-4o', - }, - new RemoteTools(apiKeys), - ); - invokeMock.mockRejectedValueOnce(new Error('OpenAI error')); + it('should wrap 429 as AIUnprocessableError with rate limit message', async () => { + const error = Object.assign(new Error('Too many requests'), { status: 429 }); + invokeMock.mockRejectedValueOnce(error); - await expect( - dispatcher.dispatch({ tools: [], messages: [] } as unknown as DispatchBody), - ).rejects.toThrow('Error while calling OpenAI: OpenAI error'); - }); + const thrown = await dispatcher.dispatch(buildBody()).catch(e => e); - it('should throw rate limit error when status is 429', async () => { - const dispatcher = new ProviderDispatcher( - { name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, - new RemoteTools(apiKeys), - ); - const rateLimitError = new Error('Too many requests') as Error & { status?: number }; - rateLimitError.status = 429; - invokeMock.mockRejectedValueOnce(rateLimitError); - - await expect( - dispatcher.dispatch({ tools: [], messages: [] } as unknown as DispatchBody), - ).rejects.toThrow('Rate limit exceeded: Too many requests'); - }); + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('OpenAI rate limit exceeded: Too many requests'); + expect(thrown.cause).toBe(error); + }); - it('should throw authentication error when status is 401', async () => { - const dispatcher = new ProviderDispatcher( - { name: 'gpt4', provider: 'openai', apiKey: 'invalid', model: 'gpt-4o' }, - new RemoteTools(apiKeys), - ); - const authError = new Error('Invalid API key') as Error & { status?: number }; - authError.status = 401; - invokeMock.mockRejectedValueOnce(authError); - - await expect( - dispatcher.dispatch({ tools: [], messages: [] } as unknown as DispatchBody), - ).rejects.toThrow('Authentication failed: Invalid API key'); - }); + it('should wrap 401 as AIUnprocessableError with auth message', async () => { + const error = Object.assign(new Error('Invalid API key'), { 
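// Provider-style error shape: an Error carrying the HTTP status, mirroring the 429 case above.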
status: 401 }); + invokeMock.mockRejectedValueOnce(error); + + const thrown = await dispatcher.dispatch(buildBody()).catch(e => e); + + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('OpenAI authentication failed: Invalid API key'); + expect(thrown.cause).toBe(error); }); - describe('when rawResponse is missing', () => { - it('should throw an error indicating API change', async () => { - const dispatcher = new ProviderDispatcher( - { name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, - new RemoteTools(apiKeys), - ); - invokeMock.mockResolvedValueOnce({ - content: 'response', - additional_kwargs: { __raw_response: null }, - }); - - await expect( - dispatcher.dispatch({ tools: [], messages: [] } as unknown as DispatchBody), - ).rejects.toThrow( - 'OpenAI response missing raw response data. This may indicate an API change.', - ); + it('should throw when rawResponse is missing', async () => { + invokeMock.mockResolvedValueOnce({ + content: 'response', + additional_kwargs: { __raw_response: null }, }); + + await expect(dispatcher.dispatch(buildBody())).rejects.toThrow( + 'OpenAI response missing raw response data. This may indicate an API change.', + ); }); }); - describe('when there is a remote tool', () => { - it('should enhance the remote tools definition', async () => { + describe('remote tools', () => { + it('should enhance remote tools definition with full schema', async () => { const remoteTools = new RemoteTools(apiKeys); - remoteTools.invokeTool = jest.fn().mockResolvedValue('response'); - - const dispatcher = new ProviderDispatcher( - { - name: 'gpt4', - provider: 'openai', - apiKey: 'dev', - model: 'gpt-4o', - }, - remoteTools, + const remoteDispatcher = new ProviderDispatcher(openaiConfig, remoteTools); + + await remoteDispatcher.dispatch( + buildBody({ + tools: [ + { + type: 'function', + // Front end sends empty parameters because it doesn't know the tool schema + function: { name: remoteTools.tools[0].base.name, parameters: {} }, + }, + ], + messages: [{ role: 'user', content: 'test' }], + }), ); - const messages = [{ role: 'user', content: 'test' }]; - await dispatcher.dispatch({ - tools: [ - { - type: 'function', - // parameters is an empty object because it simulates the front end sending an empty object - // because it doesn't know the parameters of the tool - function: { name: remoteTools.tools[0].base.name, parameters: {} }, - }, - ], - messages, - } as unknown as DispatchBody); - // When tools are provided, bindTools is called first expect(bindToolsMock).toHaveBeenCalledWith( [ { @@ -214,41 +221,50 @@ describe('ProviderDispatcher', () => { ], { tool_choice: undefined }, ); - expect(invokeMock).toHaveBeenCalledWith(messages); }); - }); - describe('when parallel_tool_calls is provided', () => { - it('should pass parallel_tool_calls to bindTools', async () => { - const dispatcher = new ProviderDispatcher( - { name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, - new RemoteTools(apiKeys), - ); + it('should not modify non-remote tools', async () => { + const remoteDispatcher = new ProviderDispatcher(openaiConfig, new RemoteTools(apiKeys)); - await dispatcher.dispatch({ - messages: [{ role: 'user', content: 'test' }], - tools: [{ type: 'function', function: { name: 'test', parameters: {} } }], - tool_choice: 'auto', - parallel_tool_calls: false, - } as unknown as DispatchBody); + await remoteDispatcher.dispatch( + buildBody({ + tools: [{ type: 'function', function: { name: 'notRemoteTool', parameters: {} } }], 
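// 'notRemoteTool' matches no remote tool name, so its definition must pass through to bindTools unmodified.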
+ messages: [{ role: 'user', content: 'test' }], + }), + ); expect(bindToolsMock).toHaveBeenCalledWith( - [{ type: 'function', function: { name: 'test', parameters: {} } }], - { tool_choice: 'auto', parallel_tool_calls: false }, + [{ type: 'function', function: { name: 'notRemoteTool', parameters: {} } }], + { tool_choice: undefined }, ); }); + }); - it('should pass parallel_tool_calls: true when explicitly set', async () => { - const dispatcher = new ProviderDispatcher( - { name: 'gpt4', provider: 'openai', apiKey: 'dev', model: 'gpt-4o' }, - new RemoteTools(apiKeys), + describe('parallel_tool_calls', () => { + it('should pass parallel_tool_calls: false to bindTools', async () => { + await dispatcher.dispatch( + buildBody({ + tools: [{ type: 'function', function: { name: 'test', parameters: {} } }], + messages: [{ role: 'user', content: 'test' }], + tool_choice: 'auto', + parallel_tool_calls: false, + }), ); - await dispatcher.dispatch({ - messages: [{ role: 'user', content: 'test' }], - tools: [{ type: 'function', function: { name: 'test', parameters: {} } }], - parallel_tool_calls: true, - } as unknown as DispatchBody); + expect(bindToolsMock).toHaveBeenCalledWith(expect.any(Array), { + tool_choice: 'auto', + parallel_tool_calls: false, + }); + }); + + it('should pass parallel_tool_calls: true to bindTools', async () => { + await dispatcher.dispatch( + buildBody({ + tools: [{ type: 'function', function: { name: 'test', parameters: {} } }], + messages: [{ role: 'user', content: 'test' }], + parallel_tool_calls: true, + }), + ); expect(bindToolsMock).toHaveBeenCalledWith(expect.any(Array), { tool_choice: undefined, @@ -256,43 +272,180 @@ describe('ProviderDispatcher', () => { }); }); }); + }); - describe('when there is not remote tool', () => { - it('should not enhance the remote tools definition', async () => { - const remoteTools = new RemoteTools(apiKeys); - remoteTools.invokeTool = jest.fn().mockResolvedValue('response'); - - const dispatcher = new ProviderDispatcher( - { - name: 'gpt4', - provider: 'openai', - apiKey: 'dev', - model: 'gpt-4o', - }, - remoteTools, - ); - const messages = [{ role: 'user', content: 'test' }]; - await dispatcher.dispatch({ - tools: [ - { - type: 'function', - function: { name: 'notRemoteTool', parameters: {} }, - }, + describe('anthropic', () => { + let dispatcher: ProviderDispatcher; + + beforeEach(() => { + dispatcher = new ProviderDispatcher(anthropicConfig, new RemoteTools(apiKeys)); + }); + + it('should not forward user-supplied model from body to the LLM', async () => { + const { ChatAnthropic } = jest.requireMock('@langchain/anthropic'); + mockAnthropicResponse(); + + await dispatcher.dispatch( + buildBody({ + model: 'OTHER MODEL', + messages: [{ role: 'user', content: 'Hello' }], + } as unknown as DispatchBody), + ); + + expect(ChatAnthropic).toHaveBeenCalledWith( + expect.objectContaining({ model: 'claude-3-5-sonnet-latest' }), + ); + expect(ChatAnthropic).not.toHaveBeenCalledWith( + expect.objectContaining({ model: 'OTHER MODEL' }), + ); + }); + + it('should return an OpenAI-compatible response', async () => { + mockAnthropicResponse('Hello from Claude', { + id: 'msg_123', + usage_metadata: { input_tokens: 10, output_tokens: 20, total_tokens: 30 }, + }); + + const response = await dispatcher.dispatch( + buildBody({ messages: [{ role: 'user', content: 'Hello' }] }), + ); + + expect(response).toEqual( + expect.objectContaining({ + id: 'msg_123', + object: 'chat.completion', + model: 'claude-3-5-sonnet-latest', + choices: [ + 
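// Anthropic's AIMessage is reshaped into the OpenAI envelope asserted below: id/object/model at the top level, message and finish_reason per choice.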
expect.objectContaining({ + message: expect.objectContaining({ + role: 'assistant', + content: 'Hello from Claude', + }), + finish_reason: 'stop', + }), ], - messages, - } as unknown as DispatchBody); + }), + ); + }); - // When tools are provided, bindTools is called - expect(bindToolsMock).toHaveBeenCalledWith( + describe('tool binding', () => { + it('should bind tools and pass converted tool_choice to Anthropic', async () => { + mockAnthropicResponse(); + + await dispatcher.dispatch( + buildBody({ + tools: [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get weather for a city', + parameters: { type: 'object', properties: { city: { type: 'string' } } }, + }, + }, + ], + messages: [{ role: 'user', content: 'What is the weather in Paris?' }], + tool_choice: 'auto', + }), + ); + + expect(anthropicBindToolsMock).toHaveBeenCalledWith( [ { type: 'function', - function: { name: 'notRemoteTool', parameters: {} }, + function: { + name: 'get_weather', + description: 'Get weather for a city', + parameters: { type: 'object', properties: { city: { type: 'string' } } }, + }, }, ], - { tool_choice: undefined }, + { tool_choice: 'auto' }, + ); + }); + + it('should enhance remote tools definition with full schema', async () => { + mockAnthropicResponse(); + const remoteTools = new RemoteTools(apiKeys); + const remoteDispatcher = new ProviderDispatcher(anthropicConfig, remoteTools); + + await remoteDispatcher.dispatch( + buildBody({ + tools: [ + { + type: 'function', + function: { name: remoteTools.tools[0].base.name, parameters: {} }, + }, + ], + }), + ); + + expect(anthropicBindToolsMock).toHaveBeenCalledWith( + [{ type: 'function', function: convertToOpenAIFunction(remoteTools.tools[0].base) }], + expect.anything(), ); - expect(invokeMock).toHaveBeenCalledWith(messages); + }); + }); + + describe('error handling', () => { + it('should wrap generic errors as AIUnprocessableError with cause', async () => { + const original = new Error('Anthropic API error'); + anthropicInvokeMock.mockRejectedValueOnce(original); + + const thrown = await dispatcher + .dispatch(buildBody({ messages: [{ role: 'user', content: 'Hello' }] })) + .catch(e => e); + + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('Error while calling Anthropic: Anthropic API error'); + expect(thrown.cause).toBe(original); + }); + + it('should wrap 429 as AIUnprocessableError with rate limit message', async () => { + const error = Object.assign(new Error('Too many requests'), { status: 429 }); + anthropicInvokeMock.mockRejectedValueOnce(error); + + const thrown = await dispatcher + .dispatch(buildBody({ messages: [{ role: 'user', content: 'Hello' }] })) + .catch(e => e); + + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('Anthropic rate limit exceeded: Too many requests'); + expect(thrown.cause).toBe(error); + }); + + it('should wrap 401 as AIUnprocessableError with auth message', async () => { + const error = Object.assign(new Error('Invalid API key'), { status: 401 }); + anthropicInvokeMock.mockRejectedValueOnce(error); + + const thrown = await dispatcher + .dispatch(buildBody({ messages: [{ role: 'user', content: 'Hello' }] })) + .catch(e => e); + + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('Anthropic authentication failed: Invalid API key'); + expect(thrown.cause).toBe(error); + }); + + it('should handle non-Error throws gracefully', async () => { + anthropicInvokeMock.mockRejectedValueOnce('string 
error'); + + const thrown = await dispatcher + .dispatch(buildBody({ messages: [{ role: 'user', content: 'Hello' }] })) + .catch(e => e); + + expect(thrown).toBeInstanceOf(AIUnprocessableError); + expect(thrown.message).toBe('Error while calling Anthropic: string error'); + }); + + it('should not wrap conversion errors as provider errors', async () => { + await expect( + dispatcher.dispatch( + buildBody({ + messages: [{ role: 'tool', content: 'result' }], + }), + ), + ).rejects.toThrow(AIBadRequestError); }); }); }); diff --git a/packages/ai-proxy/test/router.test.ts b/packages/ai-proxy/test/router.test.ts index 8a64759b2..0a6d25587 100644 --- a/packages/ai-proxy/test/router.test.ts +++ b/packages/ai-proxy/test/router.test.ts @@ -1,8 +1,9 @@ -import type { DispatchBody, InvokeRemoteToolArgs, Route } from '../src'; +import type { DispatchBody, InvokeRemoteToolArgs } from '../src'; import type { Logger } from '@forestadmin/datasource-toolkit'; import { AIModelNotSupportedError, Router } from '../src'; import McpClient from '../src/mcp-client'; +import ProviderDispatcher from '../src/provider-dispatcher'; const invokeToolMock = jest.fn(); const toolDefinitionsForFrontend = [{ name: 'tool-name', description: 'tool-description' }]; @@ -20,15 +21,13 @@ jest.mock('../src/remote-tools', () => { const dispatchMock = jest.fn(); jest.mock('../src/provider-dispatcher', () => { return { - ProviderDispatcher: jest.fn().mockImplementation(() => ({ + __esModule: true, + default: jest.fn().mockImplementation(() => ({ dispatch: dispatchMock, })), }; }); -// eslint-disable-next-line import/first -import { ProviderDispatcher } from '../src/provider-dispatcher'; - const ProviderDispatcherMock = ProviderDispatcher as jest.MockedClass<typeof ProviderDispatcher>; jest.mock('../src/mcp-client', () => { @@ -123,15 +122,14 @@ describe('route', () => { it('falls back to first configuration with warning when ai-name not found', async () => { const mockLogger = jest.fn(); + const gpt4Config = { + name: 'gpt4', + provider: 'openai' as const, + apiKey: 'dev', + model: 'gpt-4o', + }; const router = new Router({ - aiConfigurations: [ - { - name: 'gpt4', - provider: 'openai', - apiKey: 'dev', - model: 'gpt-4o', - }, - ], + aiConfigurations: [gpt4Config], logger: mockLogger, }); @@ -143,9 +141,9 @@ describe('route', () => { expect(mockLogger).toHaveBeenCalledWith( 'Warn', - "AI configuration 'non-existent' not found. Falling back to 'gpt4'.", + "AI configuration 'non-existent' not found. 
Falling back to 'gpt4' (provider: openai, model: gpt-4o)", ); - expect(dispatchMock).toHaveBeenCalled(); + expect(ProviderDispatcherMock).toHaveBeenCalledWith(gpt4Config, expect.anything()); }); }); @@ -431,5 +429,84 @@ describe('route', () => { }), ).toThrow("Model 'gpt-4' does not support tools"); }); + + describe('should accept supported models', () => { + const supportedModels = [ + 'gpt-4o', + 'gpt-4o-mini', + 'gpt-4o-2024-08-06', + 'gpt-4-turbo', + 'gpt-4-turbo-2024-04-09', + 'gpt-4.1', + 'gpt-4.1-mini', + 'gpt-3.5-turbo', + 'gpt-3.5-turbo-0125', + 'gpt-3.5', + 'gpt-5', + 'unknown-model', + 'future-gpt-model', + ]; + + it.each(supportedModels)('%s', model => { + expect( + () => + new Router({ + aiConfigurations: [{ name: 'test', provider: 'openai', apiKey: 'dev', model }], + }), + ).not.toThrow(); + }); + }); + + it('should accept supported Anthropic configurations', () => { + expect( + () => + new Router({ + aiConfigurations: [ + { + name: 'claude', + provider: 'anthropic', + apiKey: 'key', + model: 'claude-3-5-sonnet-latest', + }, + ], + }), + ).not.toThrow(); + }); + + describe('should reject known unsupported OpenAI models', () => { + const unsupportedModels = [ + 'gpt-4', + 'gpt-4-0613', + 'o1', + 'o3-mini', + 'text-davinci-003', + 'davinci', + 'curie', + 'babbage', + 'ada', + ]; + + it.each(unsupportedModels)('%s', model => { + expect( + () => + new Router({ + aiConfigurations: [{ name: 'test', provider: 'openai', apiKey: 'dev', model }], + }), + ).toThrow(AIModelNotSupportedError); + }); + }); + + describe('should reject deprecated Anthropic models', () => { + const deprecatedModels = ['claude-3-7-sonnet-20250219', 'claude-3-haiku-20240307']; + + it.each(deprecatedModels)('%s', model => { + expect( + () => + new Router({ + aiConfigurations: [{ name: 'test', provider: 'anthropic', apiKey: 'dev', model }], + }), + ).toThrow(AIModelNotSupportedError); + }); + }); }); }); diff --git a/yarn.lock b/yarn.lock index 7c0b51e3b..f29862524 100644 --- a/yarn.lock +++ b/yarn.lock @@ -43,6 +43,13 @@ "@jridgewell/gen-mapping" "^0.3.0" "@jridgewell/trace-mapping" "^0.3.9" +"@anthropic-ai/sdk@^0.73.0": + version "0.73.0" + resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.73.0.tgz#ee4d744f3e0fbce3111edf3e8e67c6613fc6929f" + integrity sha512-URURVzhxXGJDGUGFunIOtBlSl7KWvZiAAKY/ttTkZAkXT9bTPqdk2eK0b8qqSxXpikh3QKPnPYpiyX98zf5ebw== + dependencies: + json-schema-to-ts "^3.1.1" + "@aws-crypto/crc32@3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@aws-crypto/crc32/-/crc32-3.0.0.tgz#07300eca214409c33e3ff769cd5697b57fdd38fa" @@ -1406,6 +1413,11 @@ dependencies: "@babel/helper-plugin-utils" "^7.22.5" +"@babel/runtime@^7.18.3": + version "7.28.6" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.6.tgz#d267a43cb1836dc4d182cce93ae75ba954ef6d2b" + integrity sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA== + "@babel/template@^7.22.15", "@babel/template@^7.3.3": version "7.22.15" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.15.tgz#09576efc3830f0430f4548ef971dde1350ef2f38" @@ -2397,6 +2409,14 @@ koa-compose "^4.1.0" path-to-regexp "^6.3.0" +"@langchain/anthropic@1.3.17": + version "1.3.17" + resolved "https://registry.yarnpkg.com/@langchain/anthropic/-/anthropic-1.3.17.tgz#441a4bc1e38c41760e8957e87ef8f549c9c62f09" + integrity sha512-5z/dqw/atLvH1hGtHrF9q4ZT3uSL34Y3XmSYKMtpgsibVnlFG2QmxikkjJnwAYGBTjrsTQuE7khrBPFkJSEucA== + dependencies: + "@anthropic-ai/sdk" "^0.73.0" + zod "^3.25.76 || 
^4" + "@langchain/classic@1.0.9": version "1.0.9" resolved "https://registry.yarnpkg.com/@langchain/classic/-/classic-1.0.9.tgz#bdb19539db47469370727f32e1bf63c52777426b" @@ -11606,6 +11626,14 @@ json-schema-ref-resolver@^1.0.1: dependencies: fast-deep-equal "^3.1.3" +json-schema-to-ts@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/json-schema-to-ts/-/json-schema-to-ts-3.1.1.tgz#81f3acaf5a34736492f6f5f51870ef9ece1ca853" + integrity sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g== + dependencies: + "@babel/runtime" "^7.18.3" + ts-algebra "^2.0.0" + json-schema-traverse@^0.4.1: version "0.4.1" resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" @@ -17415,6 +17443,11 @@ trough@^1.0.0: resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406" integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA== +ts-algebra@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ts-algebra/-/ts-algebra-2.0.0.tgz#4e3e0953878f26518fce7f6bb115064a65388b7a" + integrity sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw== + ts-invariant@^0.4.0: version "0.4.4" resolved "https://registry.yarnpkg.com/ts-invariant/-/ts-invariant-0.4.4.tgz#97a523518688f93aafad01b0e80eb803eb2abd86"