Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
58 commits
Select commit Hold shift + click to select a range
dec0642
fix(ai-proxy): validate model tool support at Router init (fail fast)
Feb 5, 2026
c781b17
test(ai-proxy): add end-to-end integration tests with real OpenAI API
Feb 4, 2026
73f5df0
test(ai-proxy): add comprehensive integration tests for production re…
Feb 4, 2026
e9ab433
fix(ai-proxy): disable langchain retries by default
Feb 4, 2026
b1e54e8
test(ai-proxy): improve integration test quality based on PR review
Feb 4, 2026
c30d4f5
test(ai-proxy): fix tool_choice with specific function name test
Feb 4, 2026
379f2da
refactor(ai-proxy): use generic error message for AINotConfiguredError
Feb 4, 2026
af8b852
fix(ai-proxy): properly close MCP server in tests to avoid forceExit
Feb 4, 2026
c9adb2f
test(ai-proxy): reduce test timeouts from 30s to 10s
Feb 4, 2026
6a56bf0
feat(ai-proxy): add Anthropic LLM provider support
Jan 23, 2026
ea3ab0f
fix(ai-proxy): handle null content and JSON parse errors in Anthropic…
Jan 23, 2026
c1a7baf
test(ai-proxy): add Anthropic integration tests
Feb 4, 2026
927c09c
refactor(ai-proxy): consolidate LLM integration tests
Feb 4, 2026
ee6474c
fix(ai-proxy): fix lint errors in provider-dispatcher
Feb 4, 2026
5f1bfbd
test(ai-proxy): increase timeout for MCP tool enrichment test
Feb 4, 2026
d49cdb3
test(ai-proxy): add model compatibility tests for all OpenAI and Anth…
Feb 4, 2026
94e216f
fix: package
Feb 5, 2026
f7e3589
refactor(ai-proxy): use Anthropic SDK's Model type instead of custom …
Feb 5, 2026
28f6b38
fix(ai-proxy): fix ANTHROPIC_MODELS_TO_TEST typo
Feb 5, 2026
91433d8
refactor(ai-proxy): move isModelSupportingTools to router.ts
Feb 5, 2026
57e8ff1
refactor(ai-proxy): extract isModelSupportingTools tests to dedicated…
Feb 5, 2026
647c275
fix(ai-proxy): improve Anthropic error handling and input validation
Feb 6, 2026
530edda
revert(ai-proxy): restore llm.integration.test.ts to main version
Feb 6, 2026
a19aa3d
test(ai-proxy): add Anthropic model compatibility integration tests
Feb 7, 2026
0890539
fix(ai-proxy): move @anthropic-ai/sdk jest module mapper to root config
Feb 11, 2026
ef13abc
test(ai-proxy): add missing coverage for Anthropic provider edge cases
Feb 11, 2026
88329e4
feat(ai-proxy): add parallel_tool_calls support for Anthropic and fac…
Feb 13, 2026
01e52c1
fix(ai-proxy): address PR review findings
Feb 13, 2026
a524f2d
chore: update yarn.lock after removing root @langchain/anthropic dep
Feb 13, 2026
83ccae7
test(ai-proxy): improve test readability and reliability
Feb 13, 2026
a3e8620
fix(ai-proxy): fix import order in router tests
Feb 13, 2026
badde36
refactor(ai-proxy): remove unnecessary non-null assertions in provide…
Feb 13, 2026
03dc04b
feat(ai-proxy): add Anthropic deprecated model validation
Feb 13, 2026
b9d724c
chore(ai-proxy): remove JSDoc comments from supported-models
Feb 13, 2026
aae1234
chore(ai-proxy): add guidance comment for unsupported model lists
Feb 13, 2026
a974bd0
chore(ai-proxy): remove redundant @anthropic-ai/sdk jest mapper override
Feb 13, 2026
7ebdbb5
refactor: move @anthropic-ai/sdk jest mapper from root to ai-proxy
Feb 13, 2026
87c1930
fix(ai-proxy): fix model autocomplete for OpenAI and Anthropic configs
Feb 14, 2026
2753d3e
fix(ai-proxy): merge multiple system messages for Anthropic compatibi…
Feb 16, 2026
ff984a6
refactor(ai-proxy): extract LangChainAdapter for format conversions
Feb 16, 2026
7dffe8d
refactor(ai-proxy): extract AnthropicAdapter from LangChainAdapter
Feb 16, 2026
8bef864
refactor(ai-proxy): use object param for AnthropicAdapter.convertTool…
Feb 16, 2026
91ffafd
fix(ai-proxy): update imports for default export of ProviderDispatcher
Feb 16, 2026
e5681b5
fix(ai-proxy): add deprecated/streaming-only Anthropic models to unsu…
Feb 16, 2026
b6d053e
fix(ai-proxy): use claude-haiku-4-5 in integration tests
Feb 16, 2026
5691f78
fix(ai-proxy): remove haiku-3-5 from unsupported models
Feb 16, 2026
24f75d1
fix(ai-proxy): re-add haiku-3-5 to unsupported models (EOL 2026-02-19)
Feb 16, 2026
eb59d4a
refactor(ai-proxy): switch to default exports and clean up JSDoc
Feb 16, 2026
77c093a
fix(ai-proxy): add return-await in try-catch context for proper error…
Feb 16, 2026
0d0574e
fix(ai-proxy): update router test mock for default export
Feb 16, 2026
313edc6
refactor(ai-proxy): encapsulate Anthropic bindTools and clean up erro…
Feb 16, 2026
e781f81
fix(ai-proxy): narrow try-catch scope, improve error context and type…
Feb 16, 2026
b160ed6
refactor(ai-proxy): move Anthropic bindTools outside try-catch
Feb 16, 2026
0a453fe
refactor(ai-proxy): simplify return logic, add explicit types, conden…
Feb 17, 2026
89fb565
fix(ai-proxy): include provider and model in fallback warning message
Feb 17, 2026
be51327
fix(ai-proxy): update router test for new fallback warning message
Feb 17, 2026
e70b161
docs(ai-proxy): add TODO for precise provider error types
Feb 17, 2026
842dbb2
fix(ai-proxy): blacklist us-40-51r-vm-ev3 non-chat OpenAI model
Feb 17, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ jobs:
run: yarn workspace @forestadmin/ai-proxy test --testPathPattern='llm.integration'
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}

send-coverage:
name: Send Coverage
Expand Down
9 changes: 9 additions & 0 deletions packages/ai-proxy/jest.config.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,18 @@
/* eslint-disable import/no-relative-packages */
import path from 'path';

import jestConfig from '../../jest.config';

// Jest < 30 doesn't resolve wildcard exports in package.json.
// @anthropic-ai/sdk uses "./lib/*" exports that need this workaround.
// require.resolve returns the package's entry file; its directory is the
// installed package root that deep-import paths are remapped onto below.
const anthropicSdkDir = path.dirname(require.resolve('@anthropic-ai/sdk'));

export default {
  // Inherit the monorepo-wide Jest configuration, then override the
  // per-package paths for this workspace.
  ...jestConfig,
  collectCoverageFrom: ['<rootDir>/src/**/*.ts', '!<rootDir>/src/examples/**'],
  testMatch: ['<rootDir>/test/**/*.test.ts'],
  setupFiles: ['<rootDir>/test/setup-env.ts'],
  // Rewrite deep imports such as '@anthropic-ai/sdk/resources/foo' to concrete
  // files inside the installed package — works around the missing wildcard
  // "exports" resolution noted above.
  moduleNameMapper: {
    '^@anthropic-ai/sdk/(.*)$': `${anthropicSdkDir}/$1`,
  },
};
1 change: 1 addition & 0 deletions packages/ai-proxy/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
"dependencies": {
"@forestadmin/agent-toolkit": "1.0.0",
"@forestadmin/datasource-toolkit": "1.50.1",
"@langchain/anthropic": "1.3.17",
"@langchain/community": "1.1.4",
"@langchain/core": "1.1.15",
"@langchain/langgraph": "^1.1.0",
Expand Down
87 changes: 87 additions & 0 deletions packages/ai-proxy/src/anthropic-adapter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import type { OpenAIMessage } from './langchain-adapter';
import type { ChatCompletionTool, ChatCompletionToolChoice } from './provider';
import type { ChatAnthropic } from '@langchain/anthropic';
import type { BaseMessage } from '@langchain/core/messages';

import { LangChainAdapter } from './langchain-adapter';

/**
 * Extended tool_choice type for Anthropic.
 *
 * LangChain's AnthropicToolChoice doesn't include `disable_parallel_tool_use`,
 * but the Anthropic API supports it and LangChain passes objects through directly.
 */
type AnthropicToolChoiceWithParallelControl =
  | 'auto'
  | 'any'
  | 'none'
  | { type: 'tool'; name: string; disable_parallel_tool_use?: boolean }
  | { type: 'auto' | 'any'; disable_parallel_tool_use: boolean };

/** Anthropic-specific conversions layered on top of the generic LangChainAdapter. */
export default class AnthropicAdapter {
  /**
   * Convert OpenAI-format messages for Anthropic: system messages are collapsed
   * into one (Anthropic accepts only a single leading system message), then the
   * generic LangChain conversion is applied.
   */
  static convertMessages(messages: OpenAIMessage[]): BaseMessage[] {
    const normalized = AnthropicAdapter.mergeSystemMessages(messages);

    return LangChainAdapter.convertMessages(normalized);
  }

  /** Cast `as string` works around LangChain's AnthropicToolChoice missing `disable_parallel_tool_use`. */
  static bindTools(
    model: ChatAnthropic,
    tools: ChatCompletionTool[],
    {
      toolChoice,
      parallelToolCalls,
    }: { toolChoice?: ChatCompletionToolChoice; parallelToolCalls?: boolean },
  ): ChatAnthropic {
    const choice = AnthropicAdapter.convertToolChoice({ toolChoice, parallelToolCalls });

    return model.bindTools(tools, { tool_choice: choice as string }) as ChatAnthropic;
  }

  /**
   * Convert OpenAI tool_choice to Anthropic format, applying parallel tool restriction.
   *
   * Converts to LangChain format first, then applies `disable_parallel_tool_use`
   * when `parallelToolCalls` is explicitly `false` (not just falsy — `undefined` means
   * no restriction).
   */
  private static convertToolChoice({
    toolChoice,
    parallelToolCalls,
  }: {
    toolChoice?: ChatCompletionToolChoice;
    parallelToolCalls?: boolean;
  } = {}): AnthropicToolChoiceWithParallelControl | undefined {
    const converted = LangChainAdapter.convertToolChoice(toolChoice);

    // `undefined` (and `true`) mean "no restriction" — pass the choice through.
    if (parallelToolCalls !== false) return converted;

    // With tool use disabled, there is nothing to restrict.
    if (converted === 'none') return 'none';

    // Anthropic requires object form to set disable_parallel_tool_use.
    if (converted === undefined || converted === 'auto') {
      return { type: 'auto', disable_parallel_tool_use: true };
    }

    if (converted === 'any') {
      return { type: 'any', disable_parallel_tool_use: true };
    }

    // Specific-tool object form: keep the tool selection, add the restriction.
    return { ...converted, disable_parallel_tool_use: true };
  }

  /**
   * Merge all system messages into a single one placed first.
   *
   * Anthropic only allows a single system message at the beginning of the conversation.
   */
  private static mergeSystemMessages(messages: OpenAIMessage[]): OpenAIMessage[] {
    const systemParts: string[] = [];
    const others: OpenAIMessage[] = [];

    for (const message of messages) {
      if (message.role === 'system') {
        systemParts.push(message.content || '');
      } else {
        others.push(message);
      }
    }

    // Zero or one system message: nothing to merge, return the input untouched.
    if (systemParts.length <= 1) return messages;

    return [{ role: 'system', content: systemParts.join('\n\n') }, ...others];
  }
}
14 changes: 5 additions & 9 deletions packages/ai-proxy/src/errors.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,12 @@ export class AINotFoundError extends NotFoundError {
}

export class AIUnprocessableError extends UnprocessableError {
constructor(message: string) {
readonly cause?: Error;

constructor(message: string, options?: { cause?: Error }) {
super(message);
this.name = 'AIUnprocessableError';
if (options?.cause) this.cause = options.cause;
}
}

Expand All @@ -55,17 +58,10 @@ export class AINotConfiguredError extends AIError {
}
}

export class OpenAIUnprocessableError extends AIUnprocessableError {
constructor(message: string) {
super(message);
this.name = 'OpenAIError';
}
}

export class AIToolUnprocessableError extends AIUnprocessableError {
constructor(message: string) {
super(message);
this.name = 'AIToolError';
this.name = 'AIToolUnprocessableError';
}
}

Expand Down
1 change: 1 addition & 0 deletions packages/ai-proxy/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import type { McpConfiguration } from './mcp-client';
import McpConfigChecker from './mcp-config-checker';

export { createAiProvider } from './create-ai-provider';
export { default as ProviderDispatcher } from './provider-dispatcher';
export * from './provider-dispatcher';
export * from './remote-tools';
export * from './router';
Expand Down
189 changes: 189 additions & 0 deletions packages/ai-proxy/src/langchain-adapter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,189 @@
import type { ChatCompletionResponse, ChatCompletionToolChoice } from './provider';
import type { BaseMessage } from '@langchain/core/messages';

import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
import crypto from 'crypto';

import { AIBadRequestError } from './errors';

/** OpenAI chat message with the `system` role; content may be null per the OpenAI schema. */
interface OpenAISystemMessage {
  role: 'system';
  content: string | null;
}

/** OpenAI chat message with the `user` role. */
interface OpenAIUserMessage {
  role: 'user';
  content: string | null;
}

/** OpenAI assistant message; may carry tool calls with JSON-encoded argument strings. */
interface OpenAIAssistantMessage {
  role: 'assistant';
  content: string | null;
  tool_calls?: Array<{
    id: string;
    function: {
      name: string;
      // Arguments arrive as a JSON string, not a parsed object.
      arguments: string;
    };
  }>;
}

/** OpenAI tool-result message; `tool_call_id` links it back to the assistant's call. */
interface OpenAIToolMessage {
  role: 'tool';
  content: string | null;
  tool_call_id: string;
}

/** Discriminated union (on `role`) of the OpenAI message shapes this adapter accepts. */
export type OpenAIMessage =
  | OpenAISystemMessage
  | OpenAIUserMessage
  | OpenAIAssistantMessage
  | OpenAIToolMessage;

/** LangChain's tool_choice vocabulary: 'any' is OpenAI's 'required'; undefined means "not set". */
export type LangChainToolChoice =
  | 'auto'
  | 'any'
  | 'none'
  | { type: 'tool'; name: string }
  | undefined;

/** Handles generic format conversions between OpenAI and LangChain. */
export class LangChainAdapter {
  /**
   * Convert OpenAI-format messages to LangChain messages.
   *
   * @throws AIBadRequestError on an unknown role, a tool message without
   *   `tool_call_id`, or tool-call arguments that are not a JSON object.
   */
  static convertMessages(messages: OpenAIMessage[]): BaseMessage[] {
    const result: BaseMessage[] = [];

    for (const msg of messages) {
      switch (msg.role) {
        case 'system':
          result.push(new SystemMessage(msg.content || ''));
          break;
        case 'user':
          result.push(new HumanMessage(msg.content || ''));
          break;
        case 'assistant':
          if (msg.tool_calls) {
            result.push(
              new AIMessage({
                content: msg.content || '',
                tool_calls: msg.tool_calls.map(tc => ({
                  id: tc.id,
                  name: tc.function.name,
                  args: LangChainAdapter.parseToolArguments(
                    tc.function.name,
                    tc.function.arguments,
                  ),
                })),
              }),
            );
          } else {
            result.push(new AIMessage(msg.content || ''));
          }

          break;
        case 'tool':
          if (!msg.tool_call_id) {
            throw new AIBadRequestError('Tool message is missing required "tool_call_id" field.');
          }

          result.push(
            new ToolMessage({
              content: msg.content || '',
              tool_call_id: msg.tool_call_id,
            }),
          );
          break;
        default:
          throw new AIBadRequestError(
            `Unsupported message role '${
              (msg as { role: string }).role
            }'. Expected: system, user, assistant, or tool.`,
          );
      }
    }

    return result;
  }

  /**
   * Convert a LangChain AIMessage to an OpenAI-compatible ChatCompletionResponse.
   *
   * Missing ids are replaced with generated `call_`/`msg_` UUIDs so the response
   * always satisfies the OpenAI schema; missing usage metadata defaults to 0.
   */
  static convertResponse(response: AIMessage, modelName: string | null): ChatCompletionResponse {
    const toolCalls = response.tool_calls?.map(tc => ({
      id: tc.id || `call_${crypto.randomUUID()}`,
      type: 'function' as const,
      function: {
        name: tc.name,
        arguments: JSON.stringify(tc.args),
      },
    }));

    const usageMetadata = response.usage_metadata as
      | { input_tokens?: number; output_tokens?: number; total_tokens?: number }
      | undefined;

    return {
      id: response.id || `msg_${crypto.randomUUID()}`,
      object: 'chat.completion',
      created: Math.floor(Date.now() / 1000),
      model: modelName,
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: LangChainAdapter.extractTextContent(response.content),
            refusal: null,
            tool_calls: toolCalls?.length ? toolCalls : undefined,
          },
          // OpenAI semantics: a response that requests tool calls finishes with
          // 'tool_calls'; otherwise it is a normal 'stop'.
          finish_reason: toolCalls?.length ? 'tool_calls' : 'stop',
          logprobs: null,
        },
      ],
      usage: {
        prompt_tokens: usageMetadata?.input_tokens ?? 0,
        completion_tokens: usageMetadata?.output_tokens ?? 0,
        total_tokens: usageMetadata?.total_tokens ?? 0,
      },
    };
  }

  /**
   * Convert OpenAI tool_choice to LangChain format.
   *
   * @throws AIBadRequestError for values outside the OpenAI vocabulary.
   */
  static convertToolChoice(toolChoice: ChatCompletionToolChoice | undefined): LangChainToolChoice {
    if (!toolChoice) return undefined;
    if (toolChoice === 'auto') return 'auto';
    if (toolChoice === 'none') return 'none';
    // OpenAI's 'required' maps to LangChain/Anthropic 'any'.
    if (toolChoice === 'required') return 'any';

    if (typeof toolChoice === 'object' && toolChoice.type === 'function') {
      return { type: 'tool', name: toolChoice.function.name };
    }

    throw new AIBadRequestError(
      `Unsupported tool_choice value. Expected: 'auto', 'none', 'required', or {type: 'function', function: {name: '...'}}.`,
    );
  }

  /** Flatten LangChain content (string or block array) to text; null when empty. */
  private static extractTextContent(content: AIMessage['content']): string | null {
    if (typeof content === 'string') return content || null;

    if (Array.isArray(content)) {
      const text = content
        .filter(block => block.type === 'text')
        .map(block => ('text' in block ? block.text : ''))
        .join('');

      return text || null;
    }

    return null;
  }

  /**
   * Parse a tool call's JSON argument string into an object.
   *
   * Rejects both malformed JSON and JSON that is valid but not an object
   * (e.g. `"5"`, `null`, `[1]`) — the OpenAI schema requires tool arguments
   * to be a JSON object, and returning a non-object typed as a Record would
   * hand LangChain malformed args downstream.
   */
  private static parseToolArguments(toolName: string, args: string): Record<string, unknown> {
    let parsed: unknown;

    try {
      parsed = JSON.parse(args);
    } catch {
      throw new AIBadRequestError(
        `Invalid JSON in tool_calls arguments for tool '${toolName}': ${args}`,
      );
    }

    if (parsed === null || typeof parsed !== 'object' || Array.isArray(parsed)) {
      throw new AIBadRequestError(
        `Invalid JSON in tool_calls arguments for tool '${toolName}': ${args}`,
      );
    }

    return parsed as Record<string, unknown>;
  }
}
Loading
Loading