Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .vscode/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -50,5 +50,6 @@
"javascriptreact",
"typescript",
"typescriptreact"
]
],
"typescript.autoClosingTags": false
}
9 changes: 8 additions & 1 deletion backend/app/agent/agent_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,11 +135,18 @@ def agent_model(
)
model_platform_enum = None

# Anthropic SDK adds /v1 to every endpoint path internally, so a user-
# provided base URL must NOT include /v1 (unlike OpenAI-compatible APIs).
model_url = effective_config["api_url"]
if effective_config["model_platform"].lower() == "anthropic" and model_url:
stripped = model_url.rstrip("/")
if stripped.endswith("/v1"):
model_url = stripped[:-3]
model = ModelFactory.create(
model_platform=effective_config["model_platform"],
model_type=effective_config["model_type"],
api_key=effective_config["api_key"],
url=effective_config["api_url"],
url=model_url,
model_config_dict=model_config or None,
timeout=600, # 10 minutes
**init_params,
Expand Down
62 changes: 62 additions & 0 deletions backend/app/component/model_suggestions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
# ========= Copyright 2025-2026 @ Eigent.ai All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2025-2026 @ Eigent.ai All Rights Reserved. =========

from camel.types import ModelType

# Maps platform name (lowercase) to model-name prefixes for filtering.
# An empty list means the platform accepts any model.
MODEL_PREFIXES: dict[str, list[str]] = {
    # Prefix-restricted platforms: a model is suggested only when its
    # lowercased name starts with one of these prefixes.
    "openai": ["gpt-", "o1", "o3", "o4", "chatgpt-"],
    "anthropic": ["claude-"],
    "gemini": ["gemini-"],
    "deepseek": ["deepseek"],
    "qwen": ["qwen"],
    "minimax": ["minimax"],
    "moonshot": ["moonshot", "kimi"],
    # Azure serves OpenAI models, so it shares the OpenAI-style prefixes.
    "azure": ["gpt-", "o1", "o3", "o4"],
    # Open platforms: any model name is acceptable, so no prefix filter.
    "openai-compatible-model": [],
    "openrouter": [],
    "bedrock": [],
    "ollama": [],
    "vllm": [],
    "sglang": [],
    "lmstudio": [],
    "modelark": [],
    "zai": [],
}


def get_model_type_suggestions(platform: str | None) -> list[str]:
    """Return CAMEL model names for the given platform, newest first.

    Filters by platform prefix when known; returns all models otherwise.

    Args:
        platform (str | None): Platform name (e.g. 'openai', 'anthropic').
            Case-insensitive. None or empty returns all models.

    Returns:
        list[str]: Model name strings, newest (by CAMEL enum order) first.
    """
    key = platform.lower() if platform else ""
    # CAMEL appends newer models at the end of the enum, so reversing the
    # definition order puts the newest models first.
    names = [member.value for member in ModelType]
    names.reverse()
    known_prefixes = MODEL_PREFIXES.get(key)
    if not known_prefixes:
        # Unknown platform, or an "open" platform with an empty prefix list:
        # no filtering applies.
        return names
    prefix_tuple = tuple(known_prefixes)
    return [name for name in names if name.lower().startswith(prefix_tuple)]
12 changes: 12 additions & 0 deletions backend/app/component/model_validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,12 @@ def create_agent(
raise ValueError(f"Invalid model_type: {model_type}")
if platform is None:
raise ValueError(f"Invalid model_platform: {model_platform}")
# Anthropic SDK adds /v1 to every endpoint path internally, so a user-
# provided base URL must NOT include /v1 (unlike OpenAI-compatible APIs).
if platform.lower() == "anthropic" and url:
stripped = url.rstrip("/")
if stripped.endswith("/v1"):
url = stripped[:-3]
model = ModelFactory.create(
model_platform=platform,
model_type=mtype,
Expand Down Expand Up @@ -321,6 +327,12 @@ def validate_model_with_details(

# Stage 2: Model Creation
result.validation_stages[ValidationStage.MODEL_CREATION] = False
# Anthropic SDK adds /v1 to every endpoint path internally, so a user-
# provided base URL must NOT include /v1 (unlike OpenAI-compatible APIs).
if model_platform.lower() == "anthropic" and url:
stripped = url.rstrip("/")
if stripped.endswith("/v1"):
url = stripped[:-3]
try:
logger.debug(
"Creating model",
Expand Down
26 changes: 26 additions & 0 deletions backend/app/controller/model_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
from pydantic import BaseModel, Field

from app.component.error_format import normalize_error_to_openai_format
from app.component.model_suggestions import get_model_type_suggestions
from app.component.model_validation import (
ValidationErrorType,
ValidationStage,
Expand Down Expand Up @@ -289,3 +290,28 @@ async def validate_model(request: ValidateModelRequest):
},
},
)


class ModelTypeSuggestionRequest(BaseModel):
    """Request body for the model-type suggestion endpoint."""

    platform: str | None = Field(default=None, description="Model platform")


class ModelTypeSuggestionResponse(BaseModel):
    """Response body carrying suggested model type names, newest first."""

    model_types: list[str] = Field(
        ..., description="List of available model types"
    )


@router.post("/model/types")
async def get_model_types(request: ModelTypeSuggestionRequest):
    """Return CAMEL model types for the given platform, newest first.

    Args:
        request (ModelTypeSuggestionRequest): Carries the optional platform
            name used to filter the suggestions.

    Returns:
        ModelTypeSuggestionResponse: Model type names, newest first.

    Raises:
        HTTPException: 500 when the suggestion lookup fails unexpectedly.
    """
    try:
        return ModelTypeSuggestionResponse(
            model_types=get_model_type_suggestions(request.platform)
        )
    except Exception as e:
        logger.error("Error getting model types: %s", e, exc_info=True)
        # Chain the original exception (PEP 3134) so the root cause is
        # preserved in tracebacks instead of being reported as "another
        # exception occurred during handling".
        raise HTTPException(
            status_code=500,
            detail={"message": f"Failed to get model types: {e}"},
        ) from e
10 changes: 4 additions & 6 deletions backend/app/service/chat_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -341,12 +341,10 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock):
extra={"project_id": options.project_id, "task_id": options.task_id},
)
logger.info("=" * 80)
logger.debug(
"Step solve options",
extra={
"task_id": options.task_id,
"model_platform": options.model_platform,
},
logger.info(
"Step solve options: platform=%s type=%s",
options.model_platform,
options.model_type,
)

while True:
Expand Down
178 changes: 178 additions & 0 deletions backend/tests/app/component/test_model_suggestions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,178 @@
# ========= Copyright 2025-2026 @ Eigent.ai All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2025-2026 @ Eigent.ai All Rights Reserved. =========

from unittest.mock import MagicMock, patch

import pytest

from app.component.model_suggestions import (
MODEL_PREFIXES,
get_model_type_suggestions,
)

# Controlled model list (oldest → newest, matching CAMEL enum definition order)
# Includes one representative model per prefix-filtered platform so each
# filtering test can assert both inclusion and exclusion.
_MOCK_MODELS = [
    "gpt-3.5-turbo",
    "gpt-4o",
    "claude-3-opus",
    "gemini-pro",
    "deepseek-chat",
    "qwen-turbo",
]


def _make_mock_enum(values: list[str]):
mocks = []
for v in values:
m = MagicMock()
m.value = v
mocks.append(m)
return mocks


@pytest.fixture(autouse=True)
def patch_model_type():
    """Replace CAMEL's ModelType with a controlled mock list for every test."""
    patcher = patch(
        "app.component.model_suggestions.ModelType",
        new=_make_mock_enum(_MOCK_MODELS),
    )
    patcher.start()
    try:
        yield
    finally:
        # Always undo the patch, even if the test body raised.
        patcher.stop()


def test_model_prefixes_contains_known_platforms():
    """Every major first-party platform has an entry in the prefix table."""
    for platform in ("openai", "anthropic", "gemini", "deepseek", "qwen"):
        assert platform in MODEL_PREFIXES


def test_model_prefixes_empty_list_for_open_platforms():
    """Open platforms declare an empty prefix list (accept any model)."""
    open_platforms = (
        "openai-compatible-model",
        "openrouter",
        "ollama",
        "vllm",
    )
    for platform in open_platforms:
        assert MODEL_PREFIXES[platform] == [], (
            f"{platform} should have empty prefix list"
        )


def test_returns_list():
    """The suggestion helper always yields a plain list."""
    assert isinstance(get_model_type_suggestions(None), list)


def test_all_entries_are_strings():
    """Every suggestion is a string model name."""
    for entry in get_model_type_suggestions(None):
        assert isinstance(entry, str)


def test_no_duplicates():
    """Suggestions contain no repeated model names."""
    suggestions = get_model_type_suggestions(None)
    assert len(set(suggestions)) == len(suggestions)


def test_none_platform_returns_all_models():
    """None means "no platform filter" — all models come back."""
    assert set(get_model_type_suggestions(None)) == set(_MOCK_MODELS)


def test_empty_string_platform_returns_all_models():
    """Empty string behaves like None: no filtering."""
    assert set(get_model_type_suggestions("")) == set(_MOCK_MODELS)


def test_unknown_platform_returns_all_models():
    """Unrecognized platforms fall back to the full model list."""
    result = get_model_type_suggestions("totally-unknown-xyz")
    assert set(result) == set(_MOCK_MODELS)


def test_newest_first_ordering():
    """Results reverse enum definition order, i.e. newest models first."""
    assert get_model_type_suggestions(None) == _MOCK_MODELS[::-1]


def test_openai_platform_includes_gpt_models():
    """OpenAI filtering keeps all gpt-* models."""
    suggestions = get_model_type_suggestions("openai")
    for expected in ("gpt-3.5-turbo", "gpt-4o"):
        assert expected in suggestions


def test_openai_platform_excludes_other_models():
    """OpenAI filtering drops models from other providers."""
    suggestions = get_model_type_suggestions("openai")
    for unexpected in ("claude-3-opus", "gemini-pro", "deepseek-chat"):
        assert unexpected not in suggestions


def test_anthropic_platform_includes_claude_models():
    """Anthropic filtering keeps claude-* models."""
    assert "claude-3-opus" in get_model_type_suggestions("anthropic")


def test_anthropic_platform_excludes_other_models():
    """Anthropic filtering drops non-Claude models."""
    suggestions = get_model_type_suggestions("anthropic")
    for unexpected in ("gpt-4o", "gemini-pro"):
        assert unexpected not in suggestions


def test_gemini_platform_includes_gemini_models():
    """Gemini filtering keeps gemini-* models."""
    assert "gemini-pro" in get_model_type_suggestions("gemini")


def test_gemini_platform_excludes_other_models():
    """Gemini filtering drops non-Gemini models."""
    assert "gpt-4o" not in get_model_type_suggestions("gemini")


def test_deepseek_platform_includes_deepseek_models():
    """DeepSeek filtering keeps deepseek* models."""
    assert "deepseek-chat" in get_model_type_suggestions("deepseek")


def test_qwen_platform_includes_qwen_models():
    """Qwen filtering keeps qwen* models."""
    assert "qwen-turbo" in get_model_type_suggestions("qwen")


def test_platform_lookup_is_case_insensitive():
    """Mixed-case platform names resolve to the same suggestions."""
    pairs = (("OPENAI", "openai"), ("Anthropic", "anthropic"))
    for mixed, lower in pairs:
        assert get_model_type_suggestions(mixed) == get_model_type_suggestions(
            lower
        )


def test_openai_compatible_model_returns_all_models():
    """The OpenAI-compatible platform applies no filter."""
    result = get_model_type_suggestions("openai-compatible-model")
    assert set(result) == set(_MOCK_MODELS)


def test_ollama_returns_all_models():
    """Ollama applies no filter."""
    assert set(get_model_type_suggestions("ollama")) == set(_MOCK_MODELS)


def test_vllm_returns_all_models():
    """vLLM applies no filter."""
    assert set(get_model_type_suggestions("vllm")) == set(_MOCK_MODELS)


def test_filtered_result_is_subset_of_all():
    """Platform filtering never invents models absent from the full list."""
    everything = set(get_model_type_suggestions(None))
    assert set(get_model_type_suggestions("openai")) <= everything
Loading