Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .env-template
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ INTERNAL_KEY=<internal key for worker-to-backend authentication>
EMBEDDINGS_BASE_URL=
EMBEDDINGS_KEY=

#For MiniMax (optional - for using MiniMax models)
MINIMAX_API_KEY=

#For Azure (you can delete it if you don't use Azure)
OPENAI_API_BASE=
OPENAI_API_VERSION=
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
<li><strong>🔑 Streamlined API Keys:</strong> Generate keys linked to your settings, documents, and models, simplifying chatbot and integration setup.</li>
<li><strong>🔗 Actionable Tooling:</strong> Connect to APIs, tools, and other services to enable LLM actions.</li>
<li><strong>🧩 Pre-built Integrations:</strong> Use readily available HTML/React chat widgets, search tools, Discord/Telegram bots, and more.</li>
<li><strong>🔌 Flexible Deployment:</strong> Works with major LLMs (OpenAI, Google, Anthropic) and local models (Ollama, llama_cpp).</li>
<li><strong>🔌 Flexible Deployment:</strong> Works with major LLMs (OpenAI, Google, Anthropic, MiniMax) and local models (Ollama, llama_cpp).</li>
<li><strong>🏢 Secure & Scalable:</strong> Run privately and securely with Kubernetes support, designed for enterprise-grade reliability.</li>
</ul>

Expand Down
28 changes: 28 additions & 0 deletions application/core/model_configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,34 @@
),
]

# MiniMax vision support mirrors the shared image attachment preset.
MINIMAX_ATTACHMENTS = IMAGE_ATTACHMENTS

# MiniMax chat models surfaced in the model picker.
# NOTE(review): context_window=204800 is assumed to match MiniMax's published
# 204K-token limit — confirm against the provider's model documentation.
MINIMAX_MODELS = [
    AvailableModel(
        id="MiniMax-M2.5",
        provider=ModelProvider.MINIMAX,
        display_name="MiniMax M2.5",
        description="MiniMax flagship model with 204K context window",
        capabilities=ModelCapabilities(
            supports_tools=True,
            supported_attachment_types=MINIMAX_ATTACHMENTS,
            context_window=204800,
        ),
    ),
    AvailableModel(
        id="MiniMax-M2.5-highspeed",
        provider=ModelProvider.MINIMAX,
        display_name="MiniMax M2.5 Highspeed",
        description="Fast, cost-effective MiniMax model with 204K context window",
        capabilities=ModelCapabilities(
            supports_tools=True,
            supported_attachment_types=MINIMAX_ATTACHMENTS,
            context_window=204800,
        ),
    ),
]


AZURE_OPENAI_MODELS = [
AvailableModel(
id="azure-gpt-4",
Expand Down
20 changes: 20 additions & 0 deletions application/core/model_settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ class ModelProvider(str, Enum):
PREMAI = "premai"
SAGEMAKER = "sagemaker"
NOVITA = "novita"
MINIMAX = "minimax"


@dataclass
Expand Down Expand Up @@ -118,6 +119,10 @@ def _load_models(self):
settings.LLM_PROVIDER == "huggingface" and settings.API_KEY
):
self._add_huggingface_models(settings)
if settings.MINIMAX_API_KEY or (
settings.LLM_PROVIDER == "minimax" and settings.API_KEY
):
self._add_minimax_models(settings)
# Default model selection
if settings.LLM_NAME:
# Parse LLM_NAME (may be comma-separated)
Expand Down Expand Up @@ -273,6 +278,21 @@ def _add_huggingface_models(self, settings):
)
self.models[model_id] = model

def _add_minimax_models(self, settings):
    """Register MiniMax models in ``self.models``.

    With a dedicated MINIMAX_API_KEY every known MiniMax model is
    exposed.  When MiniMax is instead selected via the generic
    LLM_PROVIDER/LLM_NAME settings, only the named model is registered;
    if no model name is given, all known models are registered.
    """
    from application.core.model_configs import MINIMAX_MODELS

    # Dedicated key: expose the full MiniMax catalogue.
    if settings.MINIMAX_API_KEY:
        self.models.update({m.id: m for m in MINIMAX_MODELS})
        return

    # Generic provider config with an explicit model name: register
    # only the matching entry (unknown names register nothing).
    if settings.LLM_PROVIDER == "minimax" and settings.LLM_NAME:
        for m in MINIMAX_MODELS:
            if m.id == settings.LLM_NAME:
                self.models[m.id] = m
        return

    # Provider selected without a model name: expose everything.
    self.models.update({m.id: m for m in MINIMAX_MODELS})

def _parse_model_names(self, llm_name: str) -> List[str]:
"""
Parse LLM_NAME which may contain comma-separated model names.
Expand Down
1 change: 1 addition & 0 deletions application/core/model_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ def get_api_key_for_provider(provider: str) -> Optional[str]:
"google": settings.GOOGLE_API_KEY,
"groq": settings.GROQ_API_KEY,
"huggingface": settings.HUGGINGFACE_API_KEY,
"minimax": settings.MINIMAX_API_KEY,
"azure_openai": settings.API_KEY,
"docsgpt": None,
"llama.cpp": None,
Expand Down
2 changes: 2 additions & 0 deletions application/core/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,7 @@ class Settings(BaseSettings):
GROQ_API_KEY: Optional[str] = None
HUGGINGFACE_API_KEY: Optional[str] = None
OPEN_ROUTER_API_KEY: Optional[str] = None
MINIMAX_API_KEY: Optional[str] = None

OPENAI_API_BASE: Optional[str] = None # azure openai api base url
OPENAI_API_VERSION: Optional[str] = None # azure openai api version
Expand Down Expand Up @@ -174,6 +175,7 @@ class Settings(BaseSettings):
"GOOGLE_API_KEY",
"GROQ_API_KEY",
"HUGGINGFACE_API_KEY",
"MINIMAX_API_KEY",
"EMBEDDINGS_KEY",
"FALLBACK_LLM_API_KEY",
"QDRANT_API_KEY",
Expand Down
2 changes: 2 additions & 0 deletions application/llm/llm_creator.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from application.llm.openai import AzureOpenAILLM, OpenAILLM
from application.llm.premai import PremAILLM
from application.llm.sagemaker import SagemakerAPILLM
from application.llm.minimax import MiniMaxLLM
from application.llm.open_router import OpenRouterLLM

logger = logging.getLogger(__name__)
Expand All @@ -27,6 +28,7 @@ class LLMCreator:
"google": GoogleLLM,
"novita": NovitaLLM,
"openrouter": OpenRouterLLM,
"minimax": MiniMaxLLM,
}

@classmethod
Expand Down
74 changes: 74 additions & 0 deletions application/llm/minimax.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
from application.core.settings import settings
from application.llm.openai import OpenAILLM

# Public MiniMax OpenAI-compatible API endpoint (international domain).
MINIMAX_BASE_URL = "https://api.minimax.io/v1"


class MiniMaxLLM(OpenAILLM):
    """LLM adapter for MiniMax's OpenAI-compatible chat API.

    MiniMax exposes an OpenAI-style endpoint, so this class reuses
    ``OpenAILLM`` and only overrides the provider-specific quirks:

    * the base URL defaults to the public MiniMax endpoint,
    * ``temperature`` is clamped to MiniMax's accepted range (0.0, 1.0],
    * ``response_format`` (structured output) is not supported and is
      always dropped before the request is sent.
    """

    def __init__(self, api_key=None, user_api_key=None, base_url=None, *args, **kwargs):
        # Key precedence: explicit argument > MINIMAX_API_KEY > generic
        # API_KEY (the last covers LLM_PROVIDER=minimax configured via the
        # shared key setting).
        super().__init__(
            api_key=api_key or settings.MINIMAX_API_KEY or settings.API_KEY,
            user_api_key=user_api_key,
            base_url=base_url or MINIMAX_BASE_URL,
            *args,
            **kwargs,
        )

    @staticmethod
    def _clamp_temperature(kwargs):
        """Normalize ``kwargs['temperature']`` in place to MiniMax's (0.0, 1.0] range.

        Values <= 0 become 0.01 and values > 1.0 become 1.0.  The value is
        coerced to ``float`` and written back even when it is already in
        range, so numeric strings coming from configuration never reach the
        API.  A missing or ``None`` temperature is left untouched.
        """
        temp = kwargs.get("temperature")
        if temp is None:
            return
        temp = float(temp)
        if temp <= 0:
            temp = 0.01
        elif temp > 1.0:
            temp = 1.0
        kwargs["temperature"] = temp

    def _raw_gen(
        self,
        baseself,
        model,
        messages,
        stream=False,
        tools=None,
        response_format=None,
        **kwargs,
    ):
        """Non-streaming generation; clamps temperature and drops response_format."""
        self._clamp_temperature(kwargs)
        # MiniMax does not support response_format; drop it.
        return super()._raw_gen(
            baseself,
            model,
            messages,
            stream=stream,
            tools=tools,
            response_format=None,
            **kwargs,
        )

    def _raw_gen_stream(
        self,
        baseself,
        model,
        messages,
        stream=True,
        tools=None,
        response_format=None,
        **kwargs,
    ):
        """Streaming generation; clamps temperature and drops response_format."""
        self._clamp_temperature(kwargs)
        # MiniMax does not support response_format; drop it.
        return super()._raw_gen_stream(
            baseself,
            model,
            messages,
            stream=stream,
            tools=tools,
            response_format=None,
            **kwargs,
        )

    def _supports_structured_output(self):
        # Structured output (JSON schema response_format) is not available
        # on the MiniMax endpoint.
        return False
74 changes: 74 additions & 0 deletions application/tts/minimax_tts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import base64
import json
import logging

import requests

from application.core.settings import settings
from application.tts.base import BaseTTS

logger = logging.getLogger(__name__)

# MiniMax HTTP API root (international domain); the T2A v2 path is
# appended per request.
MINIMAX_TTS_BASE_URL = "https://api.minimax.io"

# Known English voice ids for MiniMax T2A synthesis.
MINIMAX_TTS_VOICES = [
    "English_Graceful_Lady",
    "English_Insightful_Speaker",
    "English_radiant_girl",
    "English_Persuasive_Man",
    "English_Lucky_Robot",
    "English_expressive_narrator",
]


class MiniMaxTTS(BaseTTS):
    """Text-to-speech backend using MiniMax's T2A v2 HTTP API.

    Sends a non-streaming synthesis request and returns the audio as a
    base64-encoded MP3 string plus a language code, matching the
    ``(audio_base64, lang)`` contract of the other ``BaseTTS`` providers.
    """

    def __init__(self):
        # API key is shared with the MiniMax LLM integration.
        self.api_key = settings.MINIMAX_API_KEY
        self.base_url = MINIMAX_TTS_BASE_URL

    def text_to_speech(
        self,
        text,
        voice_id="English_Graceful_Lady",
        model="speech-2.8-hd",
    ):
        """Synthesize *text* to speech via MiniMax.

        Args:
            text: Plain text to synthesize.
            voice_id: MiniMax voice identifier. Defaults to the previous
                hard-coded voice; see MINIMAX_TTS_VOICES for known English
                voices (other valid MiniMax voice ids also work).
            model: MiniMax TTS model name.

        Returns:
            Tuple of ``(audio_base64, lang)`` where the audio is a
            base64-encoded MP3 and ``lang`` is currently always ``"en"``.

        Raises:
            requests.HTTPError: on a non-2xx HTTP response.
            RuntimeError: when MiniMax reports an API-level error or
                returns empty audio data.
        """
        lang = "en"
        url = f"{self.base_url}/v1/t2a_v2"

        payload = {
            "model": model,
            "text": text,
            "stream": False,
            "voice_setting": {
                "voice_id": voice_id,
                "speed": 1,
                "vol": 1,
                "pitch": 0,
            },
            "audio_setting": {
                "sample_rate": 32000,
                "bitrate": 128000,
                "format": "mp3",
                "channel": 1,
            },
        }

        response = requests.post(
            url,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.api_key}",
            },
            json=payload,
            timeout=30,
        )
        response.raise_for_status()

        result = response.json()
        # MiniMax wraps API-level errors in base_resp even on HTTP 200.
        base_resp = result.get("base_resp", {})
        status_code = base_resp.get("status_code", -1)
        if status_code != 0:
            status_msg = base_resp.get("status_msg", "unknown error")
            raise RuntimeError(f"MiniMax TTS error ({status_code}): {status_msg}")

        # Audio arrives hex-encoded; re-encode as base64 for the frontend.
        hex_audio = result.get("data", {}).get("audio", "")
        if not hex_audio:
            raise RuntimeError("MiniMax TTS returned empty audio data")

        audio_bytes = bytes.fromhex(hex_audio)
        audio_base64 = base64.b64encode(audio_bytes).decode("utf-8")
        return audio_base64, lang
2 changes: 2 additions & 0 deletions application/tts/tts_creator.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from application.tts.google_tts import GoogleTTS
from application.tts.elevenlabs import ElevenlabsTTS
from application.tts.minimax_tts import MiniMaxTTS
from application.tts.base import BaseTTS


Expand All @@ -8,6 +9,7 @@ class TTSCreator:
tts_providers = {
"google_tts": GoogleTTS,
"elevenlabs": ElevenlabsTTS,
"minimax_tts": MiniMaxTTS,
}

@classmethod
Expand Down
1 change: 1 addition & 0 deletions docs/content/Models/cloud-providers.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ DocsGPT offers direct, streamlined support for the following cloud LLM providers
| Prem AI | `premai` | (See Prem AI docs) |
| AWS SageMaker | `sagemaker` | (See SageMaker docs) |
| Novita AI | `novita` | (See Novita docs) |
| MiniMax | `minimax` | `MiniMax-M2.5` |

## Connecting to OpenAI-Compatible Cloud APIs

Expand Down
12 changes: 10 additions & 2 deletions setup.ps1
Original file line number Diff line number Diff line change
Expand Up @@ -268,9 +268,10 @@ function Prompt-CloudAPIProviderOptions {
Write-ColorText "5) HuggingFace Inference API" -ForegroundColor "Yellow"
Write-ColorText "6) Azure OpenAI" -ForegroundColor "Yellow"
Write-ColorText "7) Novita" -ForegroundColor "Yellow"
Write-ColorText "8) MiniMax" -ForegroundColor "Yellow"
Write-ColorText "b) Back to Main Menu" -ForegroundColor "Yellow"
Write-Host ""
$script:provider_choice = Read-Host "Choose option (1-7, or b)"
$script:provider_choice = Read-Host "Choose option (1-8, or b)"
}

# Function to prompt for Ollama CPU/GPU options
Expand Down Expand Up @@ -982,11 +983,18 @@ function Connect-CloudAPIProvider {
Get-APIKey
break
}
"8" { # MiniMax
$script:provider_name = "MiniMax"
$script:llm_name = "minimax"
$script:model_name = "MiniMax-M2.5"
Get-APIKey
break
}
"b" { Clear-Host; return }
"B" { Clear-Host; return }
default {
Write-Host ""
Write-ColorText "Invalid choice. Please choose 1-7, or b." -ForegroundColor "Red"
Write-ColorText "Invalid choice. Please choose 1-8, or b." -ForegroundColor "Red"
Start-Sleep -Seconds 1
}
}
Expand Down
11 changes: 9 additions & 2 deletions setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -154,9 +154,10 @@ prompt_cloud_api_provider_options() {
echo -e "${YELLOW}5) HuggingFace Inference API${NC}"
echo -e "${YELLOW}6) Azure OpenAI${NC}"
echo -e "${YELLOW}7) Novita${NC}"
echo -e "${YELLOW}8) MiniMax${NC}"
echo -e "${YELLOW}b) Back to Main Menu${NC}"
echo
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-7, or b): ${NC}")" provider_choice
read -p "$(echo -e "${DEFAULT_FG}Choose option (1-8, or b): ${NC}")" provider_choice
}

# Function to prompt for Ollama CPU/GPU options
Expand Down Expand Up @@ -707,8 +708,14 @@ connect_cloud_api_provider() {
model_name="deepseek/deepseek-r1"
get_api_key
break ;;
8) # MiniMax
provider_name="MiniMax"
llm_provider="minimax"
model_name="MiniMax-M2.5"
get_api_key
break ;;
b|B) clear; return 1 ;; # Clear screen and Back to Main Menu
*) echo -e "\n${RED}Invalid choice. Please choose 1-7, or b.${NC}" ; sleep 1 ;;
*) echo -e "\n${RED}Invalid choice. Please choose 1-8, or b.${NC}" ; sleep 1 ;;
esac
done

Expand Down
Loading
Loading