Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions packages/components/credentials/MiniMaxApi.credential.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'

/**
 * Credential definition for the MiniMax API.
 * Registers a single password-type input holding the MiniMax API key;
 * nodes reference this credential by the name 'miniMaxApi'.
 */
class MiniMaxApi implements INodeCredential {
    label: string = 'MiniMax API'
    name: string = 'miniMaxApi'
    version: number = 1.0
    inputs: INodeParams[] = [
        {
            label: 'MiniMax API Key',
            name: 'miniMaxApiKey',
            type: 'password'
        }
    ]
}

module.exports = { credClass: MiniMaxApi }
17 changes: 17 additions & 0 deletions packages/components/models.json
Original file line number Diff line number Diff line change
Expand Up @@ -1521,6 +1521,23 @@
}
]
},
{
"name": "chatMiniMax",
"models": [
{
"label": "MiniMax-M2.5",
"name": "MiniMax-M2.5",
"input_cost": 3e-7,
"output_cost": 1.2e-6
},
{
"label": "MiniMax-M2.5-highspeed",
"name": "MiniMax-M2.5-highspeed",
"input_cost": 6e-7,
"output_cost": 2.4e-6
}
]
},
{
"name": "chatMistralAI",
"models": [
Expand Down
118 changes: 118 additions & 0 deletions packages/components/nodes/chatmodels/ChatMiniMax/ChatMiniMax.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
import { ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatMiniMax, ChatMiniMaxInput } from './FlowiseChatMiniMax'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'

class ChatMiniMax_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
credential: INodeParams
inputs: INodeParams[]

constructor() {
this.label = 'ChatMiniMax'
this.name = 'chatMiniMax'
this.version = 1.0
this.type = 'ChatMiniMax'
this.icon = 'minimax.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around MiniMax large language models using Anthropic-compatible API'
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatAnthropic)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['miniMaxApi']
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Model Name',
name: 'modelName',
type: 'asyncOptions',
loadMethod: 'listModels',
default: 'MiniMax-M2.5'
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 1.0,
optional: true
},
{
label: 'Streaming',
name: 'streaming',
type: 'boolean',
default: true,
optional: true,
additionalParams: true
},
{
label: 'Max Tokens',
name: 'maxTokens',
type: 'number',
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Top P',
name: 'topP',
type: 'number',
step: 0.1,
optional: true,
additionalParams: true
}
]
}

//@ts-ignore
loadMethods = {
async listModels(): Promise<INodeOptionsValue[]> {
return await getModels(MODEL_TYPE.CHAT, 'chatMiniMax')
}
}

async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
const topP = nodeData.inputs?.topP as string
const streaming = nodeData.inputs?.streaming as boolean
const cache = nodeData.inputs?.cache as BaseCache

const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const miniMaxApiKey = getCredentialParam('miniMaxApiKey', credentialData, nodeData)

const obj: ChatMiniMaxInput = {
modelName,
miniMaxApiKey,
streaming: streaming ?? true
}

if (temperature) obj.temperature = parseFloat(temperature)
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache
Comment on lines +102 to +111
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

The temperature parameter is being unconditionally parsed and assigned. If a user clears this optional field in the UI, temperature could be undefined or an empty string. In both cases, parseFloat(temperature) would result in NaN, which is then passed to the model constructor. This is likely to cause a runtime error when making an API call. To prevent this, you should handle temperature conditionally, similar to how maxTokens and topP are handled.

Suggested change
const obj: ChatMiniMaxInput = {
temperature: parseFloat(temperature),
modelName,
miniMaxApiKey,
streaming: streaming ?? true
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache
const obj: ChatMiniMaxInput = {
modelName,
miniMaxApiKey,
streaming: streaming ?? true
}
if (temperature) obj.temperature = parseFloat(temperature)
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fixed


const model = new ChatMiniMax(nodeData.id, obj)
return model
}
}

module.exports = { nodeClass: ChatMiniMax_ChatModels }
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import { ChatAnthropic as LangchainChatAnthropic, AnthropicInput } from '@langchain/anthropic'
import { type BaseChatModelParams } from '@langchain/core/language_models/chat_models'

/**
 * Constructor options for ChatMiniMax: every ChatAnthropic field plus a
 * MiniMax-specific API key. When both keys are set, `miniMaxApiKey` takes
 * precedence over `anthropicApiKey`.
 */
export interface ChatMiniMaxInput extends Partial<AnthropicInput>, BaseChatModelParams {
    miniMaxApiKey?: string
}

/**
 * ChatAnthropic subclass routed to MiniMax's Anthropic-compatible endpoint.
 * Remembers the originally configured model/max-token budget so callers can
 * restore them via revertToOriginalModel().
 */
export class ChatMiniMax extends LangchainChatAnthropic {
    configuredModel: string
    configuredMaxToken?: number
    id: string

    constructor(id: string, fields?: ChatMiniMaxInput) {
        // Accept either the MiniMax-specific key or a plain Anthropic key.
        const miniMaxApiKey = fields?.miniMaxApiKey || fields?.anthropicApiKey

        super({
            ...fields,
            anthropicApiKey: miniMaxApiKey,
            clientOptions: {
                // Preserve caller-supplied client options (e.g. default headers)
                // instead of discarding them, while always forcing requests to
                // the MiniMax endpoint.
                ...fields?.clientOptions,
                baseURL: 'https://api.minimax.io/anthropic'
            }
        })

        this.id = id
        this.configuredModel = fields?.modelName || 'MiniMax-M2.5'
        this.configuredMaxToken = fields?.maxTokens

        // @langchain/anthropic defaults topP and topK to -1 as an "unset" sentinel and
        // always serialises them into the request body. The real Anthropic API accepts
        // -1 silently, but MiniMax's Anthropic-compatible endpoint requires top_p/top_k
        // to be in (0, 1]. Setting them to undefined causes JSON.stringify to omit the
        // fields entirely so MiniMax applies its own defaults.
        if (fields?.topP === undefined) this.topP = undefined as unknown as number
        if (fields?.topK === undefined) this.topK = undefined as unknown as number
    }

    /** Restore the model name and max-token budget captured at construction. */
    revertToOriginalModel(): void {
        this.modelName = this.configuredModel
        this.maxTokens = this.configuredMaxToken
    }
}
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
127 changes: 126 additions & 1 deletion packages/components/src/textToSpeech.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@ import type { ReadableStream } from 'node:stream/web'

// Provider identifiers used to dispatch the TTS switch statements below.
const TextToSpeechType = {
    OPENAI_TTS: 'openai',
    ELEVEN_LABS_TTS: 'elevenlabs',
    MINIMAX_TTS: 'minimax'
}

export const convertTextToSpeechStream = async (
Expand Down Expand Up @@ -100,6 +101,118 @@ export const convertTextToSpeechStream = async (
})
break
}

case TextToSpeechType.MINIMAX_TTS: {
    // Tell the caller MP3 chunks will follow.
    onStart('mp3')

    const apiKey = credentialData.miniMaxApiKey
    if (!apiKey) {
        throw new Error('MiniMax API Key is required')
    }

    // Fall back to defaults when the chatflow config leaves voice/model unset.
    const voiceId = textToSpeechConfig.voice || 'English_expressive_narrator'
    const model = textToSpeechConfig.model || 'speech-2.8-hd'

    // T2A v2 request: SSE streaming with hex-encoded MP3 audio payloads.
    const requestBody: Record<string, unknown> = {
        model: model,
        text: text,
        stream: true,
        language_boost: 'auto',
        output_format: 'hex',
        voice_setting: {
            voice_id: voiceId,
            // ?? keeps legitimate falsy values (e.g. pitch 0) instead of clobbering them
            speed: textToSpeechConfig.speed ?? 1.0,
            vol: textToSpeechConfig.vol ?? 1.0,
            pitch: textToSpeechConfig.pitch ?? 0
        },
        audio_setting: {
            format: 'mp3',
            sample_rate: 32000,
            bitrate: 128000,
            channel: 1
        }
    }

    const response = await fetch('https://api.minimax.io/v1/t2a_v2', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${apiKey}`
        },
        body: JSON.stringify(requestBody),
        // Propagates caller-side cancellation into the HTTP request.
        signal: abortController.signal
    })

    if (!response.ok) {
        const errorText = await response.text()
        throw new Error(`MiniMax TTS API error: ${response.status} - ${errorText}`)
    }

    if (!response.body) {
        throw new Error('Failed to get response stream from MiniMax')
    }

    // Incremental SSE decoding; sseBuffer carries any partial line between reads.
    const reader = response.body.getReader()
    const decoder = new TextDecoder()
    let sseBuffer = ''

    const processMinimaxStream = async () => {
        for (;;) {
            // Abort path: cancel the reader and reject without calling onEnd().
            // `streamDestroyed`, `reject` and `resolve` come from the enclosing
            // convertTextToSpeechStream scope.
            if (abortController.signal.aborted) {
                reader.cancel()
                streamDestroyed = true
                reject(new Error('TTS generation aborted'))
                return
            }

            const { done, value } = await reader.read()
            if (done) break

            // Split on newlines; the trailing (possibly incomplete) line is
            // carried over to the next read.
            sseBuffer += decoder.decode(value, { stream: true })
            const lines = sseBuffer.split('\n')
            sseBuffer = lines.pop() || ''

            for (const line of lines) {
                const trimmedLine = line.trim()
                // Skip blank lines and SSE comment lines (": ...").
                if (!trimmedLine || trimmedLine.startsWith(':')) {
                    continue
                }

                if (trimmedLine.startsWith('data:')) {
                    const jsonStr = trimmedLine.slice(5).trim()
                    if (!jsonStr) continue

                    try {
                        const eventData = JSON.parse(jsonStr)

                        // NOTE(review): an event with no base_resp at all also hits this
                        // branch (undefined !== 0) and rejects with 'Unknown error' —
                        // confirm every MiniMax SSE event carries base_resp.
                        if (eventData.base_resp?.status_code !== 0) {
                            const errorMsg = eventData.base_resp?.status_msg || 'Unknown error'
                            reject(new Error(`MiniMax TTS error: ${errorMsg}`))
                            return
                        }

                        // Audio arrives hex-encoded (output_format: 'hex' above).
                        if (eventData.data?.audio) {
                            const audioChunk = Buffer.from(eventData.data.audio, 'hex')
                            onChunk(audioChunk)
                        }

                        // status === 2 marks the final event. NOTE(review): this break
                        // only exits the inner for-of over lines; the outer read loop
                        // keeps running until the server closes the stream.
                        if (eventData.data?.status === 2) {
                            break
                        }
                    } catch {
                        // Skip malformed JSON
                    }
                }
            }
        }

        // Normal completion: signal end-of-audio and settle the promise.
        onEnd()
        resolve()
    }

    await processMinimaxStream()
    break
}
}
} else {
reject(new Error('Text to speech is not selected. Please configure TTS in the chatflow.'))
Expand Down Expand Up @@ -234,6 +347,18 @@ export const getVoices = async (provider: string, credentialId: string, options:
}))
}

case TextToSpeechType.MINIMAX_TTS: {
    // Fixed list of selectable MiniMax voices — no remote lookup is performed here.
    return [
        // English voices (official recommended)
        { id: 'English_expressive_narrator', name: 'Expressive Narrator', category: 'English' },
        { id: 'English_Graceful_Lady', name: 'Graceful Lady', category: 'English' },
        { id: 'English_Insightful_Speaker', name: 'Insightful Speaker', category: 'English' },
        { id: 'English_radiant_girl', name: 'Radiant Girl', category: 'English' },
        { id: 'English_Persuasive_Man', name: 'Persuasive Man', category: 'English' },
        { id: 'English_Lucky_Robot', name: 'Lucky Robot', category: 'English' }
    ]
}

default:
throw new Error(`Unsupported TTS provider: ${provider}`)
}
Expand Down