Skip to content

Commit 7b5168f

Browse files
committed
feat: added groq as a provider
1 parent 4e51d25 commit 7b5168f

File tree

5 files changed

+237
-2
lines changed

5 files changed

+237
-2
lines changed

package-lock.json

Lines changed: 31 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
"croner": "^9.0.0",
4343
"date-fns": "^3.6.0",
4444
"drizzle-orm": "^0.39.3",
45+
"groq-sdk": "^0.15.0",
4546
"lodash.debounce": "^4.0.8",
4647
"lucide-react": "^0.469.0",
4748
"next": "^15.2.0",

providers/groq/index.ts

Lines changed: 186 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,186 @@
1+
import { Groq } from 'groq-sdk'
2+
import { executeTool } from '@/tools'
3+
import { ProviderConfig, ProviderRequest, ProviderResponse } from '../types'
4+
5+
export const groqProvider: ProviderConfig = {
6+
id: 'groq',
7+
name: 'Groq',
8+
description: "Groq's LLM models with high-performance inference",
9+
version: '1.0.0',
10+
models: ['groq/llama-3.3-70b-specdec', 'groq/deepseek-r1-distill-llama-70b', 'groq/qwen-2.5-32b'],
11+
defaultModel: 'groq/llama-3.3-70b-specdec',
12+
13+
executeRequest: async (request: ProviderRequest): Promise<ProviderResponse> => {
14+
if (!request.apiKey) {
15+
throw new Error('API key is required for Groq')
16+
}
17+
18+
const groq = new Groq({
19+
apiKey: request.apiKey,
20+
dangerouslyAllowBrowser: true,
21+
})
22+
23+
// Start with an empty array for all messages
24+
const allMessages = []
25+
26+
// Add system prompt if present
27+
if (request.systemPrompt) {
28+
allMessages.push({
29+
role: 'system',
30+
content: request.systemPrompt,
31+
})
32+
}
33+
34+
// Add context if present
35+
if (request.context) {
36+
allMessages.push({
37+
role: 'user',
38+
content: request.context,
39+
})
40+
}
41+
42+
// Add remaining messages
43+
if (request.messages) {
44+
allMessages.push(...request.messages)
45+
}
46+
47+
// Transform tools to function format if provided
48+
const tools = request.tools?.length
49+
? request.tools.map((tool) => ({
50+
type: 'function',
51+
function: {
52+
name: tool.id,
53+
description: tool.description,
54+
parameters: tool.parameters,
55+
},
56+
}))
57+
: undefined
58+
59+
// Build the request payload
60+
const payload: any = {
61+
model: (request.model || 'groq/llama-3.3-70b-specdec').replace('groq/', ''),
62+
messages: allMessages,
63+
}
64+
65+
// Add optional parameters
66+
if (request.temperature !== undefined) payload.temperature = request.temperature
67+
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
68+
69+
// Add response format for structured output if specified
70+
if (request.responseFormat) {
71+
payload.response_format = { type: 'json_object' }
72+
}
73+
74+
// Add tools if provided
75+
if (tools?.length) {
76+
payload.tools = tools
77+
payload.tool_choice = 'auto'
78+
}
79+
80+
// Make the initial API request
81+
let currentResponse = await groq.chat.completions.create(payload)
82+
let content = currentResponse.choices[0]?.message?.content || ''
83+
let tokens = {
84+
prompt: currentResponse.usage?.prompt_tokens || 0,
85+
completion: currentResponse.usage?.completion_tokens || 0,
86+
total: currentResponse.usage?.total_tokens || 0,
87+
}
88+
let toolCalls = []
89+
let toolResults = []
90+
let currentMessages = [...allMessages]
91+
let iterationCount = 0
92+
const MAX_ITERATIONS = 10 // Prevent infinite loops
93+
94+
try {
95+
while (iterationCount < MAX_ITERATIONS) {
96+
// Check for tool calls
97+
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
98+
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
99+
break
100+
}
101+
102+
// Process each tool call
103+
for (const toolCall of toolCallsInResponse) {
104+
try {
105+
const toolName = toolCall.function.name
106+
const toolArgs = JSON.parse(toolCall.function.arguments)
107+
108+
// Get the tool from the tools registry
109+
const tool = request.tools?.find((t) => t.id === toolName)
110+
if (!tool) continue
111+
112+
// Execute the tool
113+
const mergedArgs = { ...tool.params, ...toolArgs }
114+
const result = await executeTool(toolName, mergedArgs)
115+
116+
if (!result.success) continue
117+
118+
toolResults.push(result.output)
119+
toolCalls.push({
120+
name: toolName,
121+
arguments: toolArgs,
122+
})
123+
124+
// Add the tool call and result to messages
125+
currentMessages.push({
126+
role: 'assistant',
127+
content: null,
128+
tool_calls: [
129+
{
130+
id: toolCall.id,
131+
type: 'function',
132+
function: {
133+
name: toolName,
134+
arguments: toolCall.function.arguments,
135+
},
136+
},
137+
],
138+
})
139+
140+
currentMessages.push({
141+
role: 'tool',
142+
tool_call_id: toolCall.id,
143+
content: JSON.stringify(result.output),
144+
})
145+
} catch (error) {
146+
console.error('Error processing tool call:', error)
147+
}
148+
}
149+
150+
// Make the next request with updated messages
151+
const nextPayload = {
152+
...payload,
153+
messages: currentMessages,
154+
}
155+
156+
// Make the next request
157+
currentResponse = await groq.chat.completions.create(nextPayload)
158+
159+
// Update content if we have a text response
160+
if (currentResponse.choices[0]?.message?.content) {
161+
content = currentResponse.choices[0].message.content
162+
}
163+
164+
// Update token counts
165+
if (currentResponse.usage) {
166+
tokens.prompt += currentResponse.usage.prompt_tokens || 0
167+
tokens.completion += currentResponse.usage.completion_tokens || 0
168+
tokens.total += currentResponse.usage.total_tokens || 0
169+
}
170+
171+
iterationCount++
172+
}
173+
} catch (error) {
174+
console.error('Error in Groq request:', error)
175+
throw error
176+
}
177+
178+
return {
179+
content,
180+
model: request.model,
181+
tokens,
182+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
183+
toolResults: toolResults.length > 0 ? toolResults : undefined,
184+
}
185+
},
186+
}

providers/types.ts

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,11 @@
1-
export type ProviderId = 'openai' | 'anthropic' | 'google' | 'deepseek' | 'xai' | 'cerebras'
1+
export type ProviderId =
2+
| 'openai'
3+
| 'anthropic'
4+
| 'google'
5+
| 'deepseek'
6+
| 'xai'
7+
| 'cerebras'
8+
| 'groq'
29

310
export interface TokenInfo {
411
prompt?: number

providers/utils.ts

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ import { anthropicProvider } from './anthropic'
33
import { cerebrasProvider } from './cerebras'
44
import { deepseekProvider } from './deepseek'
55
import { googleProvider } from './google'
6+
import { groqProvider } from './groq'
67
import { openaiProvider } from './openai'
78
import { ProviderConfig, ProviderId, ProviderToolConfig } from './types'
89
import { xAIProvider } from './xai'
@@ -45,7 +46,16 @@ export const providers: Record<
4546
cerebras: {
4647
...cerebrasProvider,
4748
models: ['cerebras/llama-3.3-70b'],
48-
modelPatterns: [/^cerebras\/llama/],
49+
modelPatterns: [/^cerebras/],
50+
},
51+
groq: {
52+
...groqProvider,
53+
models: [
54+
'groq/llama-3.3-70b-specdec',
55+
'groq/deepseek-r1-distill-llama-70b',
56+
'groq/qwen-2.5-32b',
57+
],
58+
modelPatterns: [/^groq/],
4959
},
5060
}
5161

0 commit comments

Comments (0)