Skip to content

Commit d7a124a

Browse files
author
Theodore Li
committed
Lint; remove vestigial hosted keys that were never implemented
1 parent 1ac08e5 commit d7a124a

File tree

11 files changed

+28
-65
lines changed

11 files changed

+28
-65
lines changed

apps/sim/app/api/workspaces/[id]/byok-keys/route.ts

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,14 +20,10 @@ const VALID_PROVIDERS = [
2020
'mistral',
2121
'exa',
2222
'huggingface',
23-
'tavily',
2423
'linkup',
2524
'perplexity',
26-
'zep',
2725
'jina',
2826
'google_cloud',
29-
'ahrefs',
30-
'apify',
3127
'elevenlabs',
3228
] as const
3329

apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/byok/byok.tsx

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,7 @@ import {
1414
ModalHeader,
1515
} from '@/components/emcn'
1616
import {
17-
AhrefsIcon,
1817
AnthropicIcon,
19-
ApifyIcon,
2018
BrowserUseIcon,
2119
ElevenLabsIcon,
2220
ExaAIIcon,
@@ -30,8 +28,6 @@ import {
3028
OpenAIIcon,
3129
PerplexityIcon,
3230
SerperIcon,
33-
TavilyIcon,
34-
ZepIcon,
3531
} from '@/components/icons'
3632
import { Skeleton } from '@/components/ui'
3733
import {
@@ -121,34 +117,13 @@ const PROVIDERS: {
121117
description: 'Web search and content retrieval',
122118
placeholder: 'Enter your Linkup API key',
123119
},
124-
{
125-
id: 'tavily',
126-
name: 'Tavily',
127-
icon: TavilyIcon,
128-
description: 'AI-powered web search and content extraction',
129-
placeholder: 'Enter your Tavily API key',
130-
},
131120
{
132121
id: 'perplexity',
133122
name: 'Perplexity',
134123
icon: PerplexityIcon,
135124
description: 'AI-powered chat and web search',
136125
placeholder: 'pplx-...',
137126
},
138-
{
139-
id: 'zep',
140-
name: 'Zep',
141-
icon: ZepIcon,
142-
description: 'Long-term memory for AI agents',
143-
placeholder: 'Enter your Zep API key',
144-
},
145-
{
146-
id: 'ahrefs',
147-
name: 'Ahrefs',
148-
icon: AhrefsIcon,
149-
description: 'SEO analysis, backlinks, and keyword research',
150-
placeholder: 'Enter your Ahrefs API key',
151-
},
152127
{
153128
id: 'jina',
154129
name: 'Jina AI',
@@ -170,13 +145,6 @@ const PROVIDERS: {
170145
description: 'Translate, Maps, PageSpeed, and Books APIs',
171146
placeholder: 'Enter your Google Cloud API key',
172147
},
173-
{
174-
id: 'apify',
175-
name: 'Apify',
176-
icon: ApifyIcon,
177-
description: 'Web scraping, automation, and actor execution',
178-
placeholder: 'Enter your Apify API token',
179-
},
180148
]
181149

182150
function BYOKKeySkeleton() {

apps/sim/lib/api-key/byok.ts

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -97,9 +97,7 @@ export async function getApiKeyWithBYOK(
9797
logger.debug('No BYOK key found, falling back', { provider, model, workspaceId })
9898

9999
if (isModelHosted) {
100-
const envKeyPrefix = isGeminiModel
101-
? 'GEMINI_API_KEY'
102-
: `${provider.toUpperCase()}_API_KEY`
100+
const envKeyPrefix = isGeminiModel ? 'GEMINI_API_KEY' : `${provider.toUpperCase()}_API_KEY`
103101
const rateLimiter = getHostedKeyRateLimiter()
104102
const acquireResult = await rateLimiter.acquireKey(
105103
provider,

apps/sim/providers/utils.ts

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ import type OpenAI from 'openai'
33
import type { ChatCompletionChunk } from 'openai/resources/chat/completions'
44
import type { CompletionUsage } from 'openai/resources/completions'
55
import { env } from '@/lib/core/config/env'
6-
import { isHosted } from '@/lib/core/config/feature-flags'
76
import {
87
buildCanonicalIndex,
98
type CanonicalGroup,
@@ -38,7 +37,6 @@ import {
3837
updateOllamaModels as updateOllamaModelsInDefinitions,
3938
} from '@/providers/models'
4039
import type { ProviderId, ProviderToolConfig } from '@/providers/types'
41-
import { useProvidersStore } from '@/stores/providers/store'
4240
import { mergeToolParameters } from '@/tools/params'
4341

4442
const logger = createLogger('ProviderUtils')

apps/sim/tools/browser_use/run_task.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ export const runTaskTool: ToolConfig<BrowserUseRunTaskParams, BrowserUseRunTaskR
270270
const STEP_COSTS: Record<string, number> = {
271271
'browser-use-llm': 0.002,
272272
'browser-use-2.0': 0.006,
273-
'o3': 0.03,
273+
o3: 0.03,
274274
'o4-mini': 0.03,
275275
'gemini-3-pro-preview': 0.03,
276276
'gemini-3-flash-preview': 0.015,
@@ -291,7 +291,9 @@ export const runTaskTool: ToolConfig<BrowserUseRunTaskParams, BrowserUseRunTaskR
291291
const model = (params.model as string) || 'browser-use-2.0'
292292
const knownCost = STEP_COSTS[model]
293293
if (!knownCost) {
294-
logger.warn(`Unknown Browser Use model "${model}", using default step cost $${DEFAULT_STEP_COST}`)
294+
logger.warn(
295+
`Unknown Browser Use model "${model}", using default step cost $${DEFAULT_STEP_COST}`
296+
)
295297
}
296298
const stepCost = knownCost ?? DEFAULT_STEP_COST
297299
const stepCount = output.steps.length

apps/sim/tools/elevenlabs/tts.ts

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,7 @@
11
import type { ElevenLabsTtsParams, ElevenLabsTtsResponse } from '@/tools/elevenlabs/types'
22
import type { ToolConfig } from '@/tools/types'
33

4-
const FLASH_TURBO_MODELS = new Set([
5-
'eleven_turbo_v2',
6-
'eleven_turbo_v2_5',
7-
'eleven_flash_v2_5',
8-
])
4+
const FLASH_TURBO_MODELS = new Set(['eleven_turbo_v2', 'eleven_turbo_v2_5', 'eleven_flash_v2_5'])
95

106
export const elevenLabsTtsTool: ToolConfig<ElevenLabsTtsParams, ElevenLabsTtsResponse> = {
117
id: 'elevenlabs_tts',

apps/sim/tools/huggingface/chat.ts

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,14 @@ export const chatTool: ToolConfig<HuggingFaceChatParams, HuggingFaceChatResponse
2929
// https://huggingface.co/docs/api-inference/rate-limits
3030
const totalTokens = usage.total_tokens
3131
const cost = (totalTokens / 1_000_000) * 3
32-
return { cost, metadata: { promptTokens: usage.prompt_tokens, completionTokens: usage.completion_tokens, totalTokens } }
32+
return {
33+
cost,
34+
metadata: {
35+
promptTokens: usage.prompt_tokens,
36+
completionTokens: usage.completion_tokens,
37+
totalTokens,
38+
},
39+
}
3340
},
3441
},
3542
rateLimit: {

apps/sim/tools/jina/read_url.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -188,8 +188,8 @@ export const readUrlTool: ToolConfig<ReadUrlParams, ReadUrlResponse> = {
188188

189189
const tokensHeader = response.headers.get('x-tokens')
190190
if (tokensHeader) {
191-
const parsed = parseInt(tokensHeader, 10)
192-
if (!isNaN(parsed)) {
191+
const parsed = Number.parseInt(tokensHeader, 10)
192+
if (!Number.isNaN(parsed)) {
193193
tokensUsed = parsed
194194
}
195195
}

apps/sim/tools/jina/search.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,8 +178,8 @@ export const searchTool: ToolConfig<SearchParams, SearchResponse> = {
178178
let tokensUsed: number | undefined
179179
const tokensHeader = response.headers.get('x-tokens')
180180
if (tokensHeader) {
181-
const parsed = parseInt(tokensHeader, 10)
182-
if (!isNaN(parsed) && parsed > 0) {
181+
const parsed = Number.parseInt(tokensHeader, 10)
182+
if (!Number.isNaN(parsed) && parsed > 0) {
183183
tokensUsed = parsed
184184
}
185185
}

apps/sim/tools/perplexity/chat.ts

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5,13 +5,15 @@ import type { ToolConfig } from '@/tools/types'
55
* Per-token rates by model from https://docs.perplexity.ai/guides/pricing
66
* Per-request fees assume Low context size (the API default).
77
*/
8-
const MODEL_PRICING: Record<string, { inputPerM: number; outputPerM: number; requestPer1K: number }> =
9-
{
10-
'sonar-deep-research': { inputPerM: 2, outputPerM: 8, requestPer1K: 0 },
11-
'sonar-reasoning-pro': { inputPerM: 2, outputPerM: 8, requestPer1K: 6 },
12-
'sonar-pro': { inputPerM: 3, outputPerM: 15, requestPer1K: 6 },
13-
sonar: { inputPerM: 1, outputPerM: 1, requestPer1K: 5 },
14-
}
8+
const MODEL_PRICING: Record<
9+
string,
10+
{ inputPerM: number; outputPerM: number; requestPer1K: number }
11+
> = {
12+
'sonar-deep-research': { inputPerM: 2, outputPerM: 8, requestPer1K: 0 },
13+
'sonar-reasoning-pro': { inputPerM: 2, outputPerM: 8, requestPer1K: 6 },
14+
'sonar-pro': { inputPerM: 3, outputPerM: 15, requestPer1K: 6 },
15+
sonar: { inputPerM: 1, outputPerM: 1, requestPer1K: 5 },
16+
}
1517

1618
function getModelPricing(model: string) {
1719
for (const [key, pricing] of Object.entries(MODEL_PRICING)) {

0 commit comments

Comments (0)