Skip to content

Commit 5d187b4

Browse files
committed
update model filter for current crop of models
1 parent d01ca73 commit 5d187b4

2 files changed

Lines changed: 82 additions & 9 deletions

File tree

src/gpt.js

Lines changed: 44 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -219,9 +219,52 @@ export async function fetchAndStream(port, messages, options = {}) {
219219
//------------------------------------------------------------------------------
// Filters the list of OpenAI models to only those that are useful for our
// purposes.
//
// Notes:
// - We use the Chat Completions endpoint (/v1/chat/completions), not Responses.
// - /v1/models returns many model IDs (embeddings, audio, moderation, etc.). We
//   only want models intended for text generation.
// - Prefers undated aliases, but date preference is enforced in model list
//   construction. (OpenAI tends to provide `foo-YYYY-MM-DD` *and* a dateless
//   `foo` alias.)
//------------------------------------------------------------------------------

// Chat/text model families we accept. Hoisted to module level so the regex
// literals are not rebuilt on every call (they are stateless: no /g flag).
const ALLOW_RULES = [/^gpt-/i, /^o\d/i];

// Models that are not intended for this extension's text summaries.
const DENY_RULES = [
  // Explicit non-text model families.
  /^text-embedding-/i,
  /^embedding-/i,
  /^whisper-/i,
  /\btts-/i,
  /-tts\b/i,
  /^omni-moderation-/i,
  /^text-moderation-/i,
  /^moderation-/i,
  /^dall-e-/i,

  // Known "not what we want" tokens/families.
  /(^|-)instruct($|-)/i,
  /(^|-)preview($|-)/i,
  /(^|-)pro($|-)/i,
  /(^|-)image($|-)/i,
  /(^|-)realtime($|-)/i,
  /(^|-)audio($|-)/i,
  /(^|-)transcribe($|-)/i,
  /(^|-)diarize($|-)/i,
  /(^|-)search($|-)/i,
  /(^|-)codex($|-)/i,
  /(^|-)deep-research($|-)/i,
];

// Exclude turbo variants (e.g. gpt-4-turbo) but keep gpt-3.5-turbo.
// (We do this with a regex rather than an explicit model list.)
const TURBO_RE = /(^|-)turbo($|-)/i;
const ALLOWED_TURBO_RE = /^gpt-3\.5-turbo($|-)/i;

/**
 * Decide whether an OpenAI model ID should be offered to the user.
 *
 * @param {string} model - A model ID as returned by /v1/models
 *   (e.g. "gpt-4o", "o1-mini", "whisper-1").
 * @returns {boolean} true if the ID belongs to an allowed chat/text family
 *   and matches none of the deny rules.
 */
export function wantModel(model) {
  const allow = ALLOW_RULES.some((re) => re.test(model));

  const isDeniedTurbo = TURBO_RE.test(model) && !ALLOWED_TURBO_RE.test(model);
  const deny = DENY_RULES.some((re) => re.test(model)) || isDeniedTurbo;

  return allow && !deny;
}
226269

227270
//------------------------------------------------------------------------------

src/pages/config.js

Lines changed: 38 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -284,18 +284,38 @@ document.addEventListener('DOMContentLoaded', async () => {
284284
});
285285

286286
if (!response.ok) {
287+
const errorMsg = response.statusText;
287288
console.error('Error fetching models:', response);
288-
throw new Error(`Error fetching models: ${response.statusText}`);
289+
return { models: [], error: `Error fetching models: ${errorMsg}` };
289290
}
290291

291292
const data = await response.json();
292-
const models = data.data.map((model) => model.id).filter(wantModel);
293-
models.sort();
294-
295-
return models;
293+
const candidateIds = data.data.map((model) => model.id);
294+
const filtered = candidateIds.filter(wantModel);
295+
const withoutNumericSuffix = filtered.filter(id => !/-\d{4,}$/.test(id));
296+
const DATE_SUFFIX_RE = /-\d{4}-\d{2}-\d{2}$/;
297+
// Build per-base-model selection map
298+
const modelMap = {};
299+
for (const id of withoutNumericSuffix) {
300+
const base = id.replace(DATE_SUFFIX_RE, '');
301+
if (!modelMap[base]) {
302+
modelMap[base] = { undated: null, dated: null };
303+
}
304+
if (DATE_SUFFIX_RE.test(id)) {
305+
// Keep first dated if multiple exist
306+
if (modelMap[base].dated === null) {
307+
modelMap[base].dated = id;
308+
}
309+
} else {
310+
modelMap[base].undated = id;
311+
}
312+
}
313+
const chosen = Object.values(modelMap).map(({undated, dated}) => undated || dated);
314+
chosen.sort();
315+
return { models: chosen, error: null };
296316
} catch (error) {
297317
console.error(error);
298-
return [];
318+
return { models: [], error: error.message };
299319
}
300320
}
301321

@@ -315,10 +335,19 @@ document.addEventListener('DOMContentLoaded', async () => {
315335
return;
316336
}
317337

318-
const models = await fetchAvailableModels(apiKeyValue);
338+
const result = await fetchAvailableModels(apiKeyValue);
339+
340+
if (result.error) {
341+
showError(`Failed to refresh models: ${result.error}`);
342+
return;
343+
}
344+
345+
const models = result.models;
319346

320347
if (models.length === 0) {
321-
showError('No models retrieved. Please check your API key and try again.');
348+
showError(
349+
'No usable chat models were returned. This can happen if OpenAI returned only models that this extension filters out (preview/turbo/pro/dated/audio/etc).'
350+
);
322351
return;
323352
}
324353

@@ -341,6 +370,7 @@ document.addEventListener('DOMContentLoaded', async () => {
341370
} else {
342371
modelSelect.value = defaultModel;
343372
}
373+
toggleReasoningOptions();
344374
} catch (error) {
345375
showError(`Failed to refresh models: ${error.message}`);
346376
} finally {

0 commit comments

Comments
 (0)