Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 39 additions & 1 deletion src/main/main.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1164,6 +1164,7 @@ interface ModelsCache {
version: number; // Cache version for invalidation
}
let modelsCache: ModelsCache | null = null;
let modelsWarmupPromise: Promise<void> | null = null;
const MODEL_CACHE_TTL = 24 * 60 * 60 * 1000; // 24 hours (models don't change frequently)
const MODEL_CACHE_VERSION = 3; // Increment when model schema or fetch logic changes (bumped to 3 to refresh all caches with latest 17 models)

Expand Down Expand Up @@ -1316,6 +1317,33 @@ async function fetchModelsFromAPI(client: CopilotClient): Promise<ModelInfo[]> {
}
}

function warmModelsCacheInBackground(): void {
  // Nothing to do when a warmup is already in flight, or the cache is
  // already populated (guards run in that order via short-circuit).
  if (modelsWarmupPromise || getCachedModels().length > 0) {
    return;
  }

  const copilotClient = getDefaultClient();
  if (!copilotClient) {
    console.warn('[warmModelsCacheInBackground] No Copilot client available yet');
    return;
  }

  // Fire-and-forget fetch; the stored promise doubles as the in-flight flag.
  const warmup = async (): Promise<void> => {
    try {
      const fetchedModels = await fetchModelsFromAPI(copilotClient);
      // Notify the renderer so an already-open UI picks up the verified list.
      if (fetchedModels.length > 0 && mainWindow && !mainWindow.isDestroyed()) {
        mainWindow.webContents.send('copilot:modelsVerified', { models: fetchedModels });
      }
    } catch (error) {
      console.warn('[warmModelsCacheInBackground] Failed to warm model cache:', error);
    } finally {
      // Clear the flag so a later call can retry (success or failure alike).
      modelsWarmupPromise = null;
    }
  };

  modelsWarmupPromise = warmup();
}

// Preferred models for quick, simple AI tasks (in order of preference)
// These are typically free/cheap models optimized for simple text generation
const QUICK_TASKS_MODEL_PREFERENCES = ['gpt-4.1', 'gpt-5-mini', 'claude-haiku-4.5'];
Expand Down Expand Up @@ -2247,7 +2275,8 @@ async function initCopilot(): Promise<void> {
}
}

// Models will be fetched on-demand when user opens model selector (via copilot:getModels IPC)
// Warm model cache asynchronously on startup to avoid first-open model selector lag.
warmModelsCacheInBackground();

// Start keep-alive timer to prevent session timeouts
startKeepAlive();
Expand Down Expand Up @@ -3091,6 +3120,15 @@ ipcMain.handle('copilot:getModels', async () => {

// No valid cache - fetch fresh from API
console.log('[copilot:getModels] No cache, fetching from API...');
if (modelsWarmupPromise) {
await modelsWarmupPromise;
const warmedModels = getCachedModels();
if (warmedModels.length > 0) {
console.log(`[copilot:getModels] Returning ${warmedModels.length} warmed models`);
return { models: warmedModels, current: currentModel };
}
}

const client = getDefaultClient();
if (!client) {
console.warn('No Copilot client available for fetching models');
Expand Down
7 changes: 7 additions & 0 deletions src/renderer/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -1486,6 +1486,12 @@ const App: React.FC = () => {
.catch((err) => console.error(`Failed to load history for ${s.sessionId}:`, err));
});

const unsubscribeModelsVerified = window.electronAPI.copilot.onModelsVerified((data) => {
if (data.models.length > 0) {
setAvailableModels(data.models);
}
});

// Also fetch models in case ready event was missed (baseline list only)
window.electronAPI.copilot
.getModels()
Expand Down Expand Up @@ -2401,6 +2407,7 @@ Only output ${RALPH_COMPLETION_SIGNAL} when ALL items above are verified complet
unsubscribePermission();
unsubscribeError();
unsubscribeSessionResumed();
unsubscribeModelsVerified();
unsubscribeUsageInfo();
unsubscribeCompactionStart();
unsubscribeCompactionComplete();
Expand Down