Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/browser/components/ChatInput/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -2268,7 +2268,7 @@ const ChatInputInner: React.FC<ChatInputProps> = (props) => {
className="flex items-center [&_.thinking-slider]:[@container(max-width:550px)]:hidden"
data-component="ThinkingSliderGroup"
>
<ThinkingSliderComponent modelString={baseModel} />
<ThinkingSliderComponent />
</div>

<div className="ml-4 flex items-center" data-component="ModelSettingsGroup">
Expand Down
8 changes: 3 additions & 5 deletions src/browser/components/ThinkingSlider.tsx
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import React, { useEffect, useId } from "react";
import type { ThinkingLevel } from "@/common/types/thinking";
import { useThinkingModel } from "@/browser/contexts/ThinkingContext";
import { useThinkingLevel } from "@/browser/hooks/useThinkingLevel";
import { Tooltip, TooltipTrigger, TooltipContent } from "./ui/tooltip";
import { formatKeybind, KEYBINDS } from "@/browser/utils/ui/keybinds";
Expand Down Expand Up @@ -59,12 +60,9 @@ const getSliderStyles = (value: number, isHover = false) => {
};
};

interface ThinkingControlProps {
modelString: string;
}

export const ThinkingSliderComponent: React.FC<ThinkingControlProps> = ({ modelString }) => {
export const ThinkingSliderComponent: React.FC = () => {
const [thinkingLevel, setThinkingLevel] = useThinkingLevel();
const modelString = useThinkingModel();
const [isHovering, setIsHovering] = React.useState(false);
const sliderId = useId();
const allowed = getThinkingPolicyForModel(modelString);
Expand Down
19 changes: 16 additions & 3 deletions src/browser/components/WorkspaceModeAISync.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,23 @@ import {
MODE_AI_DEFAULTS_KEY,
} from "@/common/constants/storage";
import { getDefaultModel } from "@/browser/hooks/useModelsFromSettings";
import { migrateGatewayModel } from "@/browser/hooks/useGatewayModels";
import { resolveModelAlias } from "@/common/utils/ai/models";
import { enforceThinkingPolicy } from "@/common/utils/thinking/policy";
import { coerceThinkingLevel, type ThinkingLevel } from "@/common/types/thinking";
import type { ModeAiDefaults } from "@/common/types/modeAiDefaults";
import type { AgentAiDefaults } from "@/common/types/agentAiDefaults";

// Canonicalize a persisted model string: resolve aliases and migrate legacy
// gateway identifiers, falling back when the input (or its canonical form) is blank.
const normalizeModelString = (model: string, fallback: string): string => {
  const input = model.trim();
  if (input === "") {
    return fallback;
  }

  const canonical = migrateGatewayModel(resolveModelAlias(input)).trim();
  return canonical === "" ? fallback : canonical;
};

type WorkspaceAISettingsCache = Partial<
Record<string, { model: string; thinkingLevel: ThinkingLevel }>
>;
Expand Down Expand Up @@ -75,6 +87,7 @@ export function WorkspaceModeAISync(props: { workspaceId: string }): null {
typeof candidateModel === "string" && candidateModel.trim().length > 0
? candidateModel
: fallbackModel;
const effectiveModel = normalizeModelString(resolvedModel, fallbackModel);

const existingThinking = readPersistedState<ThinkingLevel>(thinkingKey, "off");
const candidateThinking =
Expand All @@ -86,10 +99,10 @@ export function WorkspaceModeAISync(props: { workspaceId: string }): null {
"off";
const resolvedThinking = coerceThinkingLevel(candidateThinking) ?? "off";

const effectiveThinking = enforceThinkingPolicy(resolvedModel, resolvedThinking);
const effectiveThinking = enforceThinkingPolicy(effectiveModel, resolvedThinking);

if (existingModel !== resolvedModel) {
updatePersistedState(modelKey, resolvedModel);
if (existingModel !== effectiveModel) {
updatePersistedState(modelKey, effectiveModel);
}

if (existingThinking !== effectiveThinking) {
Expand Down
46 changes: 27 additions & 19 deletions src/browser/contexts/ThinkingContext.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import { useAPI } from "@/browser/contexts/API";
import { KEYBINDS, matchesKeybind } from "@/browser/utils/ui/keybinds";

interface ThinkingContextType {
model: string;
thinkingLevel: ThinkingLevel;
setThinkingLevel: (level: ThinkingLevel) => void;
}
Expand All @@ -38,17 +39,20 @@ function getScopeId(workspaceId: string | undefined, projectPath: string | undef
return workspaceId ?? (projectPath ? getProjectScopeId(projectPath) : GLOBAL_SCOPE_ID);
}

function getCanonicalModelForScope(scopeId: string, fallbackModel: string): string {
const rawModel = readPersistedState<string>(getModelKey(scopeId), fallbackModel);
return migrateGatewayModel(rawModel || fallbackModel);
}

export const ThinkingProvider: React.FC<ThinkingProviderProps> = (props) => {
const { api } = useAPI();
const defaultModel = getDefaultModel();
const scopeId = getScopeId(props.workspaceId, props.projectPath);
const thinkingKey = getThinkingLevelKey(scopeId);

const [rawModel] = usePersistedState<string>(getModelKey(scopeId), defaultModel, {
listener: true,
});
const canonicalModel = useMemo(
() => migrateGatewayModel(rawModel || defaultModel),
[rawModel, defaultModel]
);

// Workspace-scoped thinking. (No longer per-model.)
const [thinkingLevel, setThinkingLevelInternal] = usePersistedState<ThinkingLevel>(
thinkingKey,
Expand All @@ -63,21 +67,19 @@ export const ThinkingProvider: React.FC<ThinkingProviderProps> = (props) => {
return;
}

const model = getCanonicalModelForScope(scopeId, defaultModel);
const legacyKey = getThinkingLevelByModelKey(model);
const legacyKey = getThinkingLevelByModelKey(canonicalModel);
const legacy = readPersistedState<ThinkingLevel | undefined>(legacyKey, undefined);
if (legacy === undefined) {
return;
}

const effective = enforceThinkingPolicy(model, legacy);
const effective = enforceThinkingPolicy(canonicalModel, legacy);
updatePersistedState(thinkingKey, effective);
}, [defaultModel, scopeId, thinkingKey]);
}, [canonicalModel, thinkingKey]);

const setThinkingLevel = useCallback(
(level: ThinkingLevel) => {
const model = getCanonicalModelForScope(scopeId, defaultModel);
const effective = enforceThinkingPolicy(model, level);
const effective = enforceThinkingPolicy(canonicalModel, level);

setThinkingLevelInternal(effective);

Expand All @@ -99,7 +101,7 @@ export const ThinkingProvider: React.FC<ThinkingProviderProps> = (props) => {
prev && typeof prev === "object" ? prev : {};
return {
...record,
[agentId]: { model, thinkingLevel: effective },
[agentId]: { model: canonicalModel, thinkingLevel: effective },
};
},
{}
Expand All @@ -115,13 +117,13 @@ export const ThinkingProvider: React.FC<ThinkingProviderProps> = (props) => {
.updateModeAISettings({
workspaceId: props.workspaceId,
mode: agentId,
aiSettings: { model, thinkingLevel: effective },
aiSettings: { model: canonicalModel, thinkingLevel: effective },
})
.catch(() => {
// Best-effort only. If offline or backend is old, the next sendMessage will persist.
});
},
[api, defaultModel, props.workspaceId, scopeId, setThinkingLevelInternal]
[api, canonicalModel, props.workspaceId, scopeId, setThinkingLevelInternal]
);

// Global keybind: cycle thinking level (Ctrl/Cmd+Shift+T).
Expand All @@ -133,10 +135,13 @@ export const ThinkingProvider: React.FC<ThinkingProviderProps> = (props) => {
return;
}

if (e.repeat) {
return;
}

e.preventDefault();

const model = getCanonicalModelForScope(scopeId, defaultModel);
const allowed = getThinkingPolicyForModel(model);
const allowed = getThinkingPolicyForModel(canonicalModel);
if (allowed.length <= 1) {
return;
}
Expand All @@ -148,17 +153,20 @@ export const ThinkingProvider: React.FC<ThinkingProviderProps> = (props) => {

window.addEventListener("keydown", handleKeyDown);
return () => window.removeEventListener("keydown", handleKeyDown);
}, [defaultModel, scopeId, thinkingLevel, setThinkingLevel]);
}, [canonicalModel, thinkingLevel, setThinkingLevel]);

// Memoize context value to prevent unnecessary re-renders of consumers.
const contextValue = useMemo(
() => ({ thinkingLevel, setThinkingLevel }),
[thinkingLevel, setThinkingLevel]
() => ({ model: canonicalModel, thinkingLevel, setThinkingLevel }),
[canonicalModel, thinkingLevel, setThinkingLevel]
);

return <ThinkingContext.Provider value={contextValue}>{props.children}</ThinkingContext.Provider>;
};

// Convenience hook: expose only the canonical model string from ThinkingContext.
export const useThinkingModel = () => useThinking().model;
export const useThinking = () => {
const context = useContext(ThinkingContext);
if (!context) {
Expand Down
27 changes: 27 additions & 0 deletions src/common/utils/thinking/policy.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,15 @@ describe("getThinkingPolicyForModel", () => {
]);
});

test("returns 5 levels including xhigh for gpt-5.2-codex preview suffix", () => {
expect(getThinkingPolicyForModel("openai:gpt-5.2-codex-2025-12-11-preview")).toEqual([
"off",
"low",
"medium",
"high",
"xhigh",
]);
});
test("returns 5 levels including xhigh for gpt-5.2-codex", () => {
expect(getThinkingPolicyForModel("openai:gpt-5.2-codex")).toEqual([
"off",
Expand Down Expand Up @@ -113,6 +122,24 @@ describe("getThinkingPolicyForModel", () => {
"xhigh",
]);
});

test("returns default levels for gpt-5.2-pro-mini", () => {
expect(getThinkingPolicyForModel("openai:gpt-5.2-pro-mini")).toEqual([
"off",
"low",
"medium",
"high",
]);
});

test("returns default levels for gpt-5.2-codex-mini", () => {
expect(getThinkingPolicyForModel("openai:gpt-5.2-codex-mini")).toEqual([
"off",
"low",
"medium",
"high",
]);
});
test("returns medium/high/xhigh for gpt-5.2-pro with version suffix", () => {
expect(getThinkingPolicyForModel("openai:gpt-5.2-pro-2025-12-11")).toEqual([
"medium",
Expand Down
9 changes: 6 additions & 3 deletions src/common/utils/thinking/policy.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,17 +53,20 @@ export function getThinkingPolicyForModel(modelString: string): ThinkingPolicy {
}

// GPT-5.2-Codex supports 5 reasoning levels including xhigh (Extra High)
if (/^gpt-5\.2-codex(?!-[a-z])/.test(withoutProviderNamespace)) {
// Allow version suffixes like -2025-12-11-preview, but exclude mini variants.
if (/^gpt-5\.2-codex(?!-mini\b)/.test(withoutProviderNamespace)) {
return ["off", "low", "medium", "high", "xhigh"];
}

// gpt-5.2-pro supports medium, high, xhigh reasoning levels
if (/^gpt-5\.2-pro(?!-[a-z])/.test(withoutProviderNamespace)) {
// Allow version suffixes like -2025-12-11-preview, but exclude mini variants.
if (/^gpt-5\.2-pro(?!-mini\b)/.test(withoutProviderNamespace)) {
return ["medium", "high", "xhigh"];
}

// gpt-5.2 supports 5 reasoning levels including xhigh (Extra High)
if (/^gpt-5\.2(?!-[a-z])/.test(withoutProviderNamespace)) {
// Allow version suffixes like -2025-12-11-preview, but exclude any mini variants.
if (/^gpt-5\.2(?!.*-mini\b)/.test(withoutProviderNamespace)) {
return ["off", "low", "medium", "high", "xhigh"];
}

Expand Down
Loading
Loading