diff --git a/src/lib/config/services/git.test.ts b/src/lib/config/services/git.test.ts
index 4415674..768f5fb 100644
--- a/src/lib/config/services/git.test.ts
+++ b/src/lib/config/services/git.test.ts
@@ -119,6 +119,120 @@ describe('loadGitConfig', () => {
     expect(config.mode).toBe('stdout')
     expect(config.defaultBranch).toBe('test')
   })
+
+  it('preserves default service fields when gitconfig only sets a few keys', () => {
+    // Regression: previously the loader REPLACED the service object
+    // entirely, so a gitconfig that only specified serviceProvider +
+    // serviceApiKey would wipe the default tokenLimit, temperature,
+    // maxConcurrent, etc. — leaving a hollow service shape that
+    // failed downstream because all the LLM-call defaults were gone.
+    const sparseGitConfig = `
+[coco]
+  serviceProvider = openai
+  serviceModel = gpt-4o
+  serviceApiKey = test-api-key
+`
+    const defaultsOpenai: Partial<Config> = {
+      service: getDefaultServiceConfigFromAlias('openai'),
+      mode: 'stdout',
+      defaultBranch: 'test',
+    }
+    mockFs.existsSync.mockReturnValue(true)
+    mockFs.readFileSync.mockReturnValue(sparseGitConfig)
+
+    const config = loadGitConfig(defaultsOpenai as Config)
+    const service = config.service as OpenAILLMService
+
+    // gitconfig override applied
+    expect(service.model).toBe('gpt-4o')
+    if (service.authentication.type === 'APIKey') {
+      expect(service.authentication.credentials.apiKey).toBe('test-api-key')
+    }
+    // Defaults preserved — the previous bug wiped these
+    expect(service.tokenLimit).toBeDefined()
+    expect(service.temperature).toBeDefined()
+    expect(service.maxConcurrent).toBeDefined()
+  })
+
+  it('does not attach requestOptions when neither sub-field is set', () => {
+    // Regression: previously the loader built `requestOptions: {
+    // timeout: Number(undefined), maxRetries: Number(undefined) }` →
+    // `{ timeout: NaN, maxRetries: NaN }` → JSON-serializes to
+    // `{ timeout: null, maxRetries: null }` → schema validation
+    // rejected since timeout/maxRetries must be number.
+    const noRequestOptions = `
+[coco]
+  serviceProvider = openai
+  serviceModel = gpt-4.1-nano
+  serviceApiKey = sk-test
+`
+    const defaultsOpenai: Partial<Config> = {
+      service: getDefaultServiceConfigFromAlias('openai'),
+      mode: 'stdout',
+      defaultBranch: 'test',
+    }
+    mockFs.existsSync.mockReturnValue(true)
+    mockFs.readFileSync.mockReturnValue(noRequestOptions)
+
+    const config = loadGitConfig(defaultsOpenai as Config)
+    const service = config.service as OpenAILLMService
+
+    // No spurious requestOptions object created from undefined keys.
+    expect(service.requestOptions).toBeUndefined()
+  })
+
+  it('attaches only the requestOptions sub-fields that gitconfig actually sets', () => {
+    // Partial set: only timeout, no maxRetries. Result should have
+    // timeout populated and maxRetries unset (not NaN).
+    const partialRequestOptions = `
+[coco]
+  serviceProvider = openai
+  serviceModel = gpt-4o
+  serviceApiKey = sk-test
+  serviceRequestOptionsTimeout = 15000
+`
+    const defaultsOpenai: Partial<Config> = {
+      service: getDefaultServiceConfigFromAlias('openai'),
+      mode: 'stdout',
+      defaultBranch: 'test',
+    }
+    mockFs.existsSync.mockReturnValue(true)
+    mockFs.readFileSync.mockReturnValue(partialRequestOptions)
+
+    const config = loadGitConfig(defaultsOpenai as Config)
+    const service = config.service as OpenAILLMService
+
+    expect(service.requestOptions?.timeout).toBe(15000)
+    expect(service.requestOptions?.maxRetries).toBeUndefined()
+  })
+
+  it('does not attach provider-irrelevant fields (endpoint on openai, baseURL on ollama)', () => {
+    // Regression: the loader unconditionally attached both endpoint
+    // AND baseURL regardless of provider. Each provider's schema
+    // variant has additionalProperties:false, so the irrelevant one
+    // (endpoint for openai, baseURL for ollama) would fail
+    // validation in its non-applicable anyOf branch.
+    const openaiGitConfig = `
+[coco]
+  serviceProvider = openai
+  serviceModel = gpt-4o
+  serviceApiKey = sk-test
+`
+    mockFs.existsSync.mockReturnValue(true)
+    mockFs.readFileSync.mockReturnValue(openaiGitConfig)
+
+    const defaultsOpenai: Partial<Config> = {
+      service: getDefaultServiceConfigFromAlias('openai'),
+      mode: 'stdout',
+      defaultBranch: 'test',
+    }
+    const config = loadGitConfig(defaultsOpenai as Config)
+    const service = config.service as OpenAILLMService
+
+    // 'endpoint' belongs to OllamaLLMService — must not appear on an
+    // openai service.
+    expect((service as unknown as { endpoint?: string }).endpoint).toBeUndefined()
+  })
 })
 
 describe('appendToGitConfig', () => {
diff --git a/src/lib/config/services/git.ts b/src/lib/config/services/git.ts
index 1f72d3e..01ba63b 100644
--- a/src/lib/config/services/git.ts
+++ b/src/lib/config/services/git.ts
@@ -40,43 +40,87 @@ export function loadGitConfig(
 
   if (gitConfigParsed.coco) {
     foundPath = gitConfigPath
-    service = {
-      provider: gitConfigParsed.coco?.serviceProvider,
-      model: gitConfigParsed.coco?.serviceModel,
-      tokenLimit: gitConfigParsed.coco?.serviceTokenLimit
-        ? Number(gitConfigParsed.coco.serviceTokenLimit)
-        : undefined,
-      temperature: gitConfigParsed.coco?.serviceTemperature
-        ? Number(gitConfigParsed.coco.serviceTemperature)
-        : undefined,
-      maxConcurrent: gitConfigParsed.coco?.serviceMaxConcurrent
-        ? Number(gitConfigParsed.coco.serviceMaxConcurrent)
-        : undefined,
-      minTokensForSummary: gitConfigParsed.coco?.serviceMinTokensForSummary
-        ? Number(gitConfigParsed.coco.serviceMinTokensForSummary)
-        : undefined,
-      maxFileTokens: gitConfigParsed.coco?.serviceMaxFileTokens
-        ? Number(gitConfigParsed.coco.serviceMaxFileTokens)
-        : undefined,
-      maxParsingAttempts: gitConfigParsed.coco?.serviceMaxParsingAttempts
-        ? Number(gitConfigParsed.coco.serviceMaxParsingAttempts)
-        : undefined,
-      baseURL: gitConfigParsed.coco?.serviceBaseURL,
-      authentication: {
+    // Build OVERRIDES from gitconfig — only include keys the user
+    // actually set. Then merge on top of the existing service
+    // (default or earlier layer) so unset fields keep their
+    // defaults instead of getting wiped.
+    //
+    // The previous behavior had three latent bugs:
+    //   1. Replaced the whole service object — wiped default
+    //      tokenLimit / temperature / maxConcurrent / etc.
+    //   2. Constructed `requestOptions` unconditionally with
+    //      `Number(undefined) === NaN` for unset sub-fields →
+    //      JSON-serializes to `null` → schema rejects.
+    //   3. Attached `endpoint` and `baseURL` unconditionally → the
+    //      per-provider schema variants (`additionalProperties:
+    //      false` on each anyOf branch) reject the irrelevant one.
+    const coco = gitConfigParsed.coco
+    const numberOrUndefined = (raw: unknown): number | undefined => {
+      if (raw === undefined || raw === null || raw === '') return undefined
+      const n = Number(raw)
+      return Number.isFinite(n) ? n : undefined
+    }
+    const requestOptionsOverrides: { timeout?: number; maxRetries?: number } = {}
+    const timeout = numberOrUndefined(coco.serviceRequestOptionsTimeout)
+    const maxRetries = numberOrUndefined(coco.serviceRequestOptionsMaxRetries)
+    if (timeout !== undefined) requestOptionsOverrides.timeout = timeout
+    if (maxRetries !== undefined) requestOptionsOverrides.maxRetries = maxRetries
+
+    const overrides: Record<string, unknown> = {}
+    if (coco.serviceProvider) overrides.provider = coco.serviceProvider
+    if (coco.serviceModel) overrides.model = coco.serviceModel
+    const tokenLimit = numberOrUndefined(coco.serviceTokenLimit)
+    if (tokenLimit !== undefined) overrides.tokenLimit = tokenLimit
+    const temperature = numberOrUndefined(coco.serviceTemperature)
+    if (temperature !== undefined) overrides.temperature = temperature
+    const maxConcurrent = numberOrUndefined(coco.serviceMaxConcurrent)
+    if (maxConcurrent !== undefined) overrides.maxConcurrent = maxConcurrent
+    const minTokensForSummary = numberOrUndefined(coco.serviceMinTokensForSummary)
+    if (minTokensForSummary !== undefined) overrides.minTokensForSummary = minTokensForSummary
+    const maxFileTokens = numberOrUndefined(coco.serviceMaxFileTokens)
+    if (maxFileTokens !== undefined) overrides.maxFileTokens = maxFileTokens
+    const maxParsingAttempts = numberOrUndefined(coco.serviceMaxParsingAttempts)
+    if (maxParsingAttempts !== undefined) overrides.maxParsingAttempts = maxParsingAttempts
+
+    // Provider-specific keys only attach when relevant to the
+    // chosen (or pre-existing) provider — keeps the merged service
+    // shape consistent with whichever schema variant should match.
+    const effectiveProvider = (coco.serviceProvider || service?.provider) as
+      | 'openai' | 'ollama' | 'anthropic' | undefined
+    if (effectiveProvider === 'openai' && coco.serviceBaseURL) {
+      overrides.baseURL = coco.serviceBaseURL
+    }
+    if (effectiveProvider === 'ollama' && coco.serviceEndpoint) {
+      overrides.endpoint = coco.serviceEndpoint
+    }
+    if (coco.serviceFields) {
+      try {
+        overrides.fields = JSON.parse(coco.serviceFields)
+      } catch {
+        // Malformed JSON in serviceFields — skip rather than throw.
+        // The loader's job isn't to validate user input here, just
+        // to surface what's parseable. The schema validator runs
+        // later and will catch a missing required field if any.
+      }
+    }
+    // requestOptions only gets attached when at least one sub-field
+    // was actually set. Empty `{}` would still serialize cleanly
+    // but stays out of the merged shape for readability.
+    if (Object.keys(requestOptionsOverrides).length > 0) {
+      overrides.requestOptions = requestOptionsOverrides
+    }
+    // Authentication: only when an apiKey was provided. Default
+    // service ships with an empty-credential placeholder for users
+    // who'll set the key via env var; gitconfig's apiKey should
+    // override that path, not the structure itself.
+    if (coco.serviceApiKey) {
+      overrides.authentication = {
         type: 'APIKey',
-        credentials: {
-          apiKey: gitConfigParsed.coco?.serviceApiKey,
-        },
-      },
-      requestOptions: {
-        timeout: Number(gitConfigParsed.coco?.serviceRequestOptionsTimeout),
-        maxRetries: Number(gitConfigParsed.coco?.serviceRequestOptionsMaxRetries),
-      },
-      endpoint: gitConfigParsed.coco?.serviceEndpoint,
-      fields: gitConfigParsed.coco?.serviceFields
-        ? JSON.parse(gitConfigParsed.coco?.serviceFields)
-        : undefined,
-    } as LLMService
+        credentials: { apiKey: coco.serviceApiKey },
+      }
+    }
+
+    service = { ...(service || {}), ...overrides } as LLMService
   }
 
   config = {