Skip to content

Commit 2bf088d

Browse files
committed
fix(core): disable gpt-5 reasoning and raise output cap
1 parent a5284a5 commit 2bf088d

9 files changed

Lines changed: 74 additions & 15 deletions

File tree

Cargo.lock

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

crates/cli/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[package]
22
name = "goodcommit"
3-
version = "0.3.1"
3+
version = "0.3.2"
44
edition = "2021"
55
license = "MIT"
66
description = "Good Commit: fast, reliable AI commit messages"

crates/cli/src/setup.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ pub fn run_setup() -> Result<()> {
111111
config.one_line = Some(true);
112112
config.timeout_secs = Some(20);
113113
config.max_input_tokens = Some(6000);
114-
config.max_output_tokens = Some(200);
114+
config.max_output_tokens = Some(2048);
115115
config.stage_mode = Some(StageMode::Auto);
116116

117117
let toml = toml::to_string_pretty(&config).context("failed to serialize config")?;

crates/core/Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[package]
22
name = "goodcommit-core"
3-
version = "0.3.1"
3+
version = "0.3.2"
44
edition = "2021"
55
license = "MIT"
66
description = "Core library for Good Commit"

crates/core/src/config.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ impl Config {
119119
push: Some(true),
120120
timeout_secs: Some(20),
121121
max_input_tokens: Some(6000),
122-
max_output_tokens: Some(200),
122+
max_output_tokens: Some(2048),
123123
max_file_bytes: Some(200_000),
124124
max_file_lines: Some(2_000),
125125
summary_concurrency: Some(4),
@@ -186,7 +186,7 @@ impl Config {
186186
push: self.push.unwrap_or(true),
187187
timeout_secs: self.timeout_secs.unwrap_or(20),
188188
max_input_tokens: self.max_input_tokens.unwrap_or(6000),
189-
max_output_tokens: self.max_output_tokens.unwrap_or(200),
189+
max_output_tokens: self.max_output_tokens.unwrap_or(2048),
190190
max_file_bytes: self.max_file_bytes.unwrap_or(200_000),
191191
max_file_lines: self.max_file_lines.unwrap_or(2_000),
192192
summary_concurrency: self.summary_concurrency.unwrap_or(4) as usize,

crates/core/src/pipeline.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ async fn summarize_then_commit(
217217
deadline: Instant,
218218
) -> CoreResult<String> {
219219
let max_file_tokens = std::cmp::min(config.max_input_tokens as usize, 2000);
220-
let summary_tokens = std::cmp::min(config.max_output_tokens, 120);
220+
let summary_tokens = config.max_output_tokens;
221221
let concurrency = std::cmp::max(config.summary_concurrency, 1);
222222

223223
let summary_results = stream::iter(diff_files.iter())

crates/core/src/providers/openai.rs

Lines changed: 61 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,12 @@ impl OpenAiProvider {
9898
user_prompt: &str,
9999
request: ProviderRequest,
100100
) -> CoreResult<String> {
101-
let base = self.responses_base_payload(system_prompt, user_prompt, Some(request.temperature));
101+
let temperature = if self.is_gpt5() {
102+
None
103+
} else {
104+
Some(request.temperature)
105+
};
106+
let base = self.responses_base_payload(system_prompt, user_prompt, temperature);
102107

103108
match self
104109
.complete_responses_with_fallbacks(&base, request.max_output_tokens)
@@ -123,11 +128,16 @@ impl OpenAiProvider {
123128
user_prompt: &str,
124129
request: ProviderRequest,
125130
) -> CoreResult<String> {
131+
let temperature = if self.is_gpt5() {
132+
None
133+
} else {
134+
Some(request.temperature)
135+
};
126136
let body = self.chat_payload(
127137
system_prompt,
128138
user_prompt,
129139
request.max_output_tokens,
130-
Some(request.temperature),
140+
temperature,
131141
);
132142

133143
let http_request = self
@@ -182,6 +192,16 @@ impl OpenAiProvider {
182192
});
183193

184194
if let Some(obj) = payload.as_object_mut() {
195+
if self.is_gpt5() {
196+
obj.insert(
197+
"reasoning".to_string(),
198+
serde_json::json!({ "effort": "none" }),
199+
);
200+
obj.insert(
201+
"text".to_string(),
202+
serde_json::json!({ "format": { "type": "text" } }),
203+
);
204+
}
185205
if let Some(value) = temperature {
186206
obj.insert("temperature".to_string(), serde_json::json!(value));
187207
}
@@ -236,6 +256,10 @@ impl OpenAiProvider {
236256
}
237257
}
238258

259+
fn is_gpt5(&self) -> bool {
260+
self.model.trim().to_lowercase().starts_with("gpt-5")
261+
}
262+
239263
async fn complete_responses_with_param(
240264
&self,
241265
base: &Value,
@@ -396,6 +420,41 @@ mod tests {
396420
assert!(payload.get("temperature").is_none());
397421
}
398422

423+
#[test]
424+
fn responses_payload_sets_reasoning_none_for_gpt5() {
425+
let provider = OpenAiProvider::new(
426+
"gpt-5-nano-2025-08-07".to_string(),
427+
"https://api.openai.com/v1".to_string(),
428+
OpenAiMode::Responses,
429+
5,
430+
Some("test-key".to_string()),
431+
)
432+
.expect("provider");
433+
434+
let payload = provider.responses_base_payload("system", "user", None);
435+
let effort = payload
436+
.get("reasoning")
437+
.and_then(|value| value.get("effort"))
438+
.and_then(|value| value.as_str());
439+
assert_eq!(effort, Some("none"));
440+
assert!(payload.get("text").is_some());
441+
}
442+
443+
#[test]
444+
fn responses_payload_skips_reasoning_for_non_gpt5() {
445+
let provider = OpenAiProvider::new(
446+
"gpt-4o-mini".to_string(),
447+
"https://api.openai.com/v1".to_string(),
448+
OpenAiMode::Responses,
449+
5,
450+
Some("test-key".to_string()),
451+
)
452+
.expect("provider");
453+
454+
let payload = provider.responses_base_payload("system", "user", None);
455+
assert!(payload.get("reasoning").is_none());
456+
}
457+
399458
#[test]
400459
fn chat_payload_omits_temperature_when_none() {
401460
let provider = OpenAiProvider::new(

homebrew/goodcommit.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
11
class Goodcommit < Formula
22
desc "Good Commit: fast, reliable AI commit messages"
33
homepage "https://github.com/Bikz/goodcommit"
4-
version "0.3.1"
4+
version "0.3.2"
55

66
on_macos do
77
if Hardware::CPU.arm?
8-
url "https://github.com/Bikz/goodcommit/releases/download/v0.3.1/goodcommit-aarch64-apple-darwin.tar.gz"
8+
url "https://github.com/Bikz/goodcommit/releases/download/v0.3.2/goodcommit-aarch64-apple-darwin.tar.gz"
99
sha256 "REPLACE_ME"
1010
else
11-
url "https://github.com/Bikz/goodcommit/releases/download/v0.3.1/goodcommit-x86_64-apple-darwin.tar.gz"
11+
url "https://github.com/Bikz/goodcommit/releases/download/v0.3.2/goodcommit-x86_64-apple-darwin.tar.gz"
1212
sha256 "REPLACE_ME"
1313
end
1414
end
@@ -18,7 +18,7 @@ class Goodcommit < Formula
1818
odie "linux arm64 builds are not yet available"
1919
end
2020

21-
url "https://github.com/Bikz/goodcommit/releases/download/v0.3.1/goodcommit-x86_64-unknown-linux-gnu.tar.gz"
21+
url "https://github.com/Bikz/goodcommit/releases/download/v0.3.2/goodcommit-x86_64-unknown-linux-gnu.tar.gz"
2222
sha256 "REPLACE_ME"
2323
end
2424

npm/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "goodcommit",
3-
"version": "0.3.1",
3+
"version": "0.3.2",
44
"description": "Good Commit: fast, reliable AI commit messages",
55
"license": "MIT",
66
"repository": {

0 commit comments

Comments (0)