From 2c48a5a9712abb87028eee44a7c657d8523f3859 Mon Sep 17 00:00:00 2001 From: Amit Singh Date: Fri, 27 Mar 2026 11:02:20 +0530 Subject: [PATCH 1/5] feat(forge_domain): add model-change tracking for reasoning --- crates/forge_app/src/orch.rs | 5 +- crates/forge_domain/src/context.rs | 159 +++++- .../src/transformer/reasoning_normalizer.rs | 526 ++++++++++++------ .../forge_domain/src/transformer/set_model.rs | 37 +- ...lizer__tests__model_changed_snapshot.snap} | 28 +- ...izer__tests__model_unchanged_snapshot.snap | 73 +++ ...st_assistant_message_has_no_reasoning.snap | 61 -- ...model_affects_both_user_and_assistant.snap | 36 ++ ...odel_does_not_affect_system_messages.snap} | 16 +- ...l__tests__set_model_for_user_messages.snap | 1 + 10 files changed, 675 insertions(+), 267 deletions(-) rename crates/forge_domain/src/transformer/snapshots/{forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_keeps_all_when_first_has_reasoning.snap => forge_domain__transformer__reasoning_normalizer__tests__model_changed_snapshot.snap} (61%) create mode 100644 crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_unchanged_snapshot.snap delete mode 100644 crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_removes_all_when_first_assistant_message_has_no_reasoning.snap create mode 100644 crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap rename crates/forge_domain/src/transformer/snapshots/{forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_when_no_assistant_message_present.snap => forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap} (52%) diff --git a/crates/forge_app/src/orch.rs b/crates/forge_app/src/orch.rs index b189e95146..a377814c33 100644 --- a/crates/forge_app/src/orch.rs +++ 
b/crates/forge_app/src/orch.rs @@ -160,8 +160,11 @@ impl Orchestrator { .pipe(SortTools::new(self.agent.tool_order())) .pipe(TransformToolCalls::new().when(|_| !tool_supported)) .pipe(ImageHandling::new()) + // Drop ALL reasoning (including config) when reasoning is not supported by the model .pipe(DropReasoningDetails.when(|_| !reasoning_supported)) - .pipe(ReasoningNormalizer.when(|_| reasoning_supported)); + // Strip all reasoning from messages when the model has changed (signatures are + // model-specific and invalid across models). No-op when model is unchanged. + .pipe(ReasoningNormalizer::new(model_id.clone())); let response = self .services .chat_agent( diff --git a/crates/forge_domain/src/context.rs b/crates/forge_domain/src/context.rs index c2a5cb7867..0e552ee23a 100644 --- a/crates/forge_domain/src/context.rs +++ b/crates/forge_domain/src/context.rs @@ -557,8 +557,11 @@ impl Context { ) -> Self { // Convert flat reasoning string to reasoning_details if present let merged_reasoning_details = if let Some(reasoning_text) = reasoning { - let reasoning_entry = - ReasoningFull { text: Some(reasoning_text), ..Default::default() }; + let reasoning_entry = ReasoningFull { + text: Some(reasoning_text), + type_of: Some("reasoning.text".to_string()), + ..Default::default() + }; if let Some(mut existing_details) = reasoning_details { existing_details.push(reasoning_entry); Some(existing_details) @@ -688,6 +691,37 @@ impl Context { }) .sum() } + + /// Checks if the model has changed from the previous assistant message. + /// Returns true if the previous assistant message has a different model than + /// the provided current_model, or if there is no previous assistant message + /// with a model. + /// + /// This is used to determine whether to apply reasoning normalization - we only + /// want to strip reasoning when switching models, not when continuing with the + /// same model. 
+ pub fn has_model_changed(&self, current_model: &ModelId) -> bool { + // Find the last assistant message with a model field + let last_assistant_model = self + .messages + .iter() + .rev() + .find_map(|msg| { + if let ContextMessage::Text(text_msg) = &**msg { + if text_msg.has_role(Role::Assistant) { + return text_msg.model.as_ref(); + } + } + None + }); + + // If there's no previous assistant model, consider it as changed + // If there is a previous model, check if it differs from current + match last_assistant_model { + None => true, + Some(prev_model) => prev_model != current_model, + } + } } #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] @@ -1494,4 +1528,125 @@ mod tests { let expected = 5; // 18 chars / 4 = 5 tokens (rounded up) assert_eq!(actual, expected); } + + #[test] + fn test_has_model_changed_returns_true_when_no_previous_messages() { + let fixture = Context::default(); + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = true; + + assert_eq!(actual, expected); + } + + #[test] + fn test_has_model_changed_returns_true_when_model_differs() { + let fixture = Context::default().add_message( + TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-3.5")), + ); + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = true; + + assert_eq!(actual, expected); + } + + #[test] + fn test_has_model_changed_returns_false_when_model_same() { + let fixture = Context::default().add_message( + TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-4")), + ); + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = false; + + assert_eq!(actual, expected); + } + + #[test] + fn test_has_model_changed_returns_true_when_previous_has_no_model() { + let fixture = + Context::default().add_message(TextMessage::new(Role::Assistant, "Hello")); // No model
set + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = true; + + assert_eq!(actual, expected); + } + + #[test] + fn test_has_model_changed_checks_last_assistant_message_with_model() { + let fixture = Context::default() + .add_message( + TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5")), + ) + .add_message(TextMessage::new(Role::User, "Question")) + .add_message( + TextMessage::new(Role::Assistant, "Second").model(ModelId::new("gpt-4")), + ); + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = false; // Last assistant message with model is "gpt-4", same as current + + assert_eq!(actual, expected); + } + + #[test] + fn test_has_model_changed_with_multiple_messages_model_changed() { + let fixture = Context::default() + .add_message( + TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5")), + ) + .add_message(TextMessage::new(Role::User, "Question")) + .add_message( + TextMessage::new(Role::Assistant, "Second").model(ModelId::new("claude-3")), + ); + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = true; // Last assistant message with model is "claude-3", different from "gpt-4" + + assert_eq!(actual, expected); + } + + #[test] + fn test_has_model_changed_ignores_user_messages() { + // User messages have model tracking too, but we should only check assistant messages + let fixture = Context::default() + .add_message( + TextMessage::new(Role::Assistant, "Response").model(ModelId::new("gpt-4")), + ) + .add_message(TextMessage::new(Role::User, "Question").model(ModelId::new("claude-3"))); + let current_model = ModelId::new("gpt-4"); + + let actual = fixture.has_model_changed(&current_model); + let expected = false; // Last ASSISTANT message is "gpt-4", user message should be ignored + + assert_eq!(actual, expected); + } + + #[test] + fn
test_has_model_changed_continuing_same_model() { + // Scenario: model1 -> model2 -> model2 (the second model2 should not drop reasoning) + let fixture = Context::default() + .add_message( + TextMessage::new(Role::Assistant, "First").model(ModelId::new("model1")), + ) + .add_message(TextMessage::new(Role::User, "Question")) + .add_message( + TextMessage::new(Role::Assistant, "Second").model(ModelId::new("model2")), + ) + .add_message(TextMessage::new(Role::User, "Another question")); + let current_model = ModelId::new("model2"); + + let actual = fixture.has_model_changed(&current_model); + let expected = false; // Last assistant used "model2", same as current + + assert_eq!(actual, expected); + } } diff --git a/crates/forge_domain/src/transformer/reasoning_normalizer.rs b/crates/forge_domain/src/transformer/reasoning_normalizer.rs index 66c6befb0f..b1a66deb9f 100644 --- a/crates/forge_domain/src/transformer/reasoning_normalizer.rs +++ b/crates/forge_domain/src/transformer/reasoning_normalizer.rs @@ -1,61 +1,77 @@ -use crate::{Context, Transformer}; +use crate::{Context, ModelId, Transformer}; -/// A transformer that normalizes reasoning details across assistant messages. +/// A transformer that preserves reasoning only for the contiguous tail of +/// assistant messages that were produced by the current model, stripping +/// reasoning from everything before the first model mismatch (going backwards). /// -/// Per Claude's extended thinking docs, thinking blocks from previous turns are -/// stripped to save context space, but the LAST assistant turn's thinking must -/// be preserved for reasoning continuity (especially during tool use). +/// # Behaviour /// -/// This transformer checks if the last assistant message has reasoning details. -/// If it does, reasoning is preserved only on the last assistant message. -/// If it doesn't, all reasoning details are removed from all assistant -/// messages.
-#[derive(Default)] -pub struct ReasoningNormalizer; +/// Walk backwards through the assistant messages. As long as each message's +/// model matches `model_id`, its reasoning is kept. The moment a message with +/// a different model is encountered that index becomes the *cutoff*: reasoning +/// is stripped from that message and every assistant message before it, +/// regardless of which model produced them. +/// +/// For example, given the assistant-message sequence `[1 1 1 1 2 1 3 1 2 2 2]` +/// with current model `2`: +/// - Tail `[2 2 2]` is preserved. +/// - Everything at or before the `1` that precedes the tail is stripped, +/// including the earlier `2` that appears before the model break. +/// +/// When every assistant message was produced by the current model the +/// transformer is a no-op. When there are no assistant messages it is also a +/// no-op. +/// +/// NOTE: `context.reasoning` (the config) is never removed so the new request +/// can still enable reasoning on the current turn. +pub struct ReasoningNormalizer { + model_id: ModelId, +} + +impl ReasoningNormalizer { + /// Creates a normalizer for the given current model. + pub fn new(model_id: ModelId) -> Self { + Self { model_id } + } +} impl Transformer for ReasoningNormalizer { type Value = Context; fn transform(&mut self, mut context: Self::Value) -> Self::Value { - // Find the index of the last assistant message - let last_assistant_idx = context + // Walk backwards to find the last assistant message (forward index) whose + // model differs from the current one. That is the cutoff: everything at + // or before it has reasoning stripped; the same-model tail after it is + // kept intact. 
+ let cutoff = context .messages .iter() .enumerate() .rev() - .find(|(_, message)| message.has_role(crate::Role::Assistant)) - .map(|(idx, _)| idx); - - // Check if the last assistant message has reasoning - let last_assistant_has_reasoning = last_assistant_idx.and_then(|idx| { - context - .messages - .get(idx) - .map(|message| message.has_reasoning_details()) - }); - - // Apply the normalization rule - if last_assistant_has_reasoning == Some(false) || last_assistant_has_reasoning.is_none() { - // Remove reasoning details from all assistant messages - // NOTE: We do NOT set context.reasoning = None here, as that would - // disable reasoning for subsequent turns. The reasoning config should - // persist across turns even when stripping reasoning blocks. - for message in context.messages.iter_mut() { - if message.has_role(crate::Role::Assistant) - && let crate::ContextMessage::Text(text_msg) = &mut **message - { - text_msg.reasoning_details = None; + .find_map(|(idx, msg)| { + if msg.has_role(crate::Role::Assistant) { + if let crate::ContextMessage::Text(text) = &**msg { + if text.model.as_ref() != Some(&self.model_id) { + return Some(idx); + } + } } + None + }); + + let Some(cutoff) = cutoff else { + return context; // all assistant messages match — nothing to strip + }; + + for (idx, message) in context.messages.iter_mut().enumerate() { + if idx > cutoff { + break; } - } else { - // Last assistant has reasoning - strip from all previous assistant messages - for (idx, message) in context.messages.iter_mut().enumerate() { - if message.has_role(crate::Role::Assistant) - && Some(idx) != last_assistant_idx - && let crate::ContextMessage::Text(text_msg) = &mut **message - { - text_msg.reasoning_details = None; - } + if message.has_role(crate::Role::Assistant) + && let crate::ContextMessage::Text(text_msg) = &mut **message + { + text_msg.reasoning_details = None; + text_msg.thought_signature = None; } } @@ -84,178 +100,334 @@ mod tests { } } - fn 
create_context_first_assistant_has_reasoning() -> Context { - let reasoning_details = vec![ReasoningFull { + fn model_a() -> ModelId { + ModelId::from("model-a") + } + + fn model_b() -> ModelId { + ModelId::from("model-b") + } + + fn model_c() -> ModelId { + ModelId::from("model-c") + } + + fn reasoning_details() -> Vec<ReasoningFull> { + vec![ReasoningFull { text: Some("I need to think about this carefully".to_string()), - signature: None, + signature: Some("sig_model_a".to_string()), ..Default::default() - }]; + }] + } + + /// Shorthand for an assistant `ContextMessage` with model and reasoning set. + fn assistant_msg(model: ModelId, content: &str) -> ContextMessage { + ContextMessage::Text( + TextMessage::new(Role::Assistant, content) + .model(model) + .reasoning_details(reasoning_details()), + ) + } + /// Builds a context where the last assistant message was produced by `prev_model`. + fn fixture_with_prev_model(prev_model: ModelId) -> Context { Context::default() .reasoning(ReasoningConfig::default().enabled(true)) - .add_message(ContextMessage::user("User question", None)) - .add_message(ContextMessage::Text( - TextMessage::new(Role::Assistant, "First assistant response with reasoning") - .reasoning_details(reasoning_details.clone()), - )) + .add_message(ContextMessage::user("First question", None)) + .add_message(assistant_msg(prev_model.clone(), "First assistant response")) .add_message(ContextMessage::user("Follow-up question", None)) - .add_message(ContextMessage::Text( - TextMessage::new(Role::Assistant, "Second assistant response with reasoning") - .reasoning_details(reasoning_details.clone()), - )) - .add_message(ContextMessage::Text(TextMessage::new( - Role::Assistant, - "Third assistant without reasoning", - ))) + .add_message(assistant_msg(prev_model, "Second assistant response")) } - fn create_context_first_assistant_no_reasoning() -> Context { - let reasoning_details = vec![ReasoningFull { - text: Some("Complex reasoning process".to_string()), - signature: None,
- ..Default::default() - }]; + #[test] + fn test_no_op_when_model_unchanged() { + // When the current model matches the last assistant message's model, + // the transformer must not touch any reasoning details. + let fixture = fixture_with_prev_model(model_a()); + let mut transformer = ReasoningNormalizer::new(model_a()); + let actual = transformer.transform(fixture.clone()); - Context::default() - .reasoning(ReasoningConfig::default().enabled(true)) - .add_message(ContextMessage::user("User message", None)) - .add_message(ContextMessage::Text(TextMessage::new( - Role::Assistant, - "First assistant without reasoning", - ))) - .add_message(ContextMessage::Text( - TextMessage::new(Role::Assistant, "Second assistant with reasoning") - .reasoning_details(reasoning_details.clone()), - )) - .add_message(ContextMessage::Text( - TextMessage::new(Role::Assistant, "Third assistant with reasoning") - .reasoning_details(reasoning_details), - )) + assert_eq!(actual, fixture, "Context should be unchanged when model is the same"); } #[test] - fn test_reasoning_normalizer_keeps_all_when_first_has_reasoning() { - let fixture = create_context_first_assistant_has_reasoning(); - let mut transformer = ReasoningNormalizer; - let actual = transformer.transform(fixture.clone()); + fn test_strips_all_reasoning_when_model_changed() { + // When the model changes, ALL reasoning must be stripped (including the + // last assistant message) because signatures from the old model are + // invalid for the new model. 
+ let fixture = fixture_with_prev_model(model_a()); + let mut transformer = ReasoningNormalizer::new(model_b()); + let actual = transformer.transform(fixture); - // All reasoning details should be preserved since first assistant has reasoning - let snapshot = - TransformationSnapshot::new("ReasoningNormalizer_first_has_reasoning", fixture, actual); - assert_yaml_snapshot!(snapshot); + for message in &actual.messages { + if message.has_role(Role::Assistant) { + if let crate::ContextMessage::Text(text) = &**message { + assert_eq!( + text.reasoning_details, None, + "All assistant reasoning must be stripped on model change" + ); + } + } + } } #[test] - fn test_reasoning_normalizer_removes_all_when_first_assistant_message_has_no_reasoning() { - let context = create_context_first_assistant_no_reasoning(); - let mut transformer = ReasoningNormalizer; - let actual = transformer.transform(context.clone()); - - // All reasoning details should be removed since first assistant has no - // reasoning - let snapshot = - TransformationSnapshot::new("ReasoningNormalizer_first_no_reasoning", context, actual); - assert_yaml_snapshot!(snapshot); + fn test_reasoning_config_preserved_when_model_changed() { + // Stripping reasoning blocks must not disable the reasoning config, + // so the new model can still reason on the current turn. + let fixture = fixture_with_prev_model(model_a()); + let mut transformer = ReasoningNormalizer::new(model_b()); + let actual = transformer.transform(fixture); + + assert!( + actual.reasoning.is_some(), + "Reasoning config must be preserved so new model can still reason" + ); + assert_eq!(actual.reasoning.as_ref().unwrap().enabled, Some(true)); } #[test] - fn test_reasoning_normalizer_when_no_assistant_message_present() { - let context = Context::default() + fn test_no_op_when_no_previous_assistant_message() { + // No previous assistant message means no previous model to compare + // against — treat as unchanged (no-op). 
+ let fixture = Context::default() .reasoning(ReasoningConfig::default().enabled(true)) .add_message(ContextMessage::system("System message")) .add_message(ContextMessage::user("User message", None)); - let mut transformer = ReasoningNormalizer; - let actual = transformer.transform(context.clone()); - - // All reasoning details should be removed since first assistant has no - // reasoning - let snapshot = TransformationSnapshot::new( - "ReasoningNormalizer_first_no_assistant_message_present", - context, - actual, - ); - assert_yaml_snapshot!(snapshot); + + let mut transformer = ReasoningNormalizer::new(model_b()); + let actual = transformer.transform(fixture.clone()); + + assert_eq!(actual, fixture, "Context should be unchanged when there is no previous assistant"); } + // --- Back-and-forth model change tests --- + #[test] - fn test_reasoning_normalizer_preserves_last_assistant_after_compaction() { - // Simulates the scenario after compaction where: - // 1. Compactor preserved reasoning from the last compacted message - // 2. Injected it into the first assistant after compaction - // 3. There are multiple assistant messages in the context - // Expected: Only the LAST assistant should keep its reasoning - let preserved_reasoning = vec![ReasoningFull { - text: Some("Preserved reasoning from compaction".to_string()), - signature: Some("sig_preserved".to_string()), - ..Default::default() - }]; + fn test_a_to_b_strips_reasoning() { + // A → B: switching from model_a to model_b must strip all reasoning. 
+ let fixture = Context::default() + .reasoning(ReasoningConfig::default().enabled(true)) + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_a(), "a2")); // last assistant is model_a - let other_reasoning = vec![ReasoningFull { - text: Some("Old reasoning from previous turn".to_string()), - signature: Some("sig_old".to_string()), - ..Default::default() - }]; + let actual = ReasoningNormalizer::new(model_b()).transform(fixture); + assert!(all_reasoning_stripped(&actual)); + } + + #[test] + fn test_a_to_b_back_to_a_strips_reasoning() { + // A → B → A: switching back to model_a after model_b must still strip, + // because the last assistant message carries model_b signatures. let fixture = Context::default() .reasoning(ReasoningConfig::default().enabled(true)) - .add_message(ContextMessage::user("Summary after compaction", None)) - .add_message(ContextMessage::Text( - TextMessage::new(Role::Assistant, "First assistant (with injected reasoning)") - .reasoning_details(other_reasoning.clone()), - )) - .add_message(ContextMessage::user("User question", None)) - .add_message(ContextMessage::Text( - TextMessage::new(Role::Assistant, "Last assistant (current turn)") - .reasoning_details(preserved_reasoning.clone()), - )); - - // Execute - let mut transformer = ReasoningNormalizer; - let actual = transformer.transform(fixture.clone()); + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_b(), "a2")) // last assistant is model_b + .add_message(ContextMessage::user("q3", None)); - // Verify: Only the last assistant should have reasoning - let last_assistant = actual - .messages - .iter() - .rev() - .find(|msg| msg.has_role(Role::Assistant)) - .expect("Should have last assistant"); + let actual = 
ReasoningNormalizer::new(model_a()).transform(fixture); + + assert!(all_reasoning_stripped(&actual)); + } + + #[test] + fn test_a_to_b_stay_on_b_strips_a_keeps_b() { + // A → B (stay on B): the model_a message before the switch loses its + // reasoning (it's before the cutoff); the model_b tail is preserved. + let fixture = Context::default() + .reasoning(ReasoningConfig::default().enabled(true)) + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_b(), "a2")); // last assistant is model_b + + let actual = ReasoningNormalizer::new(model_b()).transform(fixture); - if let crate::ContextMessage::Text(text) = &**last_assistant { + // a1 (model_a) is before the cutoff → stripped + let msgs: Vec<_> = actual.messages.iter().collect(); + if let crate::ContextMessage::Text(a1) = &**msgs[1] { + assert_eq!(a1.reasoning_details, None, "a1 (model_a) should be stripped"); + } + // a2 (model_b) is in the same-model tail → preserved + if let crate::ContextMessage::Text(a2) = &**msgs[3] { assert_eq!( - text.reasoning_details, - Some(preserved_reasoning), - "Last assistant should preserve its reasoning" + a2.reasoning_details, + Some(reasoning_details()), + "a2 (model_b) should be preserved" ); - } else { - panic!("Expected Text message"); } + } - // Verify: First assistant reasoning should be stripped - let first_assistant = actual - .messages - .iter() - .find(|msg| msg.has_role(Role::Assistant)) - .expect("Should have first assistant"); + #[test] + fn test_a_to_b_to_c_strips_reasoning() { + // A → B → C: every model switch must strip; here B→C triggers the strip. 
+ let fixture = Context::default() + .reasoning(ReasoningConfig::default().enabled(true)) + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_b(), "a2")); // last assistant is model_b + + let actual = ReasoningNormalizer::new(model_c()).transform(fixture); + + assert!(all_reasoning_stripped(&actual)); + } + + #[test] + fn test_alternating_a_b_a_b_strips_reasoning() { + // A → B → A → B: after the full alternation the last assistant is model_a; + // switching to model_b must strip all reasoning. + let fixture = Context::default() + .reasoning(ReasoningConfig::default().enabled(true)) + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_b(), "a2")) + .add_message(ContextMessage::user("q3", None)) + .add_message(assistant_msg(model_a(), "a3")); // last assistant is model_a + + let actual = ReasoningNormalizer::new(model_b()).transform(fixture); - if let crate::ContextMessage::Text(text) = &**first_assistant { + assert!(all_reasoning_stripped(&actual)); + } + + #[test] + fn test_alternating_a_b_a_stay_a_strips_ab_keeps_last_a() { + // A → B → A (stay on A): the cutoff is at b2 (the first mismatch going + // backwards), so a1 and b2 lose reasoning; only a3 (the same-model tail) + // is preserved. 
+ let fixture = Context::default() + .reasoning(ReasoningConfig::default().enabled(true)) + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_b(), "b2")) + .add_message(ContextMessage::user("q3", None)) + .add_message(assistant_msg(model_a(), "a3")); // last assistant is model_a + + let actual = ReasoningNormalizer::new(model_a()).transform(fixture); + + let msgs: Vec<_> = actual.messages.iter().collect(); + // a1 (model_a, before cutoff) → stripped + if let crate::ContextMessage::Text(a1) = &**msgs[1] { + assert_eq!(a1.reasoning_details, None, "a1 should be stripped (before cutoff)"); + } + // b2 (model_b, the cutoff itself) → stripped + if let crate::ContextMessage::Text(b2) = &**msgs[3] { + assert_eq!(b2.reasoning_details, None, "b2 should be stripped (is the cutoff)"); + } + // a3 (model_a, same-model tail) → preserved + if let crate::ContextMessage::Text(a3) = &**msgs[5] { assert_eq!( - text.reasoning_details, None, - "First assistant reasoning should be stripped" + a3.reasoning_details, + Some(reasoning_details()), + "a3 should be preserved (same-model tail)" ); - } else { - panic!("Expected Text message"); } + } - // Verify: Global reasoning config is PRESERVED (not set to None) - assert!( - actual.reasoning.is_some(), - "Reasoning config should be preserved for subsequent turns" - ); - assert_eq!( - actual.reasoning.as_ref().unwrap().enabled, - Some(true), - "Reasoning should remain enabled for subsequent turns" - ); + /// Returns `true` when every assistant message in `ctx` has no reasoning details. 
+ fn all_reasoning_stripped(ctx: &Context) -> bool { + ctx.messages.iter().all(|msg| { + if msg.has_role(Role::Assistant) { + if let crate::ContextMessage::Text(text) = &**msg { + return text.reasoning_details.is_none(); + } + } + true + }) + } + + #[test] + fn test_mixed_sequence_preserves_only_same_model_tail() { + // Sequence: a a a a b a c a b b b (current = b) + // ↑↑↑ preserved tail + // ↑↑↑↑↑↑↑↑↑↑↑ stripped (everything before the tail break) + // The earlier `b` in the middle is also stripped because it is before + // the cutoff — only the contiguous tail from the end matters. + let fixture = Context::default() + .reasoning(ReasoningConfig::default().enabled(true)) + .add_message(ContextMessage::user("q1", None)) + .add_message(assistant_msg(model_a(), "a1")) + .add_message(ContextMessage::user("q2", None)) + .add_message(assistant_msg(model_a(), "a2")) + .add_message(ContextMessage::user("q3", None)) + .add_message(assistant_msg(model_a(), "a3")) + .add_message(ContextMessage::user("q4", None)) + .add_message(assistant_msg(model_a(), "a4")) + .add_message(ContextMessage::user("q5", None)) + .add_message(assistant_msg(model_b(), "b5")) // earlier b — must be stripped + .add_message(ContextMessage::user("q6", None)) + .add_message(assistant_msg(model_a(), "a6")) + .add_message(ContextMessage::user("q7", None)) + .add_message(assistant_msg(model_c(), "c7")) + .add_message(ContextMessage::user("q8", None)) + .add_message(assistant_msg(model_a(), "a8")) + .add_message(ContextMessage::user("q9", None)) + .add_message(assistant_msg(model_b(), "b9")) // tail start + .add_message(ContextMessage::user("q10", None)) + .add_message(assistant_msg(model_b(), "b10")) // tail + .add_message(ContextMessage::user("q11", None)) + .add_message(assistant_msg(model_b(), "b11")); // tail end (last) + + let actual = ReasoningNormalizer::new(model_b()).transform(fixture); + + let assistant_msgs: Vec<_> = actual + .messages + .iter() + .filter(|m| m.has_role(Role::Assistant)) + 
.collect(); + + // Tail (last 3): b9, b10, b11 → reasoning preserved + for tail_msg in &assistant_msgs[assistant_msgs.len() - 3..] { + if let crate::ContextMessage::Text(t) = &***tail_msg { + assert_eq!( + t.reasoning_details, + Some(reasoning_details()), + "tail model_b message should preserve reasoning: {}", + t.content + ); + } + } + + // Everything before the tail (a1..a8, b5, c7) → reasoning stripped + for pre_msg in &assistant_msgs[..assistant_msgs.len() - 3] { + if let crate::ContextMessage::Text(t) = &***pre_msg { + assert_eq!( + t.reasoning_details, + None, + "pre-tail message should have reasoning stripped: {}", + t.content + ); + } + } + + // Reasoning config must still be enabled + assert_eq!(actual.reasoning.as_ref().unwrap().enabled, Some(true)); + } + + #[test] + fn test_model_changed_snapshot() { + let fixture = fixture_with_prev_model(model_a()); + let mut transformer = ReasoningNormalizer::new(model_b()); + let actual = transformer.transform(fixture.clone()); + + let snapshot = TransformationSnapshot::new("ReasoningNormalizer_model_changed", fixture, actual); + assert_yaml_snapshot!(snapshot); + } + + #[test] + fn test_model_unchanged_snapshot() { + let fixture = fixture_with_prev_model(model_a()); + let mut transformer = ReasoningNormalizer::new(model_a()); + let actual = transformer.transform(fixture.clone()); + + let snapshot = TransformationSnapshot::new("ReasoningNormalizer_model_unchanged", fixture, actual); + assert_yaml_snapshot!(snapshot); } } diff --git a/crates/forge_domain/src/transformer/set_model.rs b/crates/forge_domain/src/transformer/set_model.rs index 29fe64c9da..b10abef5a6 100644 --- a/crates/forge_domain/src/transformer/set_model.rs +++ b/crates/forge_domain/src/transformer/set_model.rs @@ -1,7 +1,8 @@ use super::Transformer; use crate::{Context, ModelId}; -/// Transformer that sets the model for all user messages in the context +/// Transformer that sets the model for all user and assistant messages in the +/// context pub 
struct SetModel { pub model: ModelId, } @@ -16,10 +17,10 @@ impl Transformer for SetModel { type Value = Context; fn transform(&mut self, mut value: Self::Value) -> Self::Value { - // Set the model for all user messages that don't already have a model set + // Set the model for all user and assistant messages that don't already have a model set for message in value.messages.iter_mut() { if let crate::ContextMessage::Text(text_msg) = &mut **message - && text_msg.role == crate::Role::User + && (text_msg.role == crate::Role::User || text_msg.role == crate::Role::Assistant) && text_msg.model.is_none() { text_msg.model = Some(self.model.clone()); @@ -100,7 +101,7 @@ mod tests { } #[test] - fn test_set_model_only_affects_user_messages() { + fn test_set_model_does_not_affect_system_messages() { let fixture = Context::default() .add_message(ContextMessage::Text(TextMessage::new( Role::System, @@ -115,7 +116,33 @@ mod tests { let mut transformer = SetModel::new(ModelId::new("gpt-4")); let actual = transformer.transform(fixture.clone()); - let snapshot = TransformationSnapshot::new("SetModel(gpt-4)_user_only", fixture, actual); + let snapshot = + TransformationSnapshot::new("SetModel(gpt-4)_excludes_system", fixture, actual); + assert_yaml_snapshot!(snapshot); + } + + #[test] + fn test_set_model_affects_both_user_and_assistant() { + let fixture = Context::default() + .add_message(ContextMessage::user("User message", None)) + .add_message(ContextMessage::Text(TextMessage::new( + Role::Assistant, + "Assistant message", + ))) + .add_message(ContextMessage::Text(TextMessage::new( + Role::System, + "System message", + ))) + .add_message(ContextMessage::user("Another user message", None)); + + let mut transformer = SetModel::new(ModelId::new("gpt-4")); + let actual = transformer.transform(fixture.clone()); + + let snapshot = TransformationSnapshot::new( + "SetModel(gpt-4)_user_and_assistant", + fixture, + actual, + ); assert_yaml_snapshot!(snapshot); } } diff --git 
a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_keeps_all_when_first_has_reasoning.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_changed_snapshot.snap similarity index 61% rename from crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_keeps_all_when_first_has_reasoning.snap rename to crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_changed_snapshot.snap index ebe2a760cf..cbb38ba9ed 100644 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_keeps_all_when_first_has_reasoning.snap +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_changed_snapshot.snap @@ -2,18 +2,19 @@ source: crates/forge_domain/src/transformer/reasoning_normalizer.rs expression: snapshot --- -transformation: ReasoningNormalizer_first_has_reasoning +transformation: ReasoningNormalizer_model_changed before: messages: - text: role: User - content: User question + content: First question - text: role: Assistant - content: First assistant response with reasoning + content: First assistant response + model: model-a reasoning_details: - text: I need to think about this carefully - signature: ~ + signature: sig_model_a data: ~ id: ~ format: ~ @@ -24,36 +25,33 @@ before: content: Follow-up question - text: role: Assistant - content: Second assistant response with reasoning + content: Second assistant response + model: model-a reasoning_details: - text: I need to think about this carefully - signature: ~ + signature: sig_model_a data: ~ id: ~ format: ~ index: ~ type_of: ~ - - text: - role: Assistant - content: Third assistant without reasoning reasoning: enabled: true after: messages: - text: role: 
User - content: User question + content: First question - text: role: Assistant - content: First assistant response with reasoning + content: First assistant response + model: model-a - text: role: User content: Follow-up question - text: role: Assistant - content: Second assistant response with reasoning - - text: - role: Assistant - content: Third assistant without reasoning + content: Second assistant response + model: model-a reasoning: enabled: true diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_unchanged_snapshot.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_unchanged_snapshot.snap new file mode 100644 index 0000000000..9350ccc2ae --- /dev/null +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__model_unchanged_snapshot.snap @@ -0,0 +1,73 @@ +--- +source: crates/forge_domain/src/transformer/reasoning_normalizer.rs +expression: snapshot +--- +transformation: ReasoningNormalizer_model_unchanged +before: + messages: + - text: + role: User + content: First question + - text: + role: Assistant + content: First assistant response + model: model-a + reasoning_details: + - text: I need to think about this carefully + signature: sig_model_a + data: ~ + id: ~ + format: ~ + index: ~ + type_of: ~ + - text: + role: User + content: Follow-up question + - text: + role: Assistant + content: Second assistant response + model: model-a + reasoning_details: + - text: I need to think about this carefully + signature: sig_model_a + data: ~ + id: ~ + format: ~ + index: ~ + type_of: ~ + reasoning: + enabled: true +after: + messages: + - text: + role: User + content: First question + - text: + role: Assistant + content: First assistant response + model: model-a + reasoning_details: + - text: I need to think about this carefully + signature: sig_model_a + data: ~ + id: ~ + format: ~ + 
index: ~ + type_of: ~ + - text: + role: User + content: Follow-up question + - text: + role: Assistant + content: Second assistant response + model: model-a + reasoning_details: + - text: I need to think about this carefully + signature: sig_model_a + data: ~ + id: ~ + format: ~ + index: ~ + type_of: ~ + reasoning: + enabled: true diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_removes_all_when_first_assistant_message_has_no_reasoning.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_removes_all_when_first_assistant_message_has_no_reasoning.snap deleted file mode 100644 index 3fd25178ff..0000000000 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_removes_all_when_first_assistant_message_has_no_reasoning.snap +++ /dev/null @@ -1,61 +0,0 @@ ---- -source: crates/forge_domain/src/transformer/reasoning_normalizer.rs -expression: snapshot ---- -transformation: ReasoningNormalizer_first_no_reasoning -before: - messages: - - text: - role: User - content: User message - - text: - role: Assistant - content: First assistant without reasoning - - text: - role: Assistant - content: Second assistant with reasoning - reasoning_details: - - text: Complex reasoning process - signature: ~ - data: ~ - id: ~ - format: ~ - index: ~ - type_of: ~ - - text: - role: Assistant - content: Third assistant with reasoning - reasoning_details: - - text: Complex reasoning process - signature: ~ - data: ~ - id: ~ - format: ~ - index: ~ - type_of: ~ - reasoning: - enabled: true -after: - messages: - - text: - role: User - content: User message - - text: - role: Assistant - content: First assistant without reasoning - - text: - role: Assistant - content: Second assistant with reasoning - - text: - role: Assistant - content: Third assistant with reasoning - 
reasoning_details: - - text: Complex reasoning process - signature: ~ - data: ~ - id: ~ - format: ~ - index: ~ - type_of: ~ - reasoning: - enabled: true diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap new file mode 100644 index 0000000000..c75f602cb1 --- /dev/null +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap @@ -0,0 +1,36 @@ +--- +source: crates/forge_domain/src/transformer/set_model.rs +expression: snapshot +--- +transformation: SetModel(gpt-4)_user_and_assistant +before: + messages: + - text: + role: User + content: User message + - text: + role: Assistant + content: Assistant message + - text: + role: System + content: System message + - text: + role: User + content: Another user message +after: + messages: + - text: + role: User + content: User message + model: gpt-4 + - text: + role: Assistant + content: Assistant message + model: gpt-4 + - text: + role: System + content: System message + - text: + role: User + content: Another user message + model: gpt-4 diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_when_no_assistant_message_present.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap similarity index 52% rename from crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_when_no_assistant_message_present.snap rename to crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap index 
ca8eec7585..ce0fefb8c5 100644 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__reasoning_normalizer__tests__reasoning_normalizer_when_no_assistant_message_present.snap +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap @@ -1,25 +1,29 @@ --- -source: crates/forge_domain/src/transformer/reasoning_normalizer.rs +source: crates/forge_domain/src/transformer/set_model.rs expression: snapshot --- -transformation: ReasoningNormalizer_first_no_assistant_message_present +transformation: SetModel(gpt-4)_excludes_system before: messages: - text: role: System content: System message + - text: + role: Assistant + content: Assistant message - text: role: User content: User message - reasoning: - enabled: true after: messages: - text: role: System content: System message + - text: + role: Assistant + content: Assistant message + model: gpt-4 - text: role: User content: User message - reasoning: - enabled: true + model: gpt-4 diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap index 8a268fb502..b8507035cf 100644 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap @@ -29,6 +29,7 @@ after: - text: role: Assistant content: Assistant response + model: gpt-4 - text: role: User content: User message 2 From 00ff6ceb9cc512bdde97715079116b5f5e0c5717 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 27 Mar 2026 06:45:00 +0000 Subject: [PATCH 2/5] [autofix.ci] apply automated fixes --- 
crates/forge_domain/src/context.rs | 72 +++++++------------ .../src/transformer/reasoning_normalizer.rs | 68 +++++++++++------- .../forge_domain/src/transformer/set_model.rs | 10 ++- 3 files changed, 74 insertions(+), 76 deletions(-) diff --git a/crates/forge_domain/src/context.rs b/crates/forge_domain/src/context.rs index 0e552ee23a..1acdbbd490 100644 --- a/crates/forge_domain/src/context.rs +++ b/crates/forge_domain/src/context.rs @@ -693,27 +693,22 @@ impl Context { } /// Checks if the model has changed from the previous assistant message. - /// Returns true if the previous assistant message has a different model than - /// the provided current_model, or if there is no previous assistant message - /// with a model. + /// Returns true if the previous assistant message has a different model + /// than the provided current_model, or if there is no previous + /// assistant message with a model. /// - /// This is used to determine whether to apply reasoning normalization - we only - /// want to strip reasoning when switching models, not when continuing with the - /// same model. + /// This is used to determine whether to apply reasoning normalization - we + /// only want to strip reasoning when switching models, not when + /// continuing with the same model. 
pub fn has_model_changed(&self, current_model: &ModelId) -> bool { // Find the last assistant message with a model field - let last_assistant_model = self - .messages - .iter() - .rev() - .find_map(|msg| { - if let ContextMessage::Text(text_msg) = &**msg { - if text_msg.has_role(Role::Assistant) { - return text_msg.model.as_ref(); - } + let last_assistant_model = self.messages.iter().rev().find_map(|msg| { + if let ContextMessage::Text(text_msg) = &**msg + && text_msg.has_role(Role::Assistant) { + return text_msg.model.as_ref(); } - None - }); + None + }); // If there's no previous assistant model, consider it as changed // If there is a previous model, check if it differs from current @@ -1542,9 +1537,8 @@ mod tests { #[test] fn test_has_model_changed_returns_true_when_model_differs() { - let fixture = Context::default().add_message( - TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-3.5")), - ); + let fixture = Context::default() + .add_message(TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-3.5"))); let current_model = ModelId::new("gpt-4"); let actual = fixture.has_model_changed(¤t_model); @@ -1555,9 +1549,8 @@ mod tests { #[test] fn test_has_model_changed_returns_false_when_model_same() { - let fixture = Context::default().add_message( - TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-4")), - ); + let fixture = Context::default() + .add_message(TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-4"))); let current_model = ModelId::new("gpt-4"); let actual = fixture.has_model_changed(¤t_model); @@ -1568,8 +1561,7 @@ mod tests { #[test] fn test_has_model_changed_returns_true_when_previous_has_no_model() { - let fixture = - Context::default().add_message(TextMessage::new(Role::Assistant, "Hello")); // No model set + let fixture = Context::default().add_message(TextMessage::new(Role::Assistant, "Hello")); // No model set let current_model = ModelId::new("gpt-4"); let actual = 
fixture.has_model_changed(¤t_model); @@ -1581,13 +1573,9 @@ mod tests { #[test] fn test_has_model_changed_checks_last_assistant_message_with_model() { let fixture = Context::default() - .add_message( - TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5")), - ) + .add_message(TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5"))) .add_message(TextMessage::new(Role::User, "Question")) - .add_message( - TextMessage::new(Role::Assistant, "Second").model(ModelId::new("gpt-4")), - ); + .add_message(TextMessage::new(Role::Assistant, "Second").model(ModelId::new("gpt-4"))); let current_model = ModelId::new("gpt-4"); let actual = fixture.has_model_changed(¤t_model); @@ -1599,9 +1587,7 @@ mod tests { #[test] fn test_has_model_changed_with_multiple_messages_model_changed() { let fixture = Context::default() - .add_message( - TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5")), - ) + .add_message(TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5"))) .add_message(TextMessage::new(Role::User, "Question")) .add_message( TextMessage::new(Role::Assistant, "Second").model(ModelId::new("claude-3")), @@ -1616,11 +1602,10 @@ mod tests { #[test] fn test_has_model_changed_ignores_user_messages() { - // User messages have model tracking too, but we should only check assistant messages + // User messages have model tracking too, but we should only check assistant + // messages let fixture = Context::default() - .add_message( - TextMessage::new(Role::Assistant, "Response").model(ModelId::new("gpt-4")), - ) + .add_message(TextMessage::new(Role::Assistant, "Response").model(ModelId::new("gpt-4"))) .add_message(TextMessage::new(Role::User, "Question").model(ModelId::new("claude-3"))); let current_model = ModelId::new("gpt-4"); @@ -1632,15 +1617,12 @@ mod tests { #[test] fn test_has_model_changed_continuing_same_model() { - // Scenario: model1 -> model2 -> model2 (the second model2 should not drop reasoning) + // 
Scenario: model1 -> model2 -> model2 (the second model2 should not drop + // reasoning) let fixture = Context::default() - .add_message( - TextMessage::new(Role::Assistant, "First").model(ModelId::new("model1")), - ) + .add_message(TextMessage::new(Role::Assistant, "First").model(ModelId::new("model1"))) .add_message(TextMessage::new(Role::User, "Question")) - .add_message( - TextMessage::new(Role::Assistant, "Second").model(ModelId::new("model2")), - ) + .add_message(TextMessage::new(Role::Assistant, "Second").model(ModelId::new("model2"))) .add_message(TextMessage::new(Role::User, "Another question")); let current_model = ModelId::new("model2"); diff --git a/crates/forge_domain/src/transformer/reasoning_normalizer.rs b/crates/forge_domain/src/transformer/reasoning_normalizer.rs index b1a66deb9f..ffbfaae3aa 100644 --- a/crates/forge_domain/src/transformer/reasoning_normalizer.rs +++ b/crates/forge_domain/src/transformer/reasoning_normalizer.rs @@ -49,13 +49,11 @@ impl Transformer for ReasoningNormalizer { .enumerate() .rev() .find_map(|(idx, msg)| { - if msg.has_role(crate::Role::Assistant) { - if let crate::ContextMessage::Text(text) = &**msg { - if text.model.as_ref() != Some(&self.model_id) { + if msg.has_role(crate::Role::Assistant) + && let crate::ContextMessage::Text(text) = &**msg + && text.model.as_ref() != Some(&self.model_id) { return Some(idx); } - } - } None }); @@ -120,7 +118,8 @@ mod tests { }] } - /// Shorthand for an assistant `ContextMessage` with model and reasoning set. + /// Shorthand for an assistant `ContextMessage` with model and reasoning + /// set. fn assistant_msg(model: ModelId, content: &str) -> ContextMessage { ContextMessage::Text( TextMessage::new(Role::Assistant, content) @@ -129,12 +128,16 @@ mod tests { ) } - /// Builds a context where the last assistant message was produced by `prev_model`. + /// Builds a context where the last assistant message was produced by + /// `prev_model`. 
fn fixture_with_prev_model(prev_model: ModelId) -> Context { Context::default() .reasoning(ReasoningConfig::default().enabled(true)) .add_message(ContextMessage::user("First question", None)) - .add_message(assistant_msg(prev_model.clone(), "First assistant response")) + .add_message(assistant_msg( + prev_model.clone(), + "First assistant response", + )) .add_message(ContextMessage::user("Follow-up question", None)) .add_message(assistant_msg(prev_model, "Second assistant response")) } @@ -147,7 +150,10 @@ mod tests { let mut transformer = ReasoningNormalizer::new(model_a()); let actual = transformer.transform(fixture.clone()); - assert_eq!(actual, fixture, "Context should be unchanged when model is the same"); + assert_eq!( + actual, fixture, + "Context should be unchanged when model is the same" + ); } #[test] @@ -160,14 +166,13 @@ mod tests { let actual = transformer.transform(fixture); for message in &actual.messages { - if message.has_role(Role::Assistant) { - if let crate::ContextMessage::Text(text) = &**message { + if message.has_role(Role::Assistant) + && let crate::ContextMessage::Text(text) = &**message { assert_eq!( text.reasoning_details, None, "All assistant reasoning must be stripped on model change" ); } - } } } @@ -198,7 +203,10 @@ mod tests { let mut transformer = ReasoningNormalizer::new(model_b()); let actual = transformer.transform(fixture.clone()); - assert_eq!(actual, fixture, "Context should be unchanged when there is no previous assistant"); + assert_eq!( + actual, fixture, + "Context should be unchanged when there is no previous assistant" + ); } // --- Back-and-forth model change tests --- @@ -251,7 +259,10 @@ mod tests { // a1 (model_a) is before the cutoff → stripped let msgs: Vec<_> = actual.messages.iter().collect(); if let crate::ContextMessage::Text(a1) = &**msgs[1] { - assert_eq!(a1.reasoning_details, None, "a1 (model_a) should be stripped"); + assert_eq!( + a1.reasoning_details, None, + "a1 (model_a) should be stripped" + ); } // 
a2 (model_b) is in the same-model tail → preserved if let crate::ContextMessage::Text(a2) = &**msgs[3] { @@ -315,11 +326,17 @@ mod tests { let msgs: Vec<_> = actual.messages.iter().collect(); // a1 (model_a, before cutoff) → stripped if let crate::ContextMessage::Text(a1) = &**msgs[1] { - assert_eq!(a1.reasoning_details, None, "a1 should be stripped (before cutoff)"); + assert_eq!( + a1.reasoning_details, None, + "a1 should be stripped (before cutoff)" + ); } // b2 (model_b, the cutoff itself) → stripped if let crate::ContextMessage::Text(b2) = &**msgs[3] { - assert_eq!(b2.reasoning_details, None, "b2 should be stripped (is the cutoff)"); + assert_eq!( + b2.reasoning_details, None, + "b2 should be stripped (is the cutoff)" + ); } // a3 (model_a, same-model tail) → preserved if let crate::ContextMessage::Text(a3) = &**msgs[5] { @@ -331,14 +348,14 @@ mod tests { } } - /// Returns `true` when every assistant message in `ctx` has no reasoning details. + /// Returns `true` when every assistant message in `ctx` has no reasoning + /// details. 
fn all_reasoning_stripped(ctx: &Context) -> bool { ctx.messages.iter().all(|msg| { - if msg.has_role(Role::Assistant) { - if let crate::ContextMessage::Text(text) = &**msg { + if msg.has_role(Role::Assistant) + && let crate::ContextMessage::Text(text) = &**msg { return text.reasoning_details.is_none(); } - } true }) } @@ -369,7 +386,7 @@ mod tests { .add_message(ContextMessage::user("q8", None)) .add_message(assistant_msg(model_a(), "a8")) .add_message(ContextMessage::user("q9", None)) - .add_message(assistant_msg(model_b(), "b9")) // tail start + .add_message(assistant_msg(model_b(), "b9")) // tail start .add_message(ContextMessage::user("q10", None)) .add_message(assistant_msg(model_b(), "b10")) // tail .add_message(ContextMessage::user("q11", None)) @@ -399,8 +416,7 @@ mod tests { for pre_msg in &assistant_msgs[..assistant_msgs.len() - 3] { if let crate::ContextMessage::Text(t) = &***pre_msg { assert_eq!( - t.reasoning_details, - None, + t.reasoning_details, None, "pre-tail message should have reasoning stripped: {}", t.content ); @@ -417,7 +433,8 @@ mod tests { let mut transformer = ReasoningNormalizer::new(model_b()); let actual = transformer.transform(fixture.clone()); - let snapshot = TransformationSnapshot::new("ReasoningNormalizer_model_changed", fixture, actual); + let snapshot = + TransformationSnapshot::new("ReasoningNormalizer_model_changed", fixture, actual); assert_yaml_snapshot!(snapshot); } @@ -427,7 +444,8 @@ mod tests { let mut transformer = ReasoningNormalizer::new(model_a()); let actual = transformer.transform(fixture.clone()); - let snapshot = TransformationSnapshot::new("ReasoningNormalizer_model_unchanged", fixture, actual); + let snapshot = + TransformationSnapshot::new("ReasoningNormalizer_model_unchanged", fixture, actual); assert_yaml_snapshot!(snapshot); } } diff --git a/crates/forge_domain/src/transformer/set_model.rs b/crates/forge_domain/src/transformer/set_model.rs index b10abef5a6..0df86b09eb 100644 --- 
a/crates/forge_domain/src/transformer/set_model.rs +++ b/crates/forge_domain/src/transformer/set_model.rs @@ -17,7 +17,8 @@ impl Transformer for SetModel { type Value = Context; fn transform(&mut self, mut value: Self::Value) -> Self::Value { - // Set the model for all user and assistant messages that don't already have a model set + // Set the model for all user and assistant messages that don't already have a + // model set for message in value.messages.iter_mut() { if let crate::ContextMessage::Text(text_msg) = &mut **message && (text_msg.role == crate::Role::User || text_msg.role == crate::Role::Assistant) @@ -138,11 +139,8 @@ mod tests { let mut transformer = SetModel::new(ModelId::new("gpt-4")); let actual = transformer.transform(fixture.clone()); - let snapshot = TransformationSnapshot::new( - "SetModel(gpt-4)_user_and_assistant", - fixture, - actual, - ); + let snapshot = + TransformationSnapshot::new("SetModel(gpt-4)_user_and_assistant", fixture, actual); assert_yaml_snapshot!(snapshot); } } From d562b002f633764ff2b18b9f8f04b93a2cde71ee Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 27 Mar 2026 06:46:48 +0000 Subject: [PATCH 3/5] [autofix.ci] apply automated fixes (attempt 2/3) --- crates/forge_domain/src/context.rs | 7 ++--- .../src/transformer/reasoning_normalizer.rs | 27 ++++++++++--------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/crates/forge_domain/src/context.rs b/crates/forge_domain/src/context.rs index 1acdbbd490..8aca162601 100644 --- a/crates/forge_domain/src/context.rs +++ b/crates/forge_domain/src/context.rs @@ -704,9 +704,10 @@ impl Context { // Find the last assistant message with a model field let last_assistant_model = self.messages.iter().rev().find_map(|msg| { if let ContextMessage::Text(text_msg) = &**msg - && text_msg.has_role(Role::Assistant) { - return text_msg.model.as_ref(); - } + && text_msg.has_role(Role::Assistant) + { + return 
text_msg.model.as_ref(); + } None }); diff --git a/crates/forge_domain/src/transformer/reasoning_normalizer.rs b/crates/forge_domain/src/transformer/reasoning_normalizer.rs index ffbfaae3aa..f29bc8ed1b 100644 --- a/crates/forge_domain/src/transformer/reasoning_normalizer.rs +++ b/crates/forge_domain/src/transformer/reasoning_normalizer.rs @@ -51,9 +51,10 @@ impl Transformer for ReasoningNormalizer { .find_map(|(idx, msg)| { if msg.has_role(crate::Role::Assistant) && let crate::ContextMessage::Text(text) = &**msg - && text.model.as_ref() != Some(&self.model_id) { - return Some(idx); - } + && text.model.as_ref() != Some(&self.model_id) + { + return Some(idx); + } None }); @@ -167,12 +168,13 @@ mod tests { for message in &actual.messages { if message.has_role(Role::Assistant) - && let crate::ContextMessage::Text(text) = &**message { - assert_eq!( - text.reasoning_details, None, - "All assistant reasoning must be stripped on model change" - ); - } + && let crate::ContextMessage::Text(text) = &**message + { + assert_eq!( + text.reasoning_details, None, + "All assistant reasoning must be stripped on model change" + ); + } } } @@ -353,9 +355,10 @@ mod tests { fn all_reasoning_stripped(ctx: &Context) -> bool { ctx.messages.iter().all(|msg| { if msg.has_role(Role::Assistant) - && let crate::ContextMessage::Text(text) = &**msg { - return text.reasoning_details.is_none(); - } + && let crate::ContextMessage::Text(text) = &**msg + { + return text.reasoning_details.is_none(); + } true }) } From bb9342936190cee32c364c311351fa9c227609c5 Mon Sep 17 00:00:00 2001 From: Amit Singh Date: Fri, 27 Mar 2026 12:58:12 +0530 Subject: [PATCH 4/5] fix(set_model): remove redundant role check Remove the role check from SetModel transformer since ContextMessage::Text already only contains user and assistant messages. 
Co-Authored-By: ForgeCode --- crates/forge_domain/src/transformer/set_model.rs | 15 +++++---------- ...sts__set_model_affects_all_text_messages.snap} | 3 ++- ...set_model_affects_both_user_and_assistant.snap | 1 + ...model__tests__set_model_for_user_messages.snap | 1 + 4 files changed, 9 insertions(+), 11 deletions(-) rename crates/forge_domain/src/transformer/snapshots/{forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap => forge_domain__transformer__set_model__tests__set_model_affects_all_text_messages.snap} (90%) diff --git a/crates/forge_domain/src/transformer/set_model.rs b/crates/forge_domain/src/transformer/set_model.rs index 0df86b09eb..17742ea3da 100644 --- a/crates/forge_domain/src/transformer/set_model.rs +++ b/crates/forge_domain/src/transformer/set_model.rs @@ -1,8 +1,7 @@ use super::Transformer; use crate::{Context, ModelId}; -/// Transformer that sets the model for all user and assistant messages in the -/// context +/// Transformer that sets the model for all text messages in the context pub struct SetModel { pub model: ModelId, } @@ -17,12 +16,9 @@ impl Transformer for SetModel { type Value = Context; fn transform(&mut self, mut value: Self::Value) -> Self::Value { - // Set the model for all user and assistant messages that don't already have a - // model set + // Set the model for all text messages that don't already have a model set for message in value.messages.iter_mut() { - if let crate::ContextMessage::Text(text_msg) = &mut **message - && (text_msg.role == crate::Role::User || text_msg.role == crate::Role::Assistant) - && text_msg.model.is_none() + if let crate::ContextMessage::Text(text_msg) = &mut **message && text_msg.model.is_none() { text_msg.model = Some(self.model.clone()); } @@ -102,7 +98,7 @@ mod tests { } #[test] - fn test_set_model_does_not_affect_system_messages() { + fn test_set_model_affects_all_text_messages() { let fixture = Context::default() 
.add_message(ContextMessage::Text(TextMessage::new( Role::System, @@ -117,8 +113,7 @@ mod tests { let mut transformer = SetModel::new(ModelId::new("gpt-4")); let actual = transformer.transform(fixture.clone()); - let snapshot = - TransformationSnapshot::new("SetModel(gpt-4)_excludes_system", fixture, actual); + let snapshot = TransformationSnapshot::new("SetModel(gpt-4)_all_text", fixture, actual); assert_yaml_snapshot!(snapshot); } diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_all_text_messages.snap similarity index 90% rename from crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap rename to crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_all_text_messages.snap index ce0fefb8c5..8a4694e6d1 100644 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_does_not_affect_system_messages.snap +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_all_text_messages.snap @@ -2,7 +2,7 @@ source: crates/forge_domain/src/transformer/set_model.rs expression: snapshot --- -transformation: SetModel(gpt-4)_excludes_system +transformation: SetModel(gpt-4)_all_text before: messages: - text: @@ -19,6 +19,7 @@ after: - text: role: System content: System message + model: gpt-4 - text: role: Assistant content: Assistant message diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap 
index c75f602cb1..70fa8c7da3 100644 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_affects_both_user_and_assistant.snap @@ -30,6 +30,7 @@ after: - text: role: System content: System message + model: gpt-4 - text: role: User content: Another user message diff --git a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap index b8507035cf..1a26efa2d5 100644 --- a/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap +++ b/crates/forge_domain/src/transformer/snapshots/forge_domain__transformer__set_model__tests__set_model_for_user_messages.snap @@ -22,6 +22,7 @@ after: - text: role: System content: System message + model: gpt-4 - text: role: User content: User message 1 From debfdc14c5bd7ec101b1369ac4048f0e063314a7 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 27 Mar 2026 07:30:05 +0000 Subject: [PATCH 5/5] [autofix.ci] apply automated fixes --- crates/forge_domain/src/transformer/set_model.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/forge_domain/src/transformer/set_model.rs b/crates/forge_domain/src/transformer/set_model.rs index 17742ea3da..8785ed742a 100644 --- a/crates/forge_domain/src/transformer/set_model.rs +++ b/crates/forge_domain/src/transformer/set_model.rs @@ -18,7 +18,8 @@ impl Transformer for SetModel { fn transform(&mut self, mut value: Self::Value) -> Self::Value { // Set the model for all text messages that don't already have a model set for message in value.messages.iter_mut() { - if let 
crate::ContextMessage::Text(text_msg) = &mut **message && text_msg.model.is_none() + if let crate::ContextMessage::Text(text_msg) = &mut **message + && text_msg.model.is_none() { text_msg.model = Some(self.model.clone()); }