Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion crates/forge_app/src/orch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,11 @@ impl<S: AgentService> Orchestrator<S> {
.pipe(SortTools::new(self.agent.tool_order()))
.pipe(TransformToolCalls::new().when(|_| !tool_supported))
.pipe(ImageHandling::new())
// Drop ALL reasoning (including config) when reasoning is not supported by the model
.pipe(DropReasoningDetails.when(|_| !reasoning_supported))
.pipe(ReasoningNormalizer.when(|_| reasoning_supported));
// Strip all reasoning from messages when the model has changed (signatures are
// model-specific and invalid across models). No-op when model is unchanged.
.pipe(ReasoningNormalizer::new(model_id.clone()));
let response = self
.services
.chat_agent(
Expand Down
142 changes: 140 additions & 2 deletions crates/forge_domain/src/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -557,8 +557,11 @@ impl Context {
) -> Self {
// Convert flat reasoning string to reasoning_details if present
let merged_reasoning_details = if let Some(reasoning_text) = reasoning {
let reasoning_entry =
ReasoningFull { text: Some(reasoning_text), ..Default::default() };
let reasoning_entry = ReasoningFull {
text: Some(reasoning_text),
type_of: Some("reasoning.text".to_string()),
..Default::default()
};
if let Some(mut existing_details) = reasoning_details {
existing_details.push(reasoning_entry);
Some(existing_details)
Expand Down Expand Up @@ -688,6 +691,33 @@ impl Context {
})
.sum()
}

/// Reports whether the active model differs from the one recorded on the
/// most recent assistant message in this context.
///
/// Returns `true` when the latest assistant message carries a different
/// model identifier, or when no assistant message with a model exists at
/// all. This drives reasoning normalization: reasoning signatures are
/// model-specific, so they must be stripped on a model switch, while
/// continuing with the same model yields `false` and keeps them intact.
pub fn has_model_changed(&self, current_model: &ModelId) -> bool {
    // Scan from the newest message backwards for the first assistant
    // message that has a recorded model identifier.
    let previous_model = self.messages.iter().rev().find_map(|msg| match &**msg {
        ContextMessage::Text(text) if text.has_role(Role::Assistant) => text.model.as_ref(),
        _ => None,
    });

    // No recorded model counts as a change; otherwise compare the
    // previous identifier against the current one.
    previous_model.map_or(true, |prev| prev != current_model)
}
}

#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
Expand Down Expand Up @@ -1494,4 +1524,112 @@ mod tests {
let expected = 5; // 18 chars / 4 = 5 tokens (rounded up)
assert_eq!(actual, expected);
}

#[test]
fn test_has_model_changed_returns_true_when_no_previous_messages() {
    // An empty context has no assistant history, so any model counts as new.
    let fixture = Context::default();

    assert!(fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_returns_true_when_model_differs() {
    // Previous assistant turn used gpt-3.5; switching to gpt-4 is a change.
    let message = TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-3.5"));
    let fixture = Context::default().add_message(message);

    assert!(fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_returns_false_when_model_same() {
    // Continuing with the identical model must not report a change.
    let message = TextMessage::new(Role::Assistant, "Hello").model(ModelId::new("gpt-4"));
    let fixture = Context::default().add_message(message);

    assert!(!fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_returns_true_when_previous_has_no_model() {
    // An assistant message without a recorded model is treated as a change.
    let fixture = Context::default().add_message(TextMessage::new(Role::Assistant, "Hello"));

    assert!(fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_checks_last_assistant_message_with_model() {
    // Only the most recent assistant model matters; earlier ones are ignored.
    let fixture = Context::default()
        .add_message(TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5")))
        .add_message(TextMessage::new(Role::User, "Question"))
        .add_message(TextMessage::new(Role::Assistant, "Second").model(ModelId::new("gpt-4")));

    // The latest assistant model is gpt-4, matching the current one.
    assert!(!fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_with_multiple_messages_model_changed() {
    // The newest assistant model (claude-3) differs from the current gpt-4.
    let fixture = Context::default()
        .add_message(TextMessage::new(Role::Assistant, "First").model(ModelId::new("gpt-3.5")))
        .add_message(TextMessage::new(Role::User, "Question"))
        .add_message(TextMessage::new(Role::Assistant, "Second").model(ModelId::new("claude-3")));

    assert!(fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_ignores_user_messages() {
    // A user message may carry a model too, but only assistant turns count.
    let fixture = Context::default()
        .add_message(TextMessage::new(Role::Assistant, "Response").model(ModelId::new("gpt-4")))
        .add_message(TextMessage::new(Role::User, "Question").model(ModelId::new("claude-3")));

    // The last assistant model is gpt-4, so nothing changed.
    assert!(!fixture.has_model_changed(&ModelId::new("gpt-4")));
}

#[test]
fn test_has_model_changed_continuing_same_model() {
    // model1 -> model2 -> model2: repeating model2 must not drop reasoning.
    let fixture = Context::default()
        .add_message(TextMessage::new(Role::Assistant, "First").model(ModelId::new("model1")))
        .add_message(TextMessage::new(Role::User, "Question"))
        .add_message(TextMessage::new(Role::Assistant, "Second").model(ModelId::new("model2")))
        .add_message(TextMessage::new(Role::User, "Another question"));

    // The last assistant turn used model2, same as the current model.
    assert!(!fixture.has_model_changed(&ModelId::new("model2")));
}
}
Loading
Loading