Skip to content

Commit 4767865

Browse files

Authored merge commit — Merge pull request #110 from hackall360/codex/fix-lmstudio-file-handling-issues: "Ensure LM Studio requests expose apply_patch tool"

2 parents: 6739e11 + 1e733ad · commit 4767865

5 files changed

Lines changed: 188 additions & 6 deletions

File tree

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ Codex understands the following architecture aliases when `--backend lmstudio` i
9696
| `qwen3-moe` | `qwen/qwen3-coder-30b` |
9797
| `qwen3-moe-a3b`| `qwen/qwen3-30b-a3b-2507` |
9898

99-
You can also pass the exact LM Studio identifier (for example `my-org/custom-model`) if you are running a different checkpoint. Codex verifies that the requested model is available from LM Studio and surfaces clear errors when it is not.
99+
Aliases are case-insensitive and you can mix spaces, hyphens, or underscores (for example, `qwen3 coder 30b a3b`). You can also pass the exact LM Studio identifier (for example `my-org/custom-model`) if you are running a different checkpoint. Codex verifies that the requested model is available from LM Studio and surfaces clear errors when it is not.
100100

101101
When you select the LM Studio backend Codex automatically enables structured JSON output so the agent can reliably capture command results. No extra flags are required.
102102

codex-rs/core/src/model_family.rs

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,15 @@ pub fn find_family_for_model(slug: &str) -> Option<ModelFamily> {
112112
supports_reasoning_summaries: true,
113113
needs_special_apply_patch_instructions: true,
114114
)
115+
} else if slug.starts_with("mistralai/devstral")
116+
|| slug.starts_with("qwen/qwen2")
117+
|| slug.starts_with("qwen/qwen3")
118+
{
119+
model_family!(
120+
slug,
121+
slug,
122+
apply_patch_tool_type: Some(ApplyPatchToolType::Function),
123+
)
115124
} else {
116125
None
117126
}
@@ -129,3 +138,27 @@ pub fn derive_default_model_family(model: &str) -> ModelFamily {
129138
base_instructions: BASE_INSTRUCTIONS.to_string(),
130139
}
131140
}
141+
142+
#[cfg(test)]
143+
mod tests {
144+
use super::*;
145+
use crate::tool_apply_patch::ApplyPatchToolType;
146+
147+
#[test]
148+
fn lmstudio_models_use_function_apply_patch_tool() {
149+
for slug in [
150+
"mistralai/devstral-small-2507",
151+
"qwen/qwen2.5-coder-14b",
152+
"qwen/qwen3-coder-30b",
153+
"qwen/qwen3-30b-a3b-2507",
154+
] {
155+
let family = find_family_for_model(slug)
156+
.unwrap_or_else(|| panic!("expected lmstudio slug {slug:?} to map"));
157+
assert_eq!(
158+
family.apply_patch_tool_type,
159+
Some(ApplyPatchToolType::Function),
160+
"LM Studio slug {slug} should expose the function-style apply_patch tool"
161+
);
162+
}
163+
}
164+
}

codex-rs/core/src/openai_tools.rs

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1059,6 +1059,55 @@ mod tests {
10591059
);
10601060
}
10611061

1062+
#[test]
1063+
fn lmstudio_models_include_apply_patch_tool() {
1064+
let model_family = find_family_for_model("qwen/qwen3-coder-30b")
1065+
.expect("LM Studio slugs should map to a model family");
1066+
let config = ToolsConfig::new(&ToolsConfigParams {
1067+
model_family: &model_family,
1068+
include_plan_tool: false,
1069+
include_apply_patch_tool: false,
1070+
include_web_search_request: false,
1071+
use_streamable_shell_tool: false,
1072+
include_view_image_tool: false,
1073+
experimental_unified_exec_tool: false,
1074+
});
1075+
1076+
let tools = get_openai_tools(&config, None);
1077+
assert!(tools.iter().any(|tool| match tool {
1078+
OpenAiTool::Function(ResponsesApiTool { name, .. }) => name == "apply_patch",
1079+
_ => false,
1080+
}));
1081+
}
1082+
1083+
#[test]
1084+
fn chat_completions_tools_include_apply_patch_for_lmstudio() {
1085+
let model_family = find_family_for_model("qwen/qwen3-coder-30b")
1086+
.expect("LM Studio slugs should map to a model family");
1087+
let config = ToolsConfig::new(&ToolsConfigParams {
1088+
model_family: &model_family,
1089+
include_plan_tool: false,
1090+
include_apply_patch_tool: false,
1091+
include_web_search_request: false,
1092+
use_streamable_shell_tool: false,
1093+
include_view_image_tool: false,
1094+
experimental_unified_exec_tool: false,
1095+
});
1096+
1097+
let tools = get_openai_tools(&config, None);
1098+
let chat_tools = create_tools_json_for_chat_completions_api(&tools)
1099+
.expect("conversion to chat tools should succeed");
1100+
1101+
assert!(chat_tools.iter().any(|tool| {
1102+
tool.get("type").and_then(JsonValue::as_str) == Some("function")
1103+
&& tool
1104+
.get("function")
1105+
.and_then(|fn_value| fn_value.get("name"))
1106+
.and_then(JsonValue::as_str)
1107+
== Some("apply_patch")
1108+
}));
1109+
}
1110+
10621111
#[test]
10631112
fn test_shell_tool() {
10641113
let tool = super::create_shell_tool();

codex-rs/exec/tests/suite/lmstudio.rs

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ async fn exec_resolves_lmstudio_model_aliases() -> anyhow::Result<()> {
2323
("qwen3-moe", "qwen/qwen3-coder-30b"),
2424
("qwen3moe", "qwen/qwen3-coder-30b"),
2525
("qwen3-moe-a3b", "qwen/qwen3-30b-a3b-2507"),
26+
("qwen3 coder 30b a3b", "qwen/qwen3-30b-a3b-2507"),
27+
("Qwen3 Coder 30B", "qwen/qwen3-coder-30b"),
2628
];
2729

2830
for (alias, expected_model) in cases {
@@ -109,3 +111,80 @@ async fn exec_resolves_lmstudio_model_aliases() -> anyhow::Result<()> {
109111

110112
Ok(())
111113
}
114+
115+
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
116+
async fn exec_enables_apply_patch_tool_for_lmstudio() -> anyhow::Result<()> {
117+
let test = test_codex_exec();
118+
let server = responses::start_mock_server().await;
119+
120+
let models_payload = serde_json::json!({
121+
"data": [
122+
{ "id": DEFAULT_LM_STUDIO_MODEL }
123+
]
124+
});
125+
126+
Mock::given(method("GET"))
127+
.and(path("/v1/models"))
128+
.respond_with(ResponseTemplate::new(200).set_body_json(models_payload))
129+
.expect(1)
130+
.mount(&server)
131+
.await;
132+
133+
let chat_stream = concat!(
134+
"data: {\"choices\":[{\"delta\":{\"content\":\"ok\"}}]}\n\n",
135+
"data: {\"choices\":[{\"delta\":{}}]}\n\n",
136+
"data: [DONE]\n\n",
137+
);
138+
139+
Mock::given(method("POST"))
140+
.and(path("/v1/chat/completions"))
141+
.respond_with(
142+
ResponseTemplate::new(200)
143+
.insert_header("content-type", "text/event-stream")
144+
.set_body_raw(chat_stream, "text/event-stream"),
145+
)
146+
.expect(1)
147+
.mount(&server)
148+
.await;
149+
150+
test.cmd()
151+
.env("CODEX_LM_STUDIO_BASE_URL", format!("{}/v1", server.uri()))
152+
.arg("--skip-git-repo-check")
153+
.arg("--backend")
154+
.arg("lmstudio")
155+
.arg(DEFAULT_LM_STUDIO_MODEL)
156+
.assert()
157+
.success();
158+
159+
let requests = server
160+
.received_requests()
161+
.await
162+
.expect("failed to capture requests");
163+
164+
let chat_request = requests
165+
.iter()
166+
.find(|req| req.method == Method::POST && req.url.path() == "/v1/chat/completions")
167+
.context("LM Studio chat completion request missing")?;
168+
169+
let payload: Value = serde_json::from_slice(&chat_request.body)
170+
.context("LM Studio chat completion request should be valid JSON")?;
171+
let tools = payload
172+
.get("tools")
173+
.and_then(Value::as_array)
174+
.context("LM Studio request missing tools array")?;
175+
176+
assert!(
177+
tools.iter().any(|tool| {
178+
tool.get("type").and_then(Value::as_str) == Some("function")
179+
&& tool
180+
.get("function")
181+
.and_then(|fn_value| fn_value.get("name"))
182+
.and_then(Value::as_str)
183+
== Some("apply_patch")
184+
}),
185+
"LM Studio chat request should include the apply_patch tool: {tools:?}"
186+
);
187+
188+
server.verify().await;
189+
Ok(())
190+
}

codex-rs/lmstudio/src/lib.rs

Lines changed: 26 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,14 @@ const MODEL_ALIAS_TABLE: &[(&str, &str)] = &[
3737
("qwen3_moe", "qwen/qwen3-coder-30b"),
3838
("qwen-3-moe", "qwen/qwen3-coder-30b"),
3939
("qwen3-coder", "qwen/qwen3-coder-30b"),
40+
("qwen3 coder", "qwen/qwen3-coder-30b"),
4041
("qwen3-30b", "qwen/qwen3-coder-30b"),
42+
("qwen3 coder 30b", "qwen/qwen3-coder-30b"),
4143
("qwen3-moe-a3b", "qwen/qwen3-30b-a3b-2507"),
4244
("qwen3-moe-a3b-2507", "qwen/qwen3-30b-a3b-2507"),
4345
("qwen3-30b-a3b", "qwen/qwen3-30b-a3b-2507"),
46+
("qwen3 coder a3b", "qwen/qwen3-30b-a3b-2507"),
47+
("qwen3 coder 30b a3b", "qwen/qwen3-30b-a3b-2507"),
4448
];
4549

4650
const MODEL_ALIAS_HINTS: &[(&str, &str)] = &[
@@ -58,6 +62,15 @@ fn alias_examples() -> String {
5862
.join(", ")
5963
}
6064

65+
fn normalized_alias_forms(value: &str) -> (String, String) {
66+
let lowercase = value.to_ascii_lowercase();
67+
let compact = lowercase
68+
.chars()
69+
.filter(|c| !matches!(c, '-' | '_' | ' '))
70+
.collect();
71+
(lowercase, compact)
72+
}
73+
6174
/// Error returned when a provided LM Studio model alias cannot be resolved.
6275
#[derive(Debug, Clone)]
6376
pub struct UnsupportedModelAliasError {
@@ -114,11 +127,11 @@ pub fn resolve_model_identifier(model: Option<&str>) -> Result<String, Unsupport
114127
if trimmed.is_empty() {
115128
return Err(UnsupportedModelAliasError::new(trimmed));
116129
}
117-
let normalized = trimmed.to_ascii_lowercase();
118-
if let Some((_, canonical)) = MODEL_ALIAS_TABLE
119-
.iter()
120-
.find(|(alias, _)| *alias == normalized)
121-
{
130+
let (normalized, normalized_compact) = normalized_alias_forms(trimmed);
131+
if let Some((_, canonical)) = MODEL_ALIAS_TABLE.iter().find(|(alias, _)| {
132+
let (alias_normalized, alias_compact) = normalized_alias_forms(alias);
133+
alias_normalized == normalized || alias_compact == normalized_compact
134+
}) {
122135
return Ok((*canonical).to_string());
123136
}
124137
if trimmed.contains('/') || trimmed.contains(':') {
@@ -272,6 +285,14 @@ mod tests {
272285
resolve_model_identifier(Some("qwen3-moe-a3b")).unwrap(),
273286
"qwen/qwen3-30b-a3b-2507"
274287
);
288+
assert_eq!(
289+
resolve_model_identifier(Some("qwen3 coder 30b a3b")).unwrap(),
290+
"qwen/qwen3-30b-a3b-2507"
291+
);
292+
assert_eq!(
293+
resolve_model_identifier(Some("Qwen3 Coder 30B")).unwrap(),
294+
"qwen/qwen3-coder-30b"
295+
);
275296
}
276297

277298
#[test]

0 commit comments

Comments (0)