
Commit 41a7e2a

Add LM Studio backend support

1 parent 5c67dc3

18 files changed: 772 additions & 31 deletions

README.md

Lines changed: 28 additions & 0 deletions
@@ -69,6 +69,34 @@ Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp).
 
 Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md).
 
+### Using Codex with LM Studio
+
+Codex can run fully locally by delegating inference to [LM Studio](https://lmstudio.ai/).
+
+1. Launch LM Studio and enable the **Local Inference Server** (Preferences → Developer).
+2. Start any LM Studio model from the **My Models** tab. Codex looks for the model identifier exposed by the LM Studio server.
+3. Run Codex with the LM Studio backend:
+
+```shell
+# Interactive session using the default Llama 3.1 8B Instruct model
+codex --backend lmstudio
+
+# Explicitly pick one of the supported architectures
+codex --backend lmstudio --model qwen3
+codex exec --backend lmstudio --model qwen3-moe "summarize this repo"
+```
+
+Codex understands the following architecture aliases when `--backend lmstudio` is selected:
+
+| Alias       | LM Studio model identifier              |
+| ----------- | --------------------------------------- |
+| `llama`     | `meta-llama/Meta-Llama-3.1-8B-Instruct` |
+| `qwen2`     | `Qwen/Qwen2-7B-Instruct`                |
+| `qwen3`     | `Qwen/Qwen3-7B-Instruct`                |
+| `qwen3-moe` | `Qwen/Qwen3-MoE-A2.7B-Instruct`         |
+
+You can also pass the exact LM Studio identifier (for example `my-org/custom-model`) if you are running a different checkpoint. Codex verifies that the requested model is available from LM Studio and surfaces a clear error when it is not.
+
 ---
 
 ### Docs & FAQ
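Worth noting alongside this README change: the provider code added below in `model_provider_info.rs` reads two environment variables to locate the server. A hedged shell sketch of overriding the default endpoint (the addresses are illustrative):

```shell
# Point Codex at LM Studio on a non-default port ...
export CODEX_LM_STUDIO_PORT=4321   # resolves to http://localhost:4321/v1

# ... or override the full base URL, which takes precedence over the port:
export CODEX_LM_STUDIO_BASE_URL="http://127.0.0.1:1234/v1"

codex --backend lmstudio
```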

codex-rs/Cargo.lock

Lines changed: 16 additions & 0 deletions
(Generated lockfile; diff not shown.)

codex-rs/Cargo.toml

Lines changed: 2 additions & 0 deletions
@@ -14,6 +14,7 @@ members = [
     "login",
     "mcp-client",
     "mcp-server",
+    "lmstudio",
     "mcp-types",
     "ollama",
     "protocol",
@@ -50,6 +51,7 @@ codex-mcp-client = { path = "mcp-client" }
 codex-mcp-server = { path = "mcp-server" }
 codex-ollama = { path = "ollama" }
 codex-protocol = { path = "protocol" }
+codex-lmstudio = { path = "lmstudio" }
 codex-rmcp-client = { path = "rmcp-client" }
 codex-protocol-ts = { path = "protocol-ts" }
 codex-responses-api-proxy = { path = "responses-api-proxy" }
codex-rs/common/src/backend_cli_arg.rs

Lines changed: 36 additions & 0 deletions

@@ -0,0 +1,36 @@
+use clap::ValueEnum;
+
+/// CLI flag values for selecting the Codex runtime backend.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
+#[value(rename_all = "kebab-case")]
+pub enum BackendCliArg {
+    /// Use the default OpenAI backend.
+    Openai,
+    /// Use the bundled open-source Ollama integration.
+    Oss,
+    /// Use a local LM Studio instance.
+    Lmstudio,
+}
+
+impl BackendCliArg {
+    /// Returns the model provider key associated with this backend, if any.
+    pub fn provider_key(self) -> Option<&'static str> {
+        match self {
+            BackendCliArg::Openai => None,
+            BackendCliArg::Oss => Some(codex_core::BUILT_IN_OSS_MODEL_PROVIDER_ID),
+            BackendCliArg::Lmstudio => Some(codex_core::BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID),
+        }
+    }
+
+    pub fn is_oss(self) -> bool {
+        matches!(self, BackendCliArg::Oss)
+    }
+
+    pub fn is_lmstudio(self) -> bool {
+        matches!(self, BackendCliArg::Lmstudio)
+    }
+
+    pub fn is_local(self) -> bool {
+        matches!(self, BackendCliArg::Oss | BackendCliArg::Lmstudio)
+    }
+}
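A minimal call-site sketch, not part of this diff: `provider_key()` is designed to index into the `built_in_model_providers()` map that `codex-core` exports (see `core/src/lib.rs` below). The helper name `provider_for` is hypothetical.

```rust
use codex_common::BackendCliArg;
use codex_core::{ModelProviderInfo, built_in_model_providers};

/// Hypothetical helper: resolve the built-in provider for a backend flag.
/// Returns None for the default OpenAI backend, which needs no override.
fn provider_for(backend: BackendCliArg) -> Option<ModelProviderInfo> {
    let key = backend.provider_key()?; // "oss" or "lmstudio"
    built_in_model_providers().remove(key)
}

fn main() {
    let provider = provider_for(BackendCliArg::Lmstudio).expect("built-in provider");
    assert_eq!(provider.name, "LM Studio");
}
```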

codex-rs/common/src/lib.rs

Lines changed: 6 additions & 0 deletions
@@ -1,3 +1,9 @@
+#[cfg(feature = "cli")]
+mod backend_cli_arg;
+
+#[cfg(feature = "cli")]
+pub use backend_cli_arg::BackendCliArg;
+
 #[cfg(feature = "cli")]
 mod approval_mode_cli_arg;
 

codex-rs/core/src/lib.rs

Lines changed: 2 additions & 0 deletions
@@ -39,10 +39,12 @@ pub mod parse_command;
 mod truncate;
 mod unified_exec;
 mod user_instructions;
+pub use model_provider_info::BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID;
 pub use model_provider_info::BUILT_IN_OSS_MODEL_PROVIDER_ID;
 pub use model_provider_info::ModelProviderInfo;
 pub use model_provider_info::WireApi;
 pub use model_provider_info::built_in_model_providers;
+pub use model_provider_info::create_lmstudio_provider_with_base_url;
 pub use model_provider_info::create_oss_provider_with_base_url;
 mod conversation_manager;
 mod event_mapping;

codex-rs/core/src/model_provider_info.rs

Lines changed: 85 additions & 2 deletions
@@ -248,17 +248,20 @@ impl ModelProviderInfo {
 }
 
 const DEFAULT_OLLAMA_PORT: u32 = 11434;
+const DEFAULT_LM_STUDIO_PORT: u32 = 1234;
 
 pub const BUILT_IN_OSS_MODEL_PROVIDER_ID: &str = "oss";
+pub const BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID: &str = "lmstudio";
 
 /// Built-in default provider list.
 pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
     use ModelProviderInfo as P;
 
     // We do not want to be in the business of adjudicating which third-party
     // providers are bundled with Codex CLI, so we only include the OpenAI and
-    // open source ("oss") providers by default. Users are encouraged to add to
-    // `model_providers` in config.toml to add their own providers.
+    // local open source providers (Ollama "oss" and LM Studio) by default.
+    // Users are encouraged to add entries to `model_providers` in config.toml
+    // for their own providers.
     [
         (
             "openai",
@@ -300,6 +303,10 @@ pub fn built_in_model_providers() -> HashMap<String, ModelProviderInfo> {
             },
         ),
         (BUILT_IN_OSS_MODEL_PROVIDER_ID, create_oss_provider()),
+        (
+            BUILT_IN_LM_STUDIO_MODEL_PROVIDER_ID,
+            create_lmstudio_provider(),
+        ),
     ]
     .into_iter()
     .map(|(k, v)| (k.to_string(), v))
@@ -344,6 +351,45 @@ pub fn create_oss_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
     }
 }
 
+pub fn create_lmstudio_provider() -> ModelProviderInfo {
+    let base_url = match std::env::var("CODEX_LM_STUDIO_BASE_URL")
+        .ok()
+        .filter(|v| !v.trim().is_empty())
+    {
+        Some(url) => url,
+        None => format!(
+            "http://localhost:{port}/v1",
+            port = std::env::var("CODEX_LM_STUDIO_PORT")
+                .ok()
+                .filter(|v| !v.trim().is_empty())
+                .and_then(|v| v.parse::<u32>().ok())
+                .unwrap_or(DEFAULT_LM_STUDIO_PORT)
+        ),
+    };
+
+    create_lmstudio_provider_with_base_url(&base_url)
+}
+
+pub fn create_lmstudio_provider_with_base_url(base_url: &str) -> ModelProviderInfo {
+    ModelProviderInfo {
+        name: "LM Studio".into(),
+        base_url: Some(base_url.into()),
+        env_key: None,
+        env_key_instructions: Some(
+            "Launch LM Studio and enable the local inference server (Preferences → Developer → Enable local server)."
+                .into(),
+        ),
+        wire_api: WireApi::Chat,
+        query_params: None,
+        http_headers: None,
+        env_http_headers: None,
+        request_max_retries: None,
+        stream_max_retries: None,
+        stream_idle_timeout_ms: None,
+        requires_openai_auth: false,
+    }
+}
+
 fn matches_azure_responses_base_url(base_url: &str) -> bool {
     let base = base_url.to_ascii_lowercase();
     const AZURE_MARKERS: [&str; 5] = [
@@ -386,6 +432,43 @@ base_url = "http://localhost:11434/v1"
     assert_eq!(expected_provider, provider);
 }
 
+#[test]
+fn test_deserialize_lmstudio_model_provider_toml() {
+    let provider_toml = r#"
+name = "LM Studio"
+base_url = "http://localhost:1234/v1"
+"#;
+    let expected_provider = ModelProviderInfo {
+        name: "LM Studio".into(),
+        base_url: Some("http://localhost:1234/v1".into()),
+        env_key: None,
+        env_key_instructions: None,
+        wire_api: WireApi::Chat,
+        query_params: None,
+        http_headers: None,
+        env_http_headers: None,
+        request_max_retries: None,
+        stream_max_retries: None,
+        stream_idle_timeout_ms: None,
+        requires_openai_auth: false,
+    };
+
+    let provider: ModelProviderInfo = toml::from_str(provider_toml).unwrap();
+    assert_eq!(expected_provider, provider);
+}
+
+#[test]
+fn test_create_lmstudio_provider_with_base_url() {
+    let provider = create_lmstudio_provider_with_base_url("http://localhost:9999/v1");
+    assert_eq!(provider.name, "LM Studio");
+    assert_eq!(
+        provider.base_url.as_deref(),
+        Some("http://localhost:9999/v1")
+    );
+    assert_eq!(provider.wire_api, WireApi::Chat);
+    assert!(!provider.requires_openai_auth);
+}
+
 #[test]
 fn test_deserialize_azure_model_provider_toml() {
     let azure_provider_toml = r#"

codex-rs/exec/Cargo.toml

Lines changed: 1 addition & 0 deletions
@@ -25,6 +25,7 @@ codex-common = { workspace = true, features = [
     "sandbox_summary",
 ] }
 codex-core = { workspace = true }
+codex-lmstudio = { workspace = true }
 codex-ollama = { workspace = true }
 codex-protocol = { workspace = true }
 owo-colors = { workspace = true }

codex-rs/exec/src/cli.rs

Lines changed: 6 additions & 1 deletion
@@ -1,5 +1,6 @@
 use clap::Parser;
 use clap::ValueEnum;
+use codex_common::BackendCliArg;
 use codex_common::CliConfigOverrides;
 use std::path::PathBuf;
 
@@ -18,7 +19,11 @@ pub struct Cli {
     #[arg(long, short = 'm')]
     pub model: Option<String>,
 
-    #[arg(long = "oss", default_value_t = false)]
+    /// Select the runtime backend Codex should connect to.
+    #[arg(long = "backend", value_enum, conflicts_with = "oss")]
+    pub backend: Option<BackendCliArg>,
+
+    #[arg(long = "oss", default_value_t = false, conflicts_with = "backend")]
     pub oss: bool,
 
     /// Select the sandbox policy to use when executing model-generated shell
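A self-contained sketch of the new flag wiring, assuming clap 4 with the `derive` feature; the struct is trimmed to the two interacting fields, so it is a minimal model of the diff rather than the full `Cli`:

```rust
use clap::{Parser, ValueEnum};

#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
#[value(rename_all = "kebab-case")]
enum BackendCliArg {
    Openai,
    Oss,
    Lmstudio,
}

#[derive(Parser)]
struct Cli {
    /// `--backend lmstudio` parses to BackendCliArg::Lmstudio via kebab-case names.
    #[arg(long = "backend", value_enum, conflicts_with = "oss")]
    backend: Option<BackendCliArg>,

    /// Legacy flag kept for compatibility; mutually exclusive with --backend.
    #[arg(long = "oss", default_value_t = false, conflicts_with = "backend")]
    oss: bool,
}

fn main() {
    let cli = Cli::parse_from(["codex", "--backend", "lmstudio"]);
    assert_eq!(cli.backend, Some(BackendCliArg::Lmstudio));

    // Conflicting flags fail to parse rather than silently picking one.
    assert!(Cli::try_parse_from(["codex", "--oss", "--backend", "lmstudio"]).is_err());
}
```

The `conflicts_with` declarations make the migration path explicit: existing `--oss` invocations keep working, but mixing the old and new selectors is a hard parse error.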
