From 8c4fa5ec80f63fb37ef8445cd3b6e6624f794f92 Mon Sep 17 00:00:00 2001 From: Chris Busillo Date: Sun, 8 Mar 2026 04:27:39 -0400 Subject: [PATCH 1/2] fix(agents): refresh provider model presets Anthropic's stale Sonnet slug kicked off this refresh. Update the built-in Anthropic, Gemini, and Qwen presets to current provider model IDs, keep legacy slugs resolving through aliases and validation upgrades, and align docs/examples/tests with the new defaults. --- code-rs/core/prompt_coder.md | 2 +- code-rs/core/src/agent_defaults.rs | 44 ++++++++++++++----------- code-rs/core/src/agent_tool.rs | 6 ++-- code-rs/core/src/config.rs | 18 +++++----- code-rs/core/src/config/validation.rs | 27 +++++++++++---- code-rs/core/src/slash_commands.rs | 8 ++--- code-rs/core/tests/gemini_model_args.rs | 19 ++++++----- code-rs/tui/src/chatwidget.rs | 8 ++--- config.toml.example | 4 +-- docs/agents.md | 4 +-- docs/config.md | 4 +-- docs/slash-commands.md | 8 ++--- 12 files changed, 88 insertions(+), 64 deletions(-) diff --git a/code-rs/core/prompt_coder.md b/code-rs/core/prompt_coder.md index 34c1f3dc568..9ebd63de4a5 100644 --- a/code-rs/core/prompt_coder.md +++ b/code-rs/core/prompt_coder.md @@ -60,7 +60,7 @@ agent { "task": "Implement JWT middleware (RS256) with key rotation and unit/integration tests. Preserve existing OAuth flows. Provide README usage snippet.", "context": "Service: services/api (Rust Axum). Secrets via env. 
CI: `cargo test --all`.", "files": ["services/api", "services/api/src", "services/api/Cargo.toml"], - "models": ["code-gpt-5.4","claude-sonnet-4.5","gemini-3-flash"], + "models": ["code-gpt-5.4","claude-sonnet-4.6","gemini-3-flash-preview"], "output": "Middleware + passing tests + README snippet", "write": true // Allow changes - will launch every agent in a separate worktree } diff --git a/code-rs/core/src/agent_defaults.rs b/code-rs/core/src/agent_defaults.rs index 5dd9f0411a6..51082a56843 100644 --- a/code-rs/core/src/agent_defaults.rs +++ b/code-rs/core/src/agent_defaults.rs @@ -39,14 +39,14 @@ pub const DEFAULT_AGENT_NAMES: &[&str] = &[ "code-gpt-5.3-codex", "code-gpt-5.3-codex-spark", "claude-opus-4.6", - "gemini-3-pro", + "gemini-3.1-pro-preview", // Straightforward / cost-aware "code-gpt-5.1-codex-mini", - "claude-sonnet-4.5", - "gemini-3-flash", + "claude-sonnet-4.6", + "gemini-3-flash-preview", // Mixed/general and alternates "claude-haiku-4.5", - "qwen-3-coder", + "qwen3-coder-plus", "cloud-gpt-5.1-codex-max", ]; @@ -179,7 +179,7 @@ const AGENT_MODEL_SPECS: &[AgentModelSpec] = &[ cli: "claude", read_only_args: CLAUDE_OPUS_READ_ONLY, write_args: CLAUDE_OPUS_WRITE, - model_args: &["--model", "opus"], + model_args: &["--model", "claude-opus-4-6"], description: "Higher-capacity Claude model for complex reasoning; use when you want the strongest Claude.", enabled_by_default: true, aliases: &["claude-opus", "claude-opus-4.1", "claude-opus-4.5"], @@ -188,15 +188,15 @@ const AGENT_MODEL_SPECS: &[AgentModelSpec] = &[ pro_only: false, }, AgentModelSpec { - slug: "claude-sonnet-4.5", + slug: "claude-sonnet-4.6", family: "claude", cli: "claude", read_only_args: CLAUDE_SONNET_READ_ONLY, write_args: CLAUDE_SONNET_WRITE, - model_args: &["--model", "sonnet"], + model_args: &["--model", "claude-sonnet-4-6"], description: "Balanced Claude model for implementation and debugging; a solid default when you want Claude.", enabled_by_default: true, - aliases: &["claude", 
"claude-sonnet"], + aliases: &["claude", "claude-sonnet", "claude-sonnet-4.5"], gating_env: None, is_frontline: false, pro_only: false, @@ -207,7 +207,7 @@ const AGENT_MODEL_SPECS: &[AgentModelSpec] = &[ cli: "claude", read_only_args: CLAUDE_HAIKU_READ_ONLY, write_args: CLAUDE_HAIKU_WRITE, - model_args: &["--model", "haiku"], + model_args: &["--model", "claude-haiku-4-5"], description: "Fast Claude model for simple tasks, drafts, and quick iterations; pick when latency matters.", enabled_by_default: true, aliases: &["claude-haiku"], @@ -216,15 +216,16 @@ const AGENT_MODEL_SPECS: &[AgentModelSpec] = &[ pro_only: false, }, AgentModelSpec { - slug: "gemini-3-pro", + slug: "gemini-3.1-pro-preview", family: "gemini", cli: "gemini", read_only_args: GEMINI_PRO_READ_ONLY, write_args: GEMINI_PRO_WRITE, - model_args: &["--model", "pro"], - description: "Higher-capacity Gemini model for harder tasks; use when gemini-3-flash misses details.", + model_args: &["--model", "gemini-3.1-pro-preview"], + description: "Higher-capacity Gemini preview for harder tasks; use when gemini-3-flash-preview misses details.", enabled_by_default: true, aliases: &[ + "gemini-3-pro", "gemini-3-pro-preview", "gemini-3", "gemini3", @@ -236,29 +237,34 @@ const AGENT_MODEL_SPECS: &[AgentModelSpec] = &[ pro_only: false, }, AgentModelSpec { - slug: "gemini-3-flash", + slug: "gemini-3-flash-preview", family: "gemini", cli: "gemini", read_only_args: GEMINI_FLASH_READ_ONLY, write_args: GEMINI_FLASH_WRITE, - model_args: &["--model", "flash"], - description: "Primary Gemini default for most tasks; fast and low-cost with near gemini-3-pro quality.", + model_args: &["--model", "gemini-3-flash-preview"], + description: "Primary Gemini preview default for most tasks; fast and low-cost with near gemini-3.1-pro-preview quality.", enabled_by_default: true, - aliases: &["gemini", "gemini-flash", "gemini-2.5-flash"], + aliases: &[ + "gemini", + "gemini-flash", + "gemini-3-flash", + "gemini-2.5-flash", + ], 
gating_env: None, is_frontline: false, pro_only: false, }, AgentModelSpec { - slug: "qwen-3-coder", + slug: "qwen3-coder-plus", family: "qwen", cli: "qwen", read_only_args: QWEN_3_CODER_READ_ONLY, write_args: QWEN_3_CODER_WRITE, - model_args: &["-m", "qwen-3-coder"], + model_args: &["-m", "qwen3-coder-plus"], description: "Fast and capable alternative; useful as a second opinion or for cross-checking.", enabled_by_default: true, - aliases: &["qwen", "qwen3"], + aliases: &["qwen", "qwen3", "qwen-3-coder"], gating_env: None, is_frontline: false, pro_only: false, diff --git a/code-rs/core/src/agent_tool.rs b/code-rs/core/src/agent_tool.rs index 7c3f02365da..7e057af569d 100644 --- a/code-rs/core/src/agent_tool.rs +++ b/code-rs/core/src/agent_tool.rs @@ -2417,7 +2417,7 @@ pub fn create_agent_tool(allowed_models: &[String]) -> OpenAiTool { }, }), description: Some( - "Optional array of model names (e.g., ['code-gpt-5.4','claude-sonnet-4.5','code-gpt-5.3-codex-spark','gemini-3-flash'])".to_string(), + "Optional array of model names (e.g., ['code-gpt-5.4','claude-sonnet-4.6','code-gpt-5.3-codex-spark','gemini-3-flash-preview'])".to_string(), ), }, ); @@ -2975,7 +2975,7 @@ mod tests { } let cfg = AgentConfig { - name: "claude-sonnet-4.5".to_string(), + name: "claude-sonnet-4.6".to_string(), command: "claude".to_string(), args: Vec::new(), read_only: true, @@ -2989,7 +2989,7 @@ mod tests { let output = execute_model_with_permissions( "agent-test", - "claude-sonnet-4.5", + "claude-sonnet-4.6", "ok", true, None, diff --git a/code-rs/core/src/config.rs b/code-rs/core/src/config.rs index cade56e5339..626b84d5e09 100644 --- a/code-rs/core/src/config.rs +++ b/code-rs/core/src/config.rs @@ -3083,9 +3083,9 @@ model_verbosity = "high" assert!(enabled_names.contains("code-gpt-5.3-codex")); assert!(enabled_names.contains("code-gpt-5.4")); - assert!(enabled_names.contains("claude-sonnet-4.5")); - assert!(enabled_names.contains("gemini-3-pro")); - 
assert!(enabled_names.contains("qwen-3-coder")); + assert!(enabled_names.contains("claude-sonnet-4.6")); + assert!(enabled_names.contains("gemini-3.1-pro-preview")); + assert!(enabled_names.contains("qwen3-coder-plus")); Ok(()) } @@ -3296,20 +3296,20 @@ mod agent_merge_tests { fn gemini_alias_and_canonical_dedupe_prefers_last_state() { let agents = vec![ agent("gemini-2.5-pro", "gemini", true), - agent("gemini-3-pro", "gemini", false), + agent("gemini-3.1-pro-preview", "gemini", false), ]; let merged = merge_with_default_agents(agents); let gemini = merged .iter() - .find(|a| a.name.eq_ignore_ascii_case("gemini-3-pro")) + .find(|a| a.name.eq_ignore_ascii_case("gemini-3.1-pro-preview")) .expect("gemini present"); assert!(!gemini.enabled, "later canonical disable should win"); assert_eq!( merged .iter() - .filter(|a| a.name.eq_ignore_ascii_case("gemini-3-pro")) + .filter(|a| a.name.eq_ignore_ascii_case("gemini-3.1-pro-preview")) .count(), 1, "should dedupe gemini alias/canonical" @@ -3319,21 +3319,21 @@ mod agent_merge_tests { #[test] fn gemini_alias_disable_overrides_prior_canonical_enable() { let agents = vec![ - agent("gemini-3-pro", "gemini", true), + agent("gemini-3.1-pro-preview", "gemini", true), agent("gemini-2.5-pro", "gemini", false), ]; let merged = merge_with_default_agents(agents); let gemini = merged .iter() - .find(|a| a.name.eq_ignore_ascii_case("gemini-3-pro")) + .find(|a| a.name.eq_ignore_ascii_case("gemini-3.1-pro-preview")) .expect("gemini present"); assert!(!gemini.enabled, "later alias disable should win"); assert_eq!( merged .iter() - .filter(|a| a.name.eq_ignore_ascii_case("gemini-3-pro")) + .filter(|a| a.name.eq_ignore_ascii_case("gemini-3.1-pro-preview")) .count(), 1, "should dedupe gemini alias/canonical" diff --git a/code-rs/core/src/config/validation.rs b/code-rs/core/src/config/validation.rs index b37eb4bfae1..bf311fe5b73 100644 --- a/code-rs/core/src/config/validation.rs +++ b/code-rs/core/src/config/validation.rs @@ -199,14 +199,29 @@ 
fn upgrade_legacy_model_slug(slug: &str) -> Option<String> { return Some("claude-opus-4.6".to_string()); } - // Upgrade Gemini 2.5 Pro to Gemini 3 Pro (or preview alias) - if slug.eq_ignore_ascii_case("gemini-2.5-pro") || slug.eq_ignore_ascii_case("gemini-3-pro-preview") { - return Some("gemini-3-pro".to_string()); + // Upgrade Anthropic Sonnet 4.5 to 4.6. + if slug.eq_ignore_ascii_case("claude-sonnet-4.5") { + return Some("claude-sonnet-4.6".to_string()); } - // Upgrade Gemini 2.5 Flash to Gemini 3 Flash - if slug.eq_ignore_ascii_case("gemini-2.5-flash") { - return Some("gemini-3-flash".to_string()); + // Upgrade Gemini Pro slugs to the latest preview track. + if slug.eq_ignore_ascii_case("gemini-2.5-pro") + || slug.eq_ignore_ascii_case("gemini-3-pro") + || slug.eq_ignore_ascii_case("gemini-3-pro-preview") + { + return Some("gemini-3.1-pro-preview".to_string()); + } + + // Upgrade Gemini Flash slugs to the latest preview track. + if slug.eq_ignore_ascii_case("gemini-2.5-flash") + || slug.eq_ignore_ascii_case("gemini-3-flash") + { + return Some("gemini-3-flash-preview".to_string()); + } + + // Upgrade the older Qwen coder slug to the current plus line.
+ if slug.eq_ignore_ascii_case("qwen-3-coder") { + return Some("qwen3-coder-plus".to_string()); } // Keep codex variants on their existing line; upgrades are surfaced via the diff --git a/code-rs/core/src/slash_commands.rs b/code-rs/core/src/slash_commands.rs index be4d4fc104b..e95f6b11b64 100644 --- a/code-rs/core/src/slash_commands.rs +++ b/code-rs/core/src/slash_commands.rs @@ -297,9 +297,9 @@ mod tests { let defaults = get_default_models(); assert!(defaults.iter().any(|v| v == "code-gpt-5.4")); assert!(defaults.iter().any(|v| v == "code-gpt-5.3-codex")); - assert!(!defaults.iter().any(|v| v == "qwen-3-coder")); - assert!(!defaults.iter().any(|v| v == "gemini-3-flash")); - assert!(!defaults.iter().any(|v| v == "claude-sonnet-4.5")); + assert!(!defaults.iter().any(|v| v == "qwen3-coder-plus")); + assert!(!defaults.iter().any(|v| v == "gemini-3-flash-preview")); + assert!(!defaults.iter().any(|v| v == "claude-sonnet-4.6")); restore_var("PATH", orig_path); restore_var("HOME", orig_home); @@ -334,7 +334,7 @@ mod tests { } let defaults = get_default_models(); - assert!(defaults.iter().any(|v| v == "claude-sonnet-4.5")); + assert!(defaults.iter().any(|v| v == "claude-sonnet-4.6")); restore_var("PATH", orig_path); restore_var("HOME", orig_home); diff --git a/code-rs/core/tests/gemini_model_args.rs b/code-rs/core/tests/gemini_model_args.rs index 7d204a7c9eb..a25d129a9b7 100644 --- a/code-rs/core/tests/gemini_model_args.rs +++ b/code-rs/core/tests/gemini_model_args.rs @@ -2,19 +2,22 @@ use code_core::agent_defaults::agent_model_spec; #[test] fn gemini_specs_use_long_model_flag() { - let pro = agent_model_spec("gemini-3-pro").expect("spec present"); - assert_eq!(pro.model_args, ["--model", "pro"]); + let pro = agent_model_spec("gemini-3.1-pro-preview").expect("spec present"); + assert_eq!(pro.model_args, ["--model", "gemini-3.1-pro-preview"]); // The shorthand `gemini` is treated as the primary Gemini default. 
let primary = agent_model_spec("gemini").expect("alias present"); - assert_eq!(primary.slug, "gemini-3-flash"); - assert_eq!(primary.model_args, ["--model", "flash"]); + assert_eq!(primary.slug, "gemini-3-flash-preview"); + assert_eq!(primary.model_args, ["--model", "gemini-3-flash-preview"]); - // Legacy shorthand and older slugs should resolve to the newest Gemini 3 Pro. + // Legacy shorthand and older slugs should resolve to the newest Gemini Pro preview. let legacy_pro = agent_model_spec("gemini-2.5-pro").expect("spec present via alias"); - assert_eq!(legacy_pro.slug, "gemini-3-pro"); + assert_eq!(legacy_pro.slug, "gemini-3.1-pro-preview"); + + let prior_preview = agent_model_spec("gemini-3-pro").expect("prior preview alias present"); + assert_eq!(prior_preview.slug, "gemini-3.1-pro-preview"); let legacy_flash = agent_model_spec("gemini-2.5-flash").expect("spec present via alias"); - assert_eq!(legacy_flash.slug, "gemini-3-flash"); - assert_eq!(legacy_flash.model_args, ["--model", "flash"]); + assert_eq!(legacy_flash.slug, "gemini-3-flash-preview"); + assert_eq!(legacy_flash.model_args, ["--model", "gemini-3-flash-preview"]); } diff --git a/code-rs/tui/src/chatwidget.rs b/code-rs/tui/src/chatwidget.rs index 61f31649761..4be9707a8a6 100644 --- a/code-rs/tui/src/chatwidget.rs +++ b/code-rs/tui/src/chatwidget.rs @@ -31618,7 +31618,7 @@ use code_core::protocol::OrderMeta; let (rows, _commands) = chat.collect_agents_overview_rows(); let qwen = rows .iter() - .find(|row| row.name == "qwen-3-coder") + .find(|row| row.name == "qwen3-coder-plus") .expect("qwen row present"); assert_eq!(qwen.enabled, qwen.installed); @@ -33814,8 +33814,8 @@ use code_core::protocol::OrderMeta; write: false, write_requested: Some(false), models: Some(vec![ - "claude-sonnet-4.5".to_string(), - "gemini-3-pro".to_string(), + "claude-sonnet-4.6".to_string(), + "gemini-3.1-pro-preview".to_string(), ]), }]; chat.auto_state.pending_agent_timing = Some(AutoTurnAgentsTiming::Blocking); @@ -33829,7 
+33829,7 @@ use code_core::protocol::OrderMeta; assert!(message.contains("Run diagnostics")); assert!(message.contains("Please run agent.create")); assert!(message.contains("write: false")); - assert!(message.contains("Models: [claude-sonnet-4.5, gemini-3-pro]")); + assert!(message.contains("Models: [claude-sonnet-4.6, gemini-3.1-pro-preview]")); assert!(message.contains("Draft alternative fix")); assert!(message.contains("Focus on parser module")); assert!(message.contains("agent.wait")); diff --git a/config.toml.example b/config.toml.example index 7983e1a7945..5ff7281144c 100644 --- a/config.toml.example +++ b/config.toml.example @@ -43,7 +43,7 @@ read-only = false description = "Qwen Coder assistant (tracks latest default model)" # No default -m: let the CLI choose its current default model # To pin a model explicitly, export QWEN_MODEL or add here, e.g.: -# args = ["-m", "qwen-3-coder", "-y"] +# args = ["-m", "qwen3-coder-plus", "-y"] args = ["-y"] # Optional environment variables (either name works; mirrored automatically): # env = { QWEN_API_KEY = "your-key", DASHSCOPE_API_KEY = "your-key" } @@ -55,7 +55,7 @@ enabled = true read-only = true description = "Qwen Coder assistant in read-only mode" # No -y in read-only; omit -m to track latest default. To pin: -# args = ["-m", "qwen-3-coder"] +# args = ["-m", "qwen3-coder-plus"] args = [] [[agents]] diff --git a/docs/agents.md b/docs/agents.md index 7f813a1d799..b73c08617b9 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -19,9 +19,9 @@ instructions = "Preamble added to this agent’s prompt" Field recap: `name` (slug/alias), `command` (absolute paths ok), `args*` (RO/RW lists override base), `env`, `read_only`, `enabled`, optional `description` and `instructions`. 
### Built-in defaults -If no `[[agents]]` are configured, Code advertises built-ins (gated by env `CODE_ENABLE_CLOUD_AGENT_MODEL` for cloud variants): `code-gpt-5.4`, `code-gpt-5.3-codex`, `claude-opus-4.6`, `gemini-3-pro`, `code-gpt-5.1-codex-mini`, `claude-sonnet-4.5`, `gemini-3-flash`, `claude-haiku-4.5`, `qwen-3-coder`, `cloud-gpt-5.1-codex-max`. Built-ins strip any user `--model/-m` flags to avoid conflicts and inject their own. +If no `[[agents]]` are configured, Code advertises built-ins (gated by env `CODE_ENABLE_CLOUD_AGENT_MODEL` for cloud variants): `code-gpt-5.4`, `code-gpt-5.3-codex`, `code-gpt-5.3-codex-spark`, `claude-opus-4.6`, `gemini-3.1-pro-preview`, `code-gpt-5.1-codex-mini`, `claude-sonnet-4.6`, `gemini-3-flash-preview`, `claude-haiku-4.5`, `qwen3-coder-plus`, `cloud-gpt-5.1-codex-max`. Built-ins strip any user `--model/-m` flags to avoid conflicts and inject their own. -Tip: `gemini` resolves to `gemini-3-flash` (fast/cheap). Use `gemini-3-pro` when you want the higher-capacity Gemini option. +Tip: `gemini` resolves to `gemini-3-flash-preview` (fast/cheap). Use `gemini-3.1-pro-preview` when you want the higher-capacity Gemini preview. ## Subagents (`[[subagents.commands]]`) ```toml diff --git a/docs/config.md b/docs/config.md index 6ce1e707dc1..cfdef2c0a98 100644 --- a/docs/config.md +++ b/docs/config.md @@ -198,7 +198,7 @@ approval_policy = "never" Use `[[agents]]` blocks to register additional CLI programs that Code can launch as peers. Each block maps a short `name` (referenced elsewhere in the config) to the command to execute, optional default flags, and environment variables. -> **Note:** Built-in model slugs (for example `code-gpt-5.4`, `claude-sonnet-4.5`) automatically inject the correct `--model` or `-m` flag. To avoid conflicting arguments, Code strips any `--model`/`-m` flags you place in `args`, `args_read_only`, or `args_write` before launching the agent. 
If you need a new model variant, add a slug in `code-rs/core/src/agent_defaults.rs` (or set an environment variable consumed by the CLI) rather than pinning the flag here. +> **Note:** Built-in model slugs (for example `code-gpt-5.4`, `claude-sonnet-4.6`) automatically inject the correct `--model` or `-m` flag. To avoid conflicting arguments, Code strips any `--model`/`-m` flags you place in `args`, `args_read_only`, or `args_write` before launching the agent. If you need a new model variant, add a slug in `code-rs/core/src/agent_defaults.rs` (or set an environment variable consumed by the CLI) rather than pinning the flag here. ```toml [[agents]] @@ -449,7 +449,7 @@ tool_timeout_sec = 30 Sub-agents are orchestrated helper workflows you can trigger with slash commands (for example `/plan`, `/solve`, `/code`). Each entry under `[[subagents.commands]]` defines the slash command name, whether spawned agents run in read-only mode, which `agents` to launch, and extra guidance for both the orchestrator (Code) and the individual agents. -By default (when no `[[agents]]` are configured) Code advertises these model slugs for multi-agent runs: `code-gpt-5.4`, `code-gpt-5.3-codex`, `claude-opus-4.6`, `gemini-3-pro`, `code-gpt-5.1-codex-mini`, `claude-sonnet-4.5`, `gemini-3-flash`, `claude-haiku-4.5`, and `qwen-3-coder`. The cloud counterpart, `cloud-gpt-5.1-codex-max`, only appears when `CODE_ENABLE_CLOUD_AGENT_MODEL=1` is set. (`gemini` resolves to `gemini-3-flash`.) You can override the list by defining `[[agents]]` entries or by specifying `agents = [ … ]` on a given `[[subagents.commands]]` entry. +By default (when no `[[agents]]` are configured) Code advertises these model slugs for multi-agent runs: `code-gpt-5.4`, `code-gpt-5.3-codex`, `code-gpt-5.3-codex-spark`, `claude-opus-4.6`, `gemini-3.1-pro-preview`, `code-gpt-5.1-codex-mini`, `claude-sonnet-4.6`, `gemini-3-flash-preview`, `claude-haiku-4.5`, and `qwen3-coder-plus`. 
The cloud counterpart, `cloud-gpt-5.1-codex-max`, only appears when `CODE_ENABLE_CLOUD_AGENT_MODEL=1` is set. (`gemini` resolves to `gemini-3-flash-preview`.) You can override the list by defining `[[agents]]` entries or by specifying `agents = [ … ]` on a given `[[subagents.commands]]` entry. ```toml [[subagents.commands]] diff --git a/docs/slash-commands.md b/docs/slash-commands.md index 3d7ab6fb7ab..18f167fc70b 100644 --- a/docs/slash-commands.md +++ b/docs/slash-commands.md @@ -117,9 +117,9 @@ Implementation Notes `code-rs/core/src/slash_commands.rs`. When no `[[agents]]` are configured, the orchestrator advertises the following model slugs to the LLM for multi-agent runs: `code-gpt-5.4`, - `code-gpt-5.3-codex`, `claude-opus-4.6`, `gemini-3-pro`, - `code-gpt-5.1-codex-mini`, `claude-sonnet-4.5`, `gemini-3-flash`, - `claude-haiku-4.5`, and `qwen-3-coder` (with + `code-gpt-5.3-codex`, `code-gpt-5.3-codex-spark`, `claude-opus-4.6`, + `gemini-3.1-pro-preview`, `code-gpt-5.1-codex-mini`, `claude-sonnet-4.6`, + `gemini-3-flash-preview`, `claude-haiku-4.5`, and `qwen3-coder-plus` (with `cloud-gpt-5.1-codex-max` gated by `CODE_ENABLE_CLOUD_AGENT_MODEL`). (`gemini` - resolves to `gemini-3-flash`.) You can replace or pin this set via + resolves to `gemini-3-flash-preview`.) You can replace or pin this set via `[[agents]]` or per-command `[[subagents.commands]].agents`. From 38d3e7ed5d3bcc5325013ad581264f15c49530fc Mon Sep 17 00:00:00 2001 From: Chris Busillo Date: Sun, 8 Mar 2026 04:38:22 -0400 Subject: [PATCH 2/2] fix(tui/settings): defer 1M toggle until close Stage session fast-mode and context-mode changes while the model settings overlay is open, then apply them on close. This prevents the 1M context toggle from reconfiguring the session and compacting history while the user is still inside settings. Add regression tests covering deferred apply and no-op restores. 
--- .../src/bottom_pane/model_selection_view.rs | 150 +++++++++++++++++- .../tui/src/chatwidget/settings_overlay.rs | 6 + 2 files changed, 150 insertions(+), 6 deletions(-) diff --git a/code-rs/tui/src/bottom_pane/model_selection_view.rs b/code-rs/tui/src/bottom_pane/model_selection_view.rs index fff7b8dc68f..eab099cb411 100644 --- a/code-rs/tui/src/bottom_pane/model_selection_view.rs +++ b/code-rs/tui/src/bottom_pane/model_selection_view.rs @@ -144,6 +144,11 @@ pub(crate) struct ModelSelectionView { current_effort: ReasoningEffort, current_service_tier: Option<ServiceTier>, current_context_mode: Option<ContextMode>, + baseline_service_tier: Option<ServiceTier>, + baseline_context_mode: Option<ContextMode>, + defer_session_mode_toggles_until_close: bool, + staged_service_tier: Option<Option<ServiceTier>>, + staged_context_mode: Option<Option<ContextMode>>, use_chat_model: bool, app_event_tx: AppEventSender, is_complete: bool, @@ -191,6 +196,11 @@ impl ModelSelectionView { current_effort, current_service_tier, current_context_mode, + baseline_service_tier: current_service_tier, + baseline_context_mode: current_context_mode, + defer_session_mode_toggles_until_close: false, + staged_service_tier: None, + staged_context_mode: None, use_chat_model, app_event_tx, is_complete: false, @@ -198,6 +208,35 @@ impl ModelSelectionView { } } + pub(crate) fn defer_session_mode_toggles_until_close(&mut self) { + self.defer_session_mode_toggles_until_close = true; + } + + pub(crate) fn flush_deferred_session_updates(&mut self) { + if let Some(service_tier) = self.staged_service_tier.take() { + if service_tier != self.baseline_service_tier { + let _ = self + .app_event_tx + .send(AppEvent::UpdateServiceTierSelection { service_tier }); + self.baseline_service_tier = service_tier; + } + } + + if let Some(context_mode) = self.staged_context_mode.take() { + if context_mode != self.baseline_context_mode { + let _ = self + .app_event_tx + .send(AppEvent::UpdateSessionContextModeSelection { context_mode }); + self.baseline_context_mode = context_mode; + } + } + } + + fn
should_defer_session_mode_toggles(&self) -> bool { + self.defer_session_mode_toggles_until_close + && matches!(self.target, ModelSelectionTarget::Session) + } + pub(crate) fn update_presets(&mut self, presets: Vec<ModelPreset>) { let include_fast_mode = self.target.supports_fast_mode(); let include_context_mode = self.target.supports_fast_mode(); @@ -439,9 +478,13 @@ Some(ServiceTier::Fast) }; self.current_service_tier = next_service_tier; - let _ = self.app_event_tx.send(AppEvent::UpdateServiceTierSelection { - service_tier: next_service_tier, - }); + if self.should_defer_session_mode_toggles() { + self.staged_service_tier = Some(next_service_tier); + } else { + let _ = self.app_event_tx.send(AppEvent::UpdateServiceTierSelection { + service_tier: next_service_tier, + }); + } return; } EntryKind::ContextMode => { @@ -451,9 +494,13 @@ Some(ContextMode::Auto) => Some(ContextMode::Disabled), }; self.current_context_mode = next_context_mode; - let _ = self.app_event_tx.send(AppEvent::UpdateSessionContextModeSelection { - context_mode: next_context_mode, - }); + if self.should_defer_session_mode_toggles() { + self.staged_context_mode = Some(next_context_mode); + } else { + let _ = self.app_event_tx.send(AppEvent::UpdateSessionContextModeSelection { + context_mode: next_context_mode, + }); + } return; } EntryKind::FollowChat => { @@ -1387,6 +1434,97 @@ mod tests { assert!(!view.is_complete()); } + #[test] + fn deferred_context_mode_waits_until_close() { + let presets = vec![make_preset("gpt-5.4")]; + let (tx, rx) = mpsc::channel::<AppEvent>(); + let mut view = ModelSelectionView::new( + presets, + "gpt-5.4".to_string(), + ReasoningEffort::Low, + None, + Some(ContextMode::Auto), + false, + ModelSelectionTarget::Session, + AppEventSender::new(tx), + ); + view.defer_session_mode_toggles_until_close(); + + let _ = view.handle_key_event_direct(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); + let _ =
view.handle_key_event_direct(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + assert!(rx.try_recv().is_err(), "event should wait until close"); + + view.flush_deferred_session_updates(); + + let event = rx.try_recv().expect("context mode event after close"); + assert!(matches!( + event, + AppEvent::UpdateSessionContextModeSelection { + context_mode: Some(ContextMode::Disabled) + } + )); + assert!(!view.is_complete()); + } + + #[test] + fn deferred_fast_mode_waits_until_close() { + let presets = vec![make_preset("gpt-5.4")]; + let (tx, rx) = mpsc::channel::<AppEvent>(); + let mut view = ModelSelectionView::new( + presets, + "gpt-5.4".to_string(), + ReasoningEffort::Low, + None, + None, + false, + ModelSelectionTarget::Session, + AppEventSender::new(tx), + ); + view.defer_session_mode_toggles_until_close(); + + let _ = view.handle_key_event_direct(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + assert!(rx.try_recv().is_err(), "event should wait until close"); + + view.flush_deferred_session_updates(); + + let event = rx.try_recv().expect("service tier event after close"); + assert!(matches!( + event, + AppEvent::UpdateServiceTierSelection { + service_tier: Some(ServiceTier::Fast) + } + )); + assert!(!view.is_complete()); + } + + #[test] + fn deferred_context_mode_skips_event_when_restored_before_close() { + let presets = vec![make_preset("gpt-5.4")]; + let (tx, rx) = mpsc::channel::<AppEvent>(); + let mut view = ModelSelectionView::new( + presets, + "gpt-5.4".to_string(), + ReasoningEffort::Low, + None, + Some(ContextMode::Auto), + false, + ModelSelectionTarget::Session, + AppEventSender::new(tx), + ); + view.defer_session_mode_toggles_until_close(); + + let _ = view.handle_key_event_direct(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); + let _ = view.handle_key_event_direct(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + let _ = view.handle_key_event_direct(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + let _ =
view.handle_key_event_direct(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + view.flush_deferred_session_updates(); + + assert!(rx.try_recv().is_err(), "restoring the original mode should emit nothing"); + } + #[test] fn model_selection_shows_unavailable_context_hint_for_unsupported_model() { let presets = vec![make_preset("gpt-5.3-codex")]; diff --git a/code-rs/tui/src/chatwidget/settings_overlay.rs b/code-rs/tui/src/chatwidget/settings_overlay.rs index b4aea76d077..b662c390216 100644 --- a/code-rs/tui/src/chatwidget/settings_overlay.rs +++ b/code-rs/tui/src/chatwidget/settings_overlay.rs @@ -176,6 +176,8 @@ pub(crate) struct ModelSettingsContent { impl ModelSettingsContent { pub(crate) fn new(view: ModelSelectionView) -> Self { + let mut view = view; + view.defer_session_mode_toggles_until_close(); Self { view } } } @@ -192,6 +194,10 @@ impl SettingsContent for ModelSettingsContent { fn is_complete(&self) -> bool { self.view.is_complete() } + + fn on_close(&mut self) { + self.view.flush_deferred_session_updates(); + } } pub(crate) struct ThemeSettingsContent {