fix: recompute Copilot api_mode after model switch

Recomputes GitHub Copilot api_mode from the selected model in the
shared /model switch path.  Before this change, Copilot could carry a
stale codex_responses mode forward from a GPT-5 selection into a later
Claude model switch, causing unsupported_api_for_model errors.

Cherry-picked from #10533 by @helix4u with:
- Comment specificity (Provider-specific → Copilot api_mode override)
- Fix pre-existing duplicate opencode-go in set literal
- Extract test mock helper to reduce duplication
- Add GPT-5 → GPT-5 regression test (keeps codex_responses)
This commit is contained in:
helix4u 2026-04-16 13:45:24 +05:30 committed by kshitij
parent 0cf7d570e2
commit 4093982f19
2 changed files with 107 additions and 3 deletions

View file

@@ -457,6 +457,7 @@ def switch_model(
ModelSwitchResult with all information the caller needs. ModelSwitchResult with all information the caller needs.
""" """
from hermes_cli.models import ( from hermes_cli.models import (
copilot_model_api_mode,
detect_provider_for_model, detect_provider_for_model,
validate_requested_model, validate_requested_model,
opencode_model_api_mode, opencode_model_api_mode,
@@ -714,8 +715,12 @@ def switch_model(
if validation.get("corrected_model"): if validation.get("corrected_model"):
new_model = validation["corrected_model"] new_model = validation["corrected_model"]
# --- Copilot api_mode override ---
if target_provider in {"copilot", "github-copilot"}:
api_mode = copilot_model_api_mode(new_model, api_key=api_key)
# --- OpenCode api_mode override --- # --- OpenCode api_mode override ---
if target_provider in {"opencode-zen", "opencode-go", "opencode", "opencode-go"}: if target_provider in {"opencode-zen", "opencode-go", "opencode"}:
api_mode = opencode_model_api_mode(target_provider, new_model) api_mode = opencode_model_api_mode(target_provider, new_model)
# --- Determine api_mode if not already set --- # --- Determine api_mode if not already set ---
@@ -1098,5 +1103,3 @@ def list_authenticated_providers(
results.sort(key=lambda r: (not r["is_current"], -r["total_models"])) results.sort(key=lambda r: (not r["is_current"], -r["total_models"]))
return results return results

View file

@@ -0,0 +1,101 @@
"""Regression tests for Copilot api_mode recomputation during /model switch.
When switching models within the Copilot provider (e.g. GPT-5 → Claude),
the stale api_mode from resolve_runtime_provider must be overridden with
a fresh value computed from the *new* model. Without the fix, Claude
requests went through the Responses API and failed with
``unsupported_api_for_model``.
"""
from unittest.mock import patch
from hermes_cli.model_switch import switch_model
_MOCK_VALIDATION = {
"accepted": True,
"persist": True,
"recognized": True,
"message": None,
}
def _run_copilot_switch(
    raw_input: str,
    current_provider: str = "copilot",
    current_model: str = "gpt-5.4",
    explicit_provider: str = "",
    runtime_api_mode: str = "codex_responses",
):
    """Invoke ``switch_model`` under a full set of Copilot-oriented mocks.

    Alias resolution, provider model listing, runtime-provider resolution,
    request validation, and model metadata lookups are all stubbed so that
    only the api_mode handling inside ``switch_model`` is exercised.

    ``runtime_api_mode`` is the (possibly stale) api_mode reported by the
    mocked ``resolve_runtime_provider``; callers assert whether
    ``switch_model`` keeps or overrides it.
    """
    # Runtime-provider payload carrying the potentially stale api_mode.
    runtime_info = {
        "api_key": "ghu_test_token",
        "base_url": "https://api.githubcopilot.com",
        "api_mode": runtime_api_mode,
    }
    switch_kwargs = dict(
        raw_input=raw_input,
        current_provider=current_provider,
        current_model=current_model,
        explicit_provider=explicit_provider,
    )
    with (
        patch("hermes_cli.model_switch.resolve_alias", return_value=None),
        patch("hermes_cli.model_switch.list_provider_models", return_value=[]),
        patch(
            "hermes_cli.runtime_provider.resolve_runtime_provider",
            return_value=runtime_info,
        ),
        patch(
            "hermes_cli.models.validate_requested_model",
            return_value=_MOCK_VALIDATION,
        ),
        patch("hermes_cli.model_switch.get_model_info", return_value=None),
        patch("hermes_cli.model_switch.get_model_capabilities", return_value=None),
        patch("hermes_cli.models.detect_provider_for_model", return_value=None),
    ):
        return switch_model(**switch_kwargs)
def test_same_provider_copilot_switch_recomputes_api_mode():
    """Same-provider switch (copilot GPT-5 → Claude) recomputes api_mode.

    The mocked runtime reports the stale ``codex_responses`` mode; selecting
    a Claude model must flip the result to ``chat_completions``.
    """
    result = _run_copilot_switch(
        raw_input="claude-opus-4.6",
        current_provider="copilot",
        current_model="gpt-5.4",
    )
    assert result.success, f"switch_model failed: {result.error_message}"
    assert result.target_provider == "copilot"
    assert result.new_model == "claude-opus-4.6"
    assert result.api_mode == "chat_completions"
def test_explicit_copilot_switch_uses_selected_model_api_mode():
    """Cross-provider switch into copilot ignores the stale runtime api_mode.

    Coming from openrouter with an explicit copilot target, the api_mode
    must be derived from the newly selected Claude model.
    """
    result = _run_copilot_switch(
        raw_input="claude-opus-4.6",
        current_provider="openrouter",
        current_model="anthropic/claude-sonnet-4.6",
        explicit_provider="copilot",
    )
    assert result.success, f"switch_model failed: {result.error_message}"
    assert result.target_provider == "github-copilot"
    assert result.new_model == "claude-opus-4.6"
    assert result.api_mode == "chat_completions"
def test_copilot_gpt5_keeps_codex_responses():
    """GPT-5 → GPT-5 on copilot: the override must not clobber codex_responses."""
    result = _run_copilot_switch(
        raw_input="gpt-5.4-mini",
        current_provider="copilot",
        current_model="gpt-5.4",
        runtime_api_mode="codex_responses",
    )
    assert result.success, f"switch_model failed: {result.error_message}"
    assert result.target_provider == "copilot"
    assert result.new_model == "gpt-5.4-mini"
    # gpt-5.4-mini is treated as a GPT-5 family model, so the recomputed
    # mode stays codex_responses. NOTE(review): the plain "gpt-5-mini"
    # variant reportedly maps to chat_completions instead — confirm against
    # copilot_model_api_mode in hermes_cli.models.
    assert result.api_mode == "codex_responses"