fix: repair OpenCode model routing and selection (#4508)

OpenCode Zen and Go are mixed-API-surface providers — different models
behind them use different API surfaces (GPT on Zen uses codex_responses,
Claude on Zen uses anthropic_messages, MiniMax on Go uses
anthropic_messages, GLM/Kimi on Go use chat_completions).

Changes:
- Add normalize_opencode_model_id() and opencode_model_api_mode() to
  models.py for model ID normalization and API surface routing
- Add _provider_supports_explicit_api_mode() to runtime_provider.py
  to prevent stale api_mode from leaking across provider switches
- Wire opencode routing into all three api_mode resolution paths:
  pool entry, api_key provider, and explicit runtime
- Add api_mode field to ModelSwitchResult for propagation through the
  switch pipeline
- Consolidate _PROVIDER_MODELS from main.py into models.py (single
  source of truth, eliminates duplicate dict)
- Add opencode normalization to setup wizard and model picker flows
- Add opencode block to _normalize_model_for_provider in CLI
- Add opencode-zen/go fallback model lists to setup.py

Tests: 160 targeted tests pass (26 new tests covering normalization,
api_mode routing per provider/model, persistence, and setup wizard
normalization).

Based on PR #3017 by SaM13997.

Co-authored-by: SaM13997 <139419381+SaM13997@users.noreply.github.com>
This commit is contained in:
Teknium 2026-04-02 09:36:24 -07:00 committed by GitHub
parent f4f64c413f
commit 28a073edc6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 381 additions and 84 deletions

View file

@ -9,7 +9,9 @@ from hermes_cli.models import (
fetch_api_models,
github_model_reasoning_efforts,
normalize_copilot_model_id,
normalize_opencode_model_id,
normalize_provider,
opencode_model_api_mode,
parse_model_input,
probe_api_models,
provider_label,
@ -339,6 +341,28 @@ class TestCopilotNormalization:
}]
assert copilot_model_api_mode("gpt-5.4", catalog=catalog) == "codex_responses"
def test_normalize_opencode_model_id_strips_provider_prefix(self):
    """A matching ``<provider>/`` prefix is stripped; bare model IDs pass through unchanged."""
    cases = [
        # (provider, raw model id, expected normalized id)
        ("opencode-go", "opencode-go/kimi-k2.5", "kimi-k2.5"),
        ("opencode-zen", "opencode-zen/claude-sonnet-4-6", "claude-sonnet-4-6"),
        ("opencode-go", "glm-5", "glm-5"),
    ]
    for provider, raw_id, expected in cases:
        assert normalize_opencode_model_id(provider, raw_id) == expected
def test_opencode_zen_api_modes_match_docs(self):
    """Zen routes each model to its documented API surface, prefixed or not.

    GPT models use codex_responses, Claude uses anthropic_messages, and
    everything else (Gemini, MiniMax) falls back to chat_completions.
    """
    expected_modes = {
        "gpt-5.4": "codex_responses",
        "gpt-5.3-codex": "codex_responses",
        "opencode-zen/gpt-5.4": "codex_responses",
        "claude-sonnet-4-6": "anthropic_messages",
        "opencode-zen/claude-sonnet-4-6": "anthropic_messages",
        "gemini-3-flash": "chat_completions",
        "minimax-m2.5": "chat_completions",
    }
    for model_id, mode in expected_modes.items():
        assert opencode_model_api_mode("opencode-zen", model_id) == mode
def test_opencode_go_api_modes_match_docs(self):
    """Go routes each model to its documented API surface, prefixed or not.

    GLM and Kimi use chat_completions while MiniMax uses anthropic_messages.
    """
    expected_modes = {
        "glm-5": "chat_completions",
        "opencode-go/glm-5": "chat_completions",
        "kimi-k2.5": "chat_completions",
        "opencode-go/kimi-k2.5": "chat_completions",
        "minimax-m2.5": "anthropic_messages",
        "opencode-go/minimax-m2.5": "anthropic_messages",
    }
    for model_id, mode in expected_modes.items():
        assert opencode_model_api_mode("opencode-go", model_id) == mode
# -- validate — format checks -----------------------------------------------