mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-30 01:41:43 +00:00
fix: trust user-selected models with OpenAI Codex provider
The Codex model normalization was rejecting any model without 'codex' in its name, forcing a fallback to gpt-5.3-codex. This blocked models like gpt-5.4 that the Codex API actually supports. The fix simplifies _normalize_model_for_provider() to two operations: (1) strip provider prefixes (the API needs bare slugs); (2) replace the *untouched default* model with a Codex-compatible one. If the user explicitly chose a model — any model — we trust them and let the API be the judge. No allowlists, no slug checks. Also removes the `'codex' not in slug` filter from _read_cache_models() so the local cache preserves all API-available models. Inspired by OpenClaw's approach, which explicitly lists non-codex models (gpt-5.4, gpt-5.2) as valid Codex models.
This commit is contained in:
parent
cf63b2471f
commit
f996d7950b
4 changed files with 194 additions and 56 deletions
|
|
@@ -197,10 +197,10 @@ def test_codex_provider_replaces_incompatible_default_model(monkeypatch):
|
|||
assert shell.model == "gpt-5.2-codex"
|
||||
|
||||
|
||||
def test_codex_provider_replaces_incompatible_envvar_model(monkeypatch):
|
||||
"""Exact scenario from #651: LLM_MODEL is set to a non-Codex model and
|
||||
provider resolves to openai-codex. The model must be replaced and a
|
||||
warning printed since the user explicitly chose it."""
|
||||
def test_codex_provider_trusts_explicit_envvar_model(monkeypatch):
|
||||
"""When the user explicitly sets LLM_MODEL, we trust their choice and
|
||||
let the API be the judge — even if it's a non-OpenAI model. Only
|
||||
provider prefixes are stripped; the bare model passes through."""
|
||||
cli = _import_cli()
|
||||
|
||||
monkeypatch.setenv("LLM_MODEL", "claude-opus-4-6")
|
||||
|
|
@@ -217,18 +217,14 @@ def test_codex_provider_replaces_incompatible_envvar_model(monkeypatch):
|
|||
|
||||
monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
|
||||
monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.codex_models.get_codex_model_ids",
|
||||
lambda access_token=None: ["gpt-5.2-codex", "gpt-5.1-codex-mini"],
|
||||
)
|
||||
|
||||
shell = cli.HermesCLI(compact=True, max_turns=1)
|
||||
|
||||
assert shell._model_is_default is False
|
||||
assert shell._ensure_runtime_credentials() is True
|
||||
assert shell.provider == "openai-codex"
|
||||
assert "claude" not in shell.model
|
||||
assert shell.model == "gpt-5.2-codex"
|
||||
# User explicitly chose this model — it passes through untouched
|
||||
assert shell.model == "claude-opus-4-6"
|
||||
|
||||
|
||||
def test_codex_provider_preserves_explicit_codex_model(monkeypatch):
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue