diff --git a/hermes_cli/model_switch.py b/hermes_cli/model_switch.py
index 41fbe36de..bb57177e7 100644
--- a/hermes_cli/model_switch.py
+++ b/hermes_cli/model_switch.py
@@ -112,7 +112,10 @@ MODEL_ALIASES: dict[str, ModelIdentity] = {
     # OpenAI
     "gpt5": ModelIdentity("openai", "gpt-5"),
     "gpt": ModelIdentity("openai", "gpt"),
-    "codex": ModelIdentity("openai", "codex"),
+    # "codex" used to point at a now-vanished openai/codex slug. The live
+    # catalogs expose GPT-5 Codex family models instead, so resolve the alias
+    # to the actual codex-branded family.
+    "codex": ModelIdentity("openai", "gpt-5.3-codex"),
     "o3": ModelIdentity("openai", "o3"),
     "o4": ModelIdentity("openai", "o4"),
 
diff --git a/tests/hermes_cli/test_model_switch_codex_alias.py b/tests/hermes_cli/test_model_switch_codex_alias.py
new file mode 100644
index 000000000..92ccb5f42
--- /dev/null
+++ b/tests/hermes_cli/test_model_switch_codex_alias.py
@@ -0,0 +1,73 @@
+"""Regression tests for the /model codex alias.
+
+The short alias used to point at a nonexistent openai/codex catalog entry,
+which made `/model codex` fail even though Codex models are available.
+"""
+
+from unittest.mock import patch
+
+from hermes_cli.model_switch import switch_model
+
+
+_MOCK_VALIDATION = {
+    "accepted": True,
+    "persist": True,
+    "recognized": True,
+    "message": None,
+}
+
+
+@patch("hermes_cli.model_switch.get_model_info", return_value=None)
+@patch("hermes_cli.model_switch.get_model_capabilities", return_value=None)
+@patch("hermes_cli.models.detect_provider_for_model", return_value=None)
+@patch("hermes_cli.models.validate_requested_model", return_value=_MOCK_VALIDATION)
+def test_codex_alias_resolves_on_openai_codex(
+    _validate,
+    _detect,
+    _capabilities,
+    _info,
+):
+    """/model codex should resolve to a real Codex model on openai-codex."""
+    with patch(
+        "hermes_cli.model_switch.list_provider_models",
+        return_value=["gpt-5.4-mini", "gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"],
+    ):
+        result = switch_model(
+            raw_input="codex",
+            current_provider="openai-codex",
+            current_model="gpt-5.4",
+            current_base_url="https://chatgpt.com/backend-api/codex",
+            current_api_key="***",
+        )
+
+    assert result.success is True
+    assert result.target_provider == "openai-codex"
+    assert result.new_model == "gpt-5.3-codex"
+
+
+@patch("hermes_cli.model_switch.get_model_info", return_value=None)
+@patch("hermes_cli.model_switch.get_model_capabilities", return_value=None)
+@patch("hermes_cli.models.detect_provider_for_model", return_value=None)
+@patch("hermes_cli.models.validate_requested_model", return_value=_MOCK_VALIDATION)
+def test_codex_alias_resolves_on_openrouter(
+    _validate,
+    _detect,
+    _capabilities,
+    _info,
+):
+    """The alias should also work when switching through openrouter."""
+    with patch(
+        "hermes_cli.model_switch.list_provider_models",
+        return_value=["openai/gpt-5.4", "openai/gpt-5.4-mini", "openai/gpt-5.3-codex"],
+    ):
+        result = switch_model(
+            raw_input="codex",
+            current_provider="openrouter",
+            current_model="openai/gpt-5.4",
+            current_base_url="https://openrouter.ai/api/v1",
+            current_api_key="***",
+        )
+
+    assert result.success is True
+    assert result.target_provider == "openrouter"
+    assert result.new_model == "openai/gpt-5.3-codex"