mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-27 01:11:40 +00:00
fix: fall back to provider's default model when model config is empty (#8303)
When a user configures a provider (e.g. `hermes auth add openai-codex`) but never selects a model via `hermes model`, the gateway and CLI would pass an empty model string to the API, causing: 'Codex Responses request model must be a non-empty string'. Now both the gateway (_resolve_session_agent_runtime) and the CLI (_ensure_runtime_credentials) detect an empty model and fill it from the provider's first catalog entry in _PROVIDER_MODELS. This covers all providers that have a static model list (openai-codex, anthropic, gemini, copilot, etc.). The fix is conservative: it only triggers when the model is truly empty and a known provider was resolved. Explicit model choices are never overridden.
This commit is contained in:
parent
17c72f176d
commit
45e60904c6
4 changed files with 167 additions and 0 deletions
120
tests/test_empty_model_fallback.py
Normal file
120
tests/test_empty_model_fallback.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
"""Tests for empty model fallback — when provider is configured but model is missing."""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
import pytest
|
||||
|
||||
|
||||
class TestGetDefaultModelForProvider:
    """Unit tests for hermes_cli.models.get_default_model_for_provider."""

    def test_known_provider_returns_first_model(self):
        from hermes_cli.models import get_default_model_for_provider

        default = get_default_model_for_provider("openai-codex")

        # A provider with a static catalog yields its first
        # _PROVIDER_MODELS entry: a non-empty string.
        assert isinstance(default, str)
        assert default

    def test_openrouter_returns_empty(self):
        """OpenRouter uses dynamic model fetch, no static catalog entry."""
        from hermes_cli.models import get_default_model_for_provider

        # OpenRouter models are fetched live; it has no _PROVIDER_MODELS
        # entry, so no static default can be produced.
        assert get_default_model_for_provider("openrouter") == ""

    def test_unknown_provider_returns_empty(self):
        from hermes_cli.models import get_default_model_for_provider

        assert get_default_model_for_provider("nonexistent-provider") == ""

    def test_custom_provider_returns_empty(self):
        """Custom provider has no model catalog — should return empty."""
        from hermes_cli.models import get_default_model_for_provider

        # Custom providers never appear in _PROVIDER_MODELS.
        assert get_default_model_for_provider("some-random-custom") == ""
class TestGatewayEmptyModelFallback:
    """Test that _resolve_session_agent_runtime fills in empty model from provider catalog."""

    @staticmethod
    def _codex_kwargs():
        """Return a fresh runtime-kwargs dict for the openai-codex provider.

        Built per call (not a shared class attribute) so a test that
        mutates the dict cannot leak state into its siblings.
        """
        return {
            "provider": "openai-codex",
            "api_key": "test-key",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_mode": "codex_responses",
        }

    @staticmethod
    def _bare_runner():
        """Build a GatewayRunner without running __init__.

        object.__new__ bypasses any real gateway setup/IO; only the one
        attribute the method under test reads is populated.
        """
        from gateway.run import GatewayRunner

        runner = object.__new__(GatewayRunner)
        runner._session_model_overrides = {}
        return runner

    def test_empty_model_filled_from_provider(self):
        """When config has no model but provider is openai-codex, use first codex model."""
        runner = self._bare_runner()

        # Config resolves to an empty model string, but the runtime kwargs
        # name a provider with a static catalog.
        with patch("gateway.run._resolve_gateway_model", return_value=""), \
                patch("gateway.run._resolve_runtime_agent_kwargs",
                      return_value=self._codex_kwargs()):
            model, kwargs = runner._resolve_session_agent_runtime()

        # Model should have been filled in from provider catalog.
        assert model, "Model should not be empty when provider is known"
        assert isinstance(model, str)
        assert kwargs["provider"] == "openai-codex"

    def test_nonempty_model_not_overridden(self):
        """When config has a model set, don't override it."""
        runner = self._bare_runner()

        with patch("gateway.run._resolve_gateway_model", return_value="gpt-5.4"), \
                patch("gateway.run._resolve_runtime_agent_kwargs",
                      return_value=self._codex_kwargs()):
            # kwargs are irrelevant here — only the model matters.
            model, _kwargs = runner._resolve_session_agent_runtime()

        assert model == "gpt-5.4", "Explicit model should not be overridden"

    def test_empty_model_no_provider_stays_empty(self):
        """When both model and provider are empty, model stays empty."""
        runner = self._bare_runner()

        with patch("gateway.run._resolve_gateway_model", return_value=""), \
                patch("gateway.run._resolve_runtime_agent_kwargs", return_value={
                    "provider": "",
                    "api_key": "test-key",
                    "base_url": "https://example.com",
                    "api_mode": "chat_completions",
                }):
            model, _kwargs = runner._resolve_session_agent_runtime()

        # Can't fill in a default without knowing the provider.
        assert model == ""
class TestResolveGatewayModel:
    """Test _resolve_gateway_model reads model from config correctly."""

    def test_returns_default_key(self):
        from gateway.run import _resolve_gateway_model

        config = {"model": {"default": "gpt-5.4"}}
        assert _resolve_gateway_model(config) == "gpt-5.4"

    def test_returns_model_key_fallback(self):
        from gateway.run import _resolve_gateway_model

        # "model" inside the model section works as a fallback key.
        config = {"model": {"model": "gpt-5.4"}}
        assert _resolve_gateway_model(config) == "gpt-5.4"

    def test_returns_empty_when_missing(self):
        from gateway.run import _resolve_gateway_model

        # An empty "model" section resolves to the empty string.
        assert _resolve_gateway_model({"model": {}}) == ""

    def test_returns_empty_when_no_model_section(self):
        from gateway.run import _resolve_gateway_model

        # No "model" section at all also resolves to the empty string.
        assert _resolve_gateway_model({}) == ""

    def test_string_model_config(self):
        from gateway.run import _resolve_gateway_model

        # A bare string is accepted in place of a mapping.
        assert _resolve_gateway_model({"model": "my-model"}) == "my-model"
Loading…
Add table
Add a link
Reference in a new issue