mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
fix: fall back to provider's default model when model config is empty (#8303)
When a user configures a provider (e.g. `hermes auth add openai-codex`) but never selects a model via `hermes model`, the gateway and CLI would pass an empty model string to the API, causing the error: 'Codex Responses request model must be a non-empty string'. Now both the gateway (_resolve_session_agent_runtime) and the CLI (_ensure_runtime_credentials) detect an empty model and fill it from the provider's first catalog entry in _PROVIDER_MODELS. This covers all providers that have a static model list (openai-codex, anthropic, gemini, copilot, etc.). The fix is conservative: it only triggers when the model is truly empty and a known provider was resolved. Explicit model choices are never overridden.
This commit is contained in:
parent
17c72f176d
commit
45e60904c6
4 changed files with 167 additions and 0 deletions
16
cli.py
16
cli.py
|
|
@ -2735,6 +2735,22 @@ class HermesCLI:
|
|||
if runtime_model and isinstance(runtime_model, str):
|
||||
self.model = runtime_model
|
||||
|
||||
# If model is still empty (e.g. user ran `hermes auth add openai-codex`
|
||||
# without `hermes model`), fall back to the provider's first catalog
|
||||
# model so the API call doesn't fail with "model must be non-empty".
|
||||
if not self.model and resolved_provider:
|
||||
try:
|
||||
from hermes_cli.models import get_default_model_for_provider
|
||||
_default = get_default_model_for_provider(resolved_provider)
|
||||
if _default:
|
||||
self.model = _default
|
||||
logger.info(
|
||||
"No model configured — defaulting to %s for provider %s",
|
||||
_default, resolved_provider,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Normalize model for the resolved provider (e.g. swap non-Codex
|
||||
# models when provider is openai-codex). Fixes #651.
|
||||
model_changed = self._normalize_model_for_provider(resolved_provider)
|
||||
|
|
|
|||
|
|
@ -900,6 +900,23 @@ class GatewayRunner:
|
|||
model, runtime_kwargs = self._apply_session_model_override(
|
||||
resolved_session_key, model, runtime_kwargs
|
||||
)
|
||||
|
||||
# When the config has no model.default but a provider was resolved
|
||||
# (e.g. user ran `hermes auth add openai-codex` without `hermes model`),
|
||||
# fall back to the provider's first catalog model so the API call
|
||||
# doesn't fail with "model must be a non-empty string".
|
||||
if not model and runtime_kwargs.get("provider"):
|
||||
try:
|
||||
from hermes_cli.models import get_default_model_for_provider
|
||||
model = get_default_model_for_provider(runtime_kwargs["provider"])
|
||||
if model:
|
||||
logger.info(
|
||||
"No model configured — defaulting to %s for provider %s",
|
||||
model, runtime_kwargs["provider"],
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return model, runtime_kwargs
|
||||
|
||||
def _resolve_turn_agent_config(self, user_message: str, model: str, runtime_kwargs: dict) -> dict:
|
||||
|
|
|
|||
|
|
@ -546,6 +546,20 @@ _PROVIDER_ALIASES = {
|
|||
}
|
||||
|
||||
|
||||
def get_default_model_for_provider(provider: str) -> str:
    """Return the default model for a provider, or empty string if unknown.

    The default is simply the first entry in _PROVIDER_MODELS — the same
    model a user would be offered first in the ``hermes model`` picker.

    This exists as a fallback for the case where a provider was configured
    but no model was ever selected (e.g. ``hermes auth add openai-codex``
    without a subsequent ``hermes model``).
    """
    catalog = _PROVIDER_MODELS.get(provider)
    return catalog[0] if catalog else ""
|
||||
|
||||
|
||||
def _openrouter_model_is_free(pricing: Any) -> bool:
|
||||
"""Return True when both prompt and completion pricing are zero."""
|
||||
if not isinstance(pricing, dict):
|
||||
|
|
|
|||
120
tests/test_empty_model_fallback.py
Normal file
120
tests/test_empty_model_fallback.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
"""Tests for empty model fallback — when provider is configured but model is missing."""
|
||||
|
||||
from unittest.mock import MagicMock, patch
|
||||
import pytest
|
||||
|
||||
|
||||
class TestGetDefaultModelForProvider:
    """Unit tests for hermes_cli.models.get_default_model_for_provider."""

    def test_known_provider_returns_first_model(self):
        """A provider with a static catalog yields a non-empty first model."""
        from hermes_cli.models import get_default_model_for_provider

        default = get_default_model_for_provider("openai-codex")
        # First entry of _PROVIDER_MODELS["openai-codex"] is expected here.
        assert isinstance(default, str)
        assert default

    def test_openrouter_returns_empty(self):
        """OpenRouter uses dynamic model fetch, no static catalog entry."""
        from hermes_cli.models import get_default_model_for_provider

        # OpenRouter is absent from _PROVIDER_MODELS — it fetches models live.
        assert get_default_model_for_provider("openrouter") == ""

    def test_unknown_provider_returns_empty(self):
        """Providers missing from the catalog map to an empty string."""
        from hermes_cli.models import get_default_model_for_provider

        assert get_default_model_for_provider("nonexistent-provider") == ""

    def test_custom_provider_returns_empty(self):
        """Custom provider has no model catalog — should return empty."""
        from hermes_cli.models import get_default_model_for_provider

        # Custom providers never gain entries in _PROVIDER_MODELS.
        assert get_default_model_for_provider("some-random-custom") == ""
|
||||
|
||||
|
||||
class TestGatewayEmptyModelFallback:
    """Test that _resolve_session_agent_runtime fills in empty model from provider catalog."""

    @staticmethod
    def _bare_runner():
        """Build a GatewayRunner without running its real __init__."""
        from gateway.run import GatewayRunner

        runner = object.__new__(GatewayRunner)
        runner._session_model_overrides = {}
        return runner

    @staticmethod
    def _codex_kwargs():
        """Fresh runtime kwargs describing an openai-codex provider."""
        return {
            "provider": "openai-codex",
            "api_key": "test-key",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_mode": "codex_responses",
        }

    def test_empty_model_filled_from_provider(self):
        """When config has no model but provider is openai-codex, use first codex model."""
        runner = self._bare_runner()

        # Empty configured model + known provider → fallback should kick in.
        with patch("gateway.run._resolve_gateway_model", return_value=""), \
                patch("gateway.run._resolve_runtime_agent_kwargs",
                      return_value=self._codex_kwargs()):
            model, kwargs = runner._resolve_session_agent_runtime()

        assert model, "Model should not be empty when provider is known"
        assert isinstance(model, str)
        assert kwargs["provider"] == "openai-codex"

    def test_nonempty_model_not_overridden(self):
        """When config has a model set, don't override it."""
        runner = self._bare_runner()

        with patch("gateway.run._resolve_gateway_model", return_value="gpt-5.4"), \
                patch("gateway.run._resolve_runtime_agent_kwargs",
                      return_value=self._codex_kwargs()):
            model, _kwargs = runner._resolve_session_agent_runtime()

        assert model == "gpt-5.4", "Explicit model should not be overridden"

    def test_empty_model_no_provider_stays_empty(self):
        """When both model and provider are empty, model stays empty."""
        runner = self._bare_runner()

        no_provider_kwargs = {
            "provider": "",
            "api_key": "test-key",
            "base_url": "https://example.com",
            "api_mode": "chat_completions",
        }
        with patch("gateway.run._resolve_gateway_model", return_value=""), \
                patch("gateway.run._resolve_runtime_agent_kwargs",
                      return_value=no_provider_kwargs):
            model, _kwargs = runner._resolve_session_agent_runtime()

        # Can't fill in a default without knowing the provider.
        assert model == ""
|
||||
|
||||
|
||||
class TestResolveGatewayModel:
    """Test _resolve_gateway_model reads model from config correctly."""

    @staticmethod
    def _resolve(config):
        """Call the resolver; imported lazily, matching per-test import style."""
        from gateway.run import _resolve_gateway_model

        return _resolve_gateway_model(config)

    def test_returns_default_key(self):
        """model.default is the primary lookup key."""
        assert self._resolve({"model": {"default": "gpt-5.4"}}) == "gpt-5.4"

    def test_returns_model_key_fallback(self):
        """model.model is accepted when model.default is absent."""
        assert self._resolve({"model": {"model": "gpt-5.4"}}) == "gpt-5.4"

    def test_returns_empty_when_missing(self):
        """An empty model section yields an empty string."""
        assert self._resolve({"model": {}}) == ""

    def test_returns_empty_when_no_model_section(self):
        """A config with no model section yields an empty string."""
        assert self._resolve({}) == ""

    def test_string_model_config(self):
        """A bare string under "model" is returned as-is."""
        assert self._resolve({"model": "my-model"}) == "my-model"
|
||||
Loading…
Add table
Add a link
Reference in a new issue