fix(custom-providers): propagate model field from config to runtime so API receives the correct model name

Fixes #7828

When a custom_providers entry carries a `model` field, that value was
silently dropped by `_get_named_custom_provider` and
`_resolve_named_custom_runtime`.  Callers received a runtime dict with
`base_url`, `api_key`, and `api_mode` — but no `model`.

As a result, `hermes chat --model <provider-name>` sent the *provider
name* (e.g. "my-dashscope-provider") as the model string to the API
instead of the configured model (e.g. "qwen3.6-plus"), producing:

    Error code: 400 - {'error': {'message': 'Model Not Exist'}}

Setting the provider as the *default* model in config.yaml worked
because that path writes `model.default` and the agent reads it back
directly, bypassing the broken runtime resolution path.

Changes:

1. hermes_cli/runtime_provider.py — _get_named_custom_provider()
   Reads `entry.get("model")` and includes it in the result dict so
   the value is available to callers.

2. hermes_cli/runtime_provider.py — _resolve_named_custom_runtime()
   Propagates `custom_provider["model"]` into the returned runtime dict.

3. cli.py — _ensure_runtime_credentials()
   After resolving runtime, if `runtime["model"]` is set, assign it to
   `self.model` so the AIAgent is initialised with the correct model
   name rather than the provider name the user typed on the CLI.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
0xFrank-eth 2026-04-11 22:53:08 +03:00 committed by Teknium
parent b0892375cd
commit 0e6354df50
2 changed files with 18 additions and 1 deletions

9
cli.py
View file

@@ -2710,6 +2710,15 @@ class HermesCLI:
self.api_key = api_key
self.base_url = base_url
# When a custom_provider entry carries an explicit `model` field,
# use it as the effective model name. Without this, running
# `hermes chat --model <provider-name>` sends the provider name
# (e.g. "my-provider") as the model string to the API instead of
# the configured model (e.g. "qwen3.6-plus"), causing 400 errors.
runtime_model = runtime.get("model")
if runtime_model and isinstance(runtime_model, str):
self.model = runtime_model
# Normalize model for the resolved provider (e.g. swap non-Codex
# models when provider is openai-codex). Fixes #651.
model_changed = self._normalize_model_for_provider(resolved_provider)

View file

@@ -304,6 +304,9 @@ def _get_named_custom_provider(requested_provider: str) -> Optional[Dict[str, An
api_mode = _parse_api_mode(entry.get("api_mode"))
if api_mode:
result["api_mode"] = api_mode
model_name = str(entry.get("model", "") or "").strip()
if model_name:
result["model"] = model_name
return result
return None
@@ -339,7 +342,7 @@ def _resolve_named_custom_runtime(
]
api_key = next((candidate for candidate in api_key_candidates if has_usable_secret(candidate)), "")
return {
result = {
"provider": "custom",
"api_mode": custom_provider.get("api_mode")
or _detect_api_mode_for_url(base_url)
@@ -348,6 +351,11 @@ def _resolve_named_custom_runtime(
"api_key": api_key or "no-key-required",
"source": f"custom_provider:{custom_provider.get('name', requested_provider)}",
}
# Propagate the model name so callers can override self.model when the
# provider name differs from the actual model string the API expects.
if custom_provider.get("model"):
result["model"] = custom_provider["model"]
return result
def _resolve_openrouter_runtime(