mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-25 00:51:20 +00:00
fix(config): accept 'model' key as alias for 'default' in model config (#3603)
Users intuitively write model: { model: my-model } instead of
model: { default: my-model } and it silently falls back to the
hardcoded default. Now both spellings work across all three config
consumers: runtime_provider, CLI, and gateway.
Co-authored-by: ygd58 <ygd58@users.noreply.github.com>
This commit is contained in:
parent
9a364f2805
commit
e4480ff426
5 changed files with 11 additions and 3 deletions
|
|
@@ -7,6 +7,7 @@
|
|||
# =============================================================================
|
||||
model:
|
||||
# Default model to use (can be overridden with --model flag)
|
||||
# Both "default" and "model" work as the key name here.
|
||||
default: "anthropic/claude-opus-4.6"
|
||||
|
||||
# Inference provider selection:
|
||||
|
|
|
|||
2
cli.py
2
cli.py
|
|
@@ -1078,7 +1078,7 @@ class HermesCLI:
|
|||
# authoritative. This avoids conflicts in multi-agent setups where
|
||||
# env vars would stomp each other.
|
||||
_model_config = CLI_CONFIG.get("model", {})
|
||||
_config_model = _model_config.get("default", "") if isinstance(_model_config, dict) else (_model_config or "")
|
||||
_config_model = (_model_config.get("default") or _model_config.get("model") or "") if isinstance(_model_config, dict) else (_model_config or "")
|
||||
_FALLBACK_MODEL = "anthropic/claude-opus-4.6"
|
||||
self.model = model or _config_model or _FALLBACK_MODEL
|
||||
# Auto-detect model from local server if still on fallback
|
||||
|
|
|
|||
|
|
@@ -288,7 +288,7 @@ def _resolve_gateway_model(config: dict | None = None) -> str:
|
|||
if isinstance(model_cfg, str):
|
||||
model = model_cfg
|
||||
elif isinstance(model_cfg, dict):
|
||||
model = model_cfg.get("default", model)
|
||||
model = model_cfg.get("default") or model_cfg.get("model") or model
|
||||
return model
|
||||
|
||||
|
||||
|
|
@@ -2093,7 +2093,7 @@ class GatewayRunner:
|
|||
if isinstance(_model_cfg, str):
|
||||
_hyg_model = _model_cfg
|
||||
elif isinstance(_model_cfg, dict):
|
||||
_hyg_model = _model_cfg.get("default", _hyg_model)
|
||||
_hyg_model = _model_cfg.get("default") or _model_cfg.get("model") or _hyg_model
|
||||
# Read explicit context_length override from model config
|
||||
# (same as run_agent.py lines 995-1005)
|
||||
_raw_ctx = _model_cfg.get("context_length")
|
||||
|
|
|
|||
|
|
@@ -63,6 +63,9 @@ def _get_model_config() -> Dict[str, Any]:
|
|||
model_cfg = config.get("model")
|
||||
if isinstance(model_cfg, dict):
|
||||
cfg = dict(model_cfg)
|
||||
# Accept "model" as alias for "default" (users intuitively write model.model)
|
||||
if not cfg.get("default") and cfg.get("model"):
|
||||
cfg["default"] = cfg["model"]
|
||||
default = (cfg.get("default") or "").strip()
|
||||
base_url = (cfg.get("base_url") or "").strip()
|
||||
is_local = "localhost" in base_url or "127.0.0.1" in base_url
|
||||
|
|
|
|||
|
|
@@ -95,6 +95,10 @@ You need at least one way to connect to an LLM. Use `hermes model` to switch pro
|
|||
| **Hugging Face** | `HF_TOKEN` in `~/.hermes/.env` (provider: `huggingface`, aliases: `hf`) |
|
||||
| **Custom Endpoint** | `hermes model` (saved in `config.yaml`) or `OPENAI_BASE_URL` + `OPENAI_API_KEY` in `~/.hermes/.env` |
|
||||
|
||||
:::tip Model key alias
|
||||
In the `model:` config section, you can use either `default:` or `model:` as the key name for your model ID. Both `model: { default: my-model }` and `model: { model: my-model }` work identically.
|
||||
:::
|
||||
|
||||
:::info Codex Note
|
||||
The OpenAI Codex provider authenticates via device code (open a URL, enter a code). Hermes stores the resulting credentials in its own auth store under `~/.hermes/auth.json` and can import existing Codex CLI credentials from `~/.codex/auth.json` when present. No Codex CLI installation is required.
|
||||
:::
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue