refactor: remove /model slash command from CLI and gateway (#3080)

The /model command is removed from both the interactive CLI and
messenger gateway (Telegram/Discord/Slack/WhatsApp). Users can
still change models via 'hermes model' CLI subcommand or by
editing config.yaml directly.

Removed:
- CommandDef entry from COMMAND_REGISTRY
- CLI process_command() handler and model autocomplete logic
- Gateway _handle_model_command() and dispatch
- SlashCommandCompleter model_completer_provider parameter
- Two-stage Tab completion and ghost text for /model
- All /model-specific tests

Unaffected:
- /provider command (read-only, shows current model + providers)
- ACP adapter _cmd_model (separate system for VS Code/Zed/JetBrains)
- model_switch.py module (used by ACP)
- 'hermes model' CLI subcommand

Author: Teknium
This commit is contained in:
Teknium 2026-03-25 17:03:05 -07:00 committed by GitHub
parent 0cfc1f88a3
commit 9783c9d5c1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 5 additions and 626 deletions

133
cli.py
View file

@@ -3003,10 +3003,10 @@ class HermesCLI:
print(f" {remaining} message(s) remaining in history.")
def _show_model_and_providers(self):
"""Unified /model and /provider display.
"""Show current model + provider and list all authenticated providers.
Shows current model + provider, then lists all authenticated
providers with their available models so users can switch easily.
providers with their available models.
"""
from hermes_cli.models import (
curated_models_for_provider, list_available_providers,
@@ -3055,9 +3055,9 @@ class HermesCLI:
print(f" endpoint: {custom_url}")
if is_active:
print(f" model: {self.model} ← current")
print(f" (use /model custom:<model-name>)")
print(f" (use hermes model to change)")
else:
print(f" (use /model {p['id']}:<model-name>)")
print(f" (use hermes model to change)")
print()
if unauthed:
@@ -3066,15 +3066,7 @@ class HermesCLI:
print(f" Run: hermes setup")
print()
print(" Switch model: /model <model-name>")
print(" Switch provider: /model <provider>:<model-name>")
if authed and len(authed) > 1:
# Show a concrete example with a non-active provider
other = next((p for p in authed if p["id"] != current), authed[0])
other_models = curated_models_for_provider(other["id"])
if other_models:
example_model = other_models[0][0]
print(f" Example: /model {other['id']}:{example_model}")
print(" To change model or provider, use: hermes model")
def _handle_prompt_command(self, cmd: str):
"""Handle the /prompt command to view or set system prompt."""
@@ -3643,91 +3635,6 @@ class HermesCLI:
_cprint(" Session database not available.")
elif canonical == "new":
self.new_session()
elif canonical == "model":
# Use original case so model names like "Anthropic/Claude-Opus-4" are preserved
parts = cmd_original.split(maxsplit=1)
if len(parts) > 1:
from hermes_cli.model_switch import switch_model, switch_to_custom_provider
raw_input = parts[1].strip()
# Handle bare "/model custom" — switch to custom provider
# and auto-detect the model from the endpoint.
if raw_input.strip().lower() == "custom":
result = switch_to_custom_provider()
if result.success:
self.model = result.model
self.requested_provider = "custom"
self.provider = "custom"
self.api_key = result.api_key
self.base_url = result.base_url
self.agent = None
save_config_value("model.default", result.model)
save_config_value("model.provider", "custom")
save_config_value("model.base_url", result.base_url)
print(f"(^_^)b Model changed to: {result.model} [provider: Custom]")
print(f" Endpoint: {result.base_url}")
print(f" Status: connected (model auto-detected)")
else:
print(f"(>_<) {result.error_message}")
return True
# Core model-switching pipeline (shared with gateway)
current_provider = self.provider or self.requested_provider or "openrouter"
result = switch_model(
raw_input,
current_provider,
current_base_url=self.base_url or "",
current_api_key=self.api_key or "",
)
if not result.success:
print(f"(>_<) {result.error_message}")
if "Did you mean" not in result.error_message:
print(f" Model unchanged: {self.model}")
if "credentials" not in result.error_message.lower():
print(" Tip: Use /model to see available models, /provider to see providers")
else:
self.model = result.new_model
self.agent = None # Force re-init
if result.provider_changed:
self.requested_provider = result.target_provider
self.provider = result.target_provider
self.api_key = result.api_key
self.base_url = result.base_url
provider_note = f" [provider: {result.provider_label}]" if result.provider_changed else ""
if result.persist:
saved_model = save_config_value("model.default", result.new_model)
if result.provider_changed:
save_config_value("model.provider", result.target_provider)
# Persist base_url for custom endpoints; clear
# when switching away from custom (#2562 Phase 2).
if result.base_url and "openrouter.ai" not in (result.base_url or ""):
save_config_value("model.base_url", result.base_url)
else:
save_config_value("model.base_url", None)
if saved_model:
print(f"(^_^)b Model changed to: {result.new_model}{provider_note} (saved to config)")
else:
print(f"(^_^) Model changed to: {result.new_model}{provider_note} (this session only)")
else:
print(f"(^_^) Model changed to: {result.new_model}{provider_note} (this session only)")
if result.warning_message:
print(f" Reason: {result.warning_message}")
print(" Note: Model will revert on restart. Use a verified model to save to config.")
# Show endpoint info for custom providers
if result.is_custom_target:
endpoint = result.base_url or self.base_url or "custom endpoint"
print(f" Endpoint: {endpoint}")
if not result.provider_changed:
print(f" Tip: To switch providers, use /model provider:model")
print(f" e.g. /model openai-codex:gpt-5.2-codex")
else:
self._show_model_and_providers()
elif canonical == "provider":
self._show_model_and_providers()
elif canonical == "prompt":
@@ -6231,10 +6138,6 @@ class HermesCLI:
return
# Accept the selected completion
buf.apply_completion(completion)
# If text now looks like "/model provider:", re-trigger completions
text = buf.document.text_before_cursor
if text.startswith("/model ") and text.endswith(":"):
buf.start_completion()
elif buf.suggestion and buf.suggestion.text:
# No completion menu, but there's a ghost text auto-suggestion — accept it
buf.insert_text(buf.suggestion.text)
@@ -6529,35 +6432,9 @@ class HermesCLI:
# Create the input area with multiline (shift+enter), autocomplete, and paste handling
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
def _get_model_completer_info() -> dict:
"""Return provider/model info for /model autocomplete."""
try:
from hermes_cli.models import (
_PROVIDER_LABELS, normalize_provider, provider_model_ids,
)
current = getattr(cli_ref, "provider", None) or getattr(cli_ref, "requested_provider", "openrouter")
current = normalize_provider(current)
# Provider map: id -> label (only providers with known models)
providers = {}
for pid, plabel in _PROVIDER_LABELS.items():
providers[pid] = plabel
def models_for(provider_name: str) -> list[str]:
norm = normalize_provider(provider_name)
return provider_model_ids(norm)
return {
"current_provider": current,
"providers": providers,
"models_for": models_for,
}
except Exception:
return {}
_completer = SlashCommandCompleter(
skill_commands_provider=lambda: _skill_commands,
model_completer_provider=_get_model_completer_info,
)
input_area = TextArea(
height=Dimension(min=1, max=8, preferred=1),

View file

@@ -1670,9 +1670,6 @@ class GatewayRunner:
if canonical == "stop":
return await self._handle_stop_command(event)
if canonical == "model":
return await self._handle_model_command(event)
if canonical == "reasoning":
return await self._handle_reasoning_command(event)
@@ -2764,196 +2761,6 @@ class GatewayRunner:
pass
return "\n".join(lines)
async def _handle_model_command(self, event: MessageEvent) -> str:
"""Handle /model command - show or change the current model."""
import yaml
from hermes_cli.models import (
curated_models_for_provider,
normalize_provider,
_PROVIDER_LABELS,
)
args = event.get_command_args().strip()
config_path = _hermes_home / 'config.yaml'
# Resolve current model and provider from config
current = os.getenv("HERMES_MODEL") or "anthropic/claude-opus-4.6"
current_provider = "openrouter"
try:
if config_path.exists():
with open(config_path, encoding="utf-8") as f:
cfg = yaml.safe_load(f) or {}
model_cfg = cfg.get("model", {})
if isinstance(model_cfg, str):
current = model_cfg
elif isinstance(model_cfg, dict):
current = model_cfg.get("default", current)
current_provider = model_cfg.get("provider", current_provider)
except Exception:
pass
# Resolve "auto" to the actual provider using credential detection
current_provider = normalize_provider(current_provider)
if current_provider == "auto":
try:
from hermes_cli.auth import resolve_provider as _resolve_provider
current_provider = _resolve_provider(current_provider)
except Exception:
current_provider = "openrouter"
# Detect custom endpoint: provider resolved to openrouter but a custom
# base URL is configured — the user set up a custom endpoint.
if current_provider == "openrouter" and os.getenv("OPENAI_BASE_URL", "").strip():
current_provider = "custom"
if not args:
# If a fallback model is active, show it instead of config
if self._effective_model:
eff_provider = self._effective_provider or 'unknown'
eff_label = _PROVIDER_LABELS.get(eff_provider, eff_provider)
cfg_label = _PROVIDER_LABELS.get(current_provider, current_provider)
lines = [
f"🤖 **Active model:** `{self._effective_model}` (fallback)",
f"**Provider:** {eff_label}",
f"**Primary model** (`{current}` via {cfg_label}) is rate-limited.",
"",
]
lines.append("To change: `/model model-name`")
lines.append("Switch provider: `/model provider:model-name`")
return "\n".join(lines)
provider_label = _PROVIDER_LABELS.get(current_provider, current_provider)
lines = [
f"🤖 **Current model:** `{current}`",
f"**Provider:** {provider_label}",
]
# Show custom endpoint URL when using a custom provider
if current_provider == "custom":
from hermes_cli.models import _get_custom_base_url
custom_url = _get_custom_base_url() or os.getenv("OPENAI_BASE_URL", "")
if custom_url:
lines.append(f"**Endpoint:** `{custom_url}`")
lines.append("")
curated = curated_models_for_provider(current_provider)
if curated:
lines.append(f"**Available models ({provider_label}):**")
for mid, desc in curated:
marker = "" if mid == current else ""
label = f" _{desc}_" if desc else ""
lines.append(f"• `{mid}`{label}{marker}")
lines.append("")
lines.append("To change: `/model model-name`")
lines.append("Switch provider: `/model provider-name` or `/model provider:model-name`")
return "\n".join(lines)
# Handle bare "/model custom" — switch to custom provider
# and auto-detect the model from the endpoint.
if args.strip().lower() == "custom":
from hermes_cli.model_switch import switch_to_custom_provider
cust_result = switch_to_custom_provider()
if not cust_result.success:
return f"⚠️ {cust_result.error_message}"
try:
user_config = {}
if config_path.exists():
with open(config_path, encoding="utf-8") as f:
user_config = yaml.safe_load(f) or {}
if "model" not in user_config or not isinstance(user_config["model"], dict):
user_config["model"] = {}
user_config["model"]["default"] = cust_result.model
user_config["model"]["provider"] = "custom"
user_config["model"]["base_url"] = cust_result.base_url
with open(config_path, 'w', encoding="utf-8") as f:
yaml.dump(user_config, f, default_flow_style=False, sort_keys=False)
except Exception as e:
return f"⚠️ Failed to save model change: {e}"
os.environ["HERMES_MODEL"] = cust_result.model
os.environ["HERMES_INFERENCE_PROVIDER"] = "custom"
self._effective_model = None
self._effective_provider = None
return (
f"🤖 Model changed to `{cust_result.model}` (saved to config)\n"
f"**Provider:** Custom\n"
f"**Endpoint:** `{cust_result.base_url}`\n"
f"_Model auto-detected from endpoint. Takes effect on next message._"
)
# Core model-switching pipeline (shared with CLI)
from hermes_cli.model_switch import switch_model
# Resolve current base_url for is_custom detection
_resolved_base = ""
try:
from hermes_cli.runtime_provider import resolve_runtime_provider as _rtp
_resolved_base = _rtp(requested=current_provider).get("base_url", "")
except Exception:
pass
result = switch_model(
args,
current_provider,
current_base_url=_resolved_base,
current_api_key=os.getenv("OPENROUTER_API_KEY") or os.getenv("OPENAI_API_KEY") or "",
)
if not result.success:
msg = result.error_message
tip = "\n\nUse `/model` to see available models, `/provider` to see providers" if "Did you mean" not in msg else ""
return f"⚠️ {msg}{tip}"
# Persist to config only if validation approves
if result.persist:
try:
user_config = {}
if config_path.exists():
with open(config_path, encoding="utf-8") as f:
user_config = yaml.safe_load(f) or {}
if "model" not in user_config or not isinstance(user_config["model"], dict):
user_config["model"] = {}
user_config["model"]["default"] = result.new_model
if result.provider_changed:
user_config["model"]["provider"] = result.target_provider
# Persist base_url for custom endpoints; clear when
# switching away from custom (#2562 Phase 2).
if result.base_url and "openrouter.ai" not in (result.base_url or ""):
user_config["model"]["base_url"] = result.base_url
else:
user_config["model"].pop("base_url", None)
with open(config_path, 'w', encoding="utf-8") as f:
yaml.dump(user_config, f, default_flow_style=False, sort_keys=False)
except Exception as e:
return f"⚠️ Failed to save model change: {e}"
# Set env vars so the next agent run picks up the change
os.environ["HERMES_MODEL"] = result.new_model
if result.provider_changed:
os.environ["HERMES_INFERENCE_PROVIDER"] = result.target_provider
provider_note = f"\n**Provider:** {result.provider_label}" if result.provider_changed else ""
warning = ""
if result.warning_message:
warning = f"\n⚠️ {result.warning_message}"
persist_note = "saved to config" if result.persist else "this session only — will revert on restart"
# Clear fallback state since user explicitly chose a model
self._effective_model = None
self._effective_provider = None
# Show endpoint info for custom providers
custom_hint = ""
if result.is_custom_target:
endpoint = result.base_url or _resolved_base or "custom endpoint"
custom_hint = f"\n**Endpoint:** `{endpoint}`"
if not result.provider_changed:
custom_hint += (
"\n_To switch providers, use_ `/model provider:model`"
"\n_e.g._ `/model openrouter:anthropic/claude-sonnet-4`"
)
return f"🤖 Model changed to `{result.new_model}` ({persist_note}){provider_note}{warning}{custom_hint}\n_(takes effect on next message)_"
async def _handle_provider_command(self, event: MessageEvent) -> str:
"""Handle /provider command - show available providers."""
import yaml

View file

@@ -78,8 +78,6 @@ COMMAND_REGISTRY: list[CommandDef] = [
# Configuration
CommandDef("config", "Show current configuration", "Configuration",
cli_only=True),
CommandDef("model", "Show or change the current model", "Configuration",
args_hint="[name]"),
CommandDef("provider", "Show available providers and current provider",
"Configuration"),
CommandDef("prompt", "View/set custom system prompt", "Configuration",
@@ -330,29 +328,8 @@ class SlashCommandCompleter(Completer):
def __init__(
self,
skill_commands_provider: Callable[[], Mapping[str, dict[str, Any]]] | None = None,
model_completer_provider: Callable[[], dict[str, Any]] | None = None,
) -> None:
self._skill_commands_provider = skill_commands_provider
# model_completer_provider returns {"current_provider": str,
# "providers": {id: label, ...}, "models_for": callable(provider) -> list[str]}
self._model_completer_provider = model_completer_provider
self._model_info_cache: dict[str, Any] | None = None
self._model_info_cache_time: float = 0
def _get_model_info(self) -> dict[str, Any]:
"""Get cached model/provider info for /model autocomplete."""
import time
now = time.monotonic()
if self._model_info_cache is not None and now - self._model_info_cache_time < 60:
return self._model_info_cache
if self._model_completer_provider is None:
return {}
try:
self._model_info_cache = self._model_completer_provider() or {}
self._model_info_cache_time = now
except Exception:
self._model_info_cache = self._model_info_cache or {}
return self._model_info_cache
def _iter_skill_commands(self) -> Mapping[str, dict[str, Any]]:
if self._skill_commands_provider is None:
@@ -591,52 +568,6 @@ class SlashCommandCompleter(Completer):
sub_text = parts[1] if len(parts) > 1 else ""
sub_lower = sub_text.lower()
# /model gets two-stage completion:
# Stage 1: provider names (with : suffix)
# Stage 2: after "provider:", list that provider's models
if base_cmd == "/model" and " " not in sub_text:
info = self._get_model_info()
if info:
current_prov = info.get("current_provider", "")
providers = info.get("providers", {})
models_for = info.get("models_for")
if ":" in sub_text:
# Stage 2: "anthropic:cl" → models for anthropic
prov_part, model_part = sub_text.split(":", 1)
model_lower = model_part.lower()
if models_for:
try:
prov_models = models_for(prov_part)
except Exception:
prov_models = []
for mid in prov_models:
if mid.lower().startswith(model_lower) and mid.lower() != model_lower:
full = f"{prov_part}:{mid}"
yield Completion(
full,
start_position=-len(sub_text),
display=mid,
)
else:
# Stage 1: providers sorted: non-current first, current last
for pid, plabel in sorted(
providers.items(),
key=lambda kv: (kv[0] == current_prov, kv[0]),
):
display_name = f"{pid}:"
if display_name.lower().startswith(sub_lower):
meta = f"({plabel})" if plabel != pid else ""
if pid == current_prov:
meta = f"(current — {plabel})" if plabel != pid else "(current)"
yield Completion(
display_name,
start_position=-len(sub_text),
display=display_name,
display_meta=meta,
)
return
# Static subcommand completions
if " " not in sub_text and base_cmd in SUBCOMMANDS:
for sub in SUBCOMMANDS[base_cmd]:
@@ -718,32 +649,6 @@ class SlashCommandAutoSuggest(AutoSuggest):
sub_text = parts[1] if len(parts) > 1 else ""
sub_lower = sub_text.lower()
# /model gets two-stage ghost text
if base_cmd == "/model" and " " not in sub_text and self._completer:
info = self._completer._get_model_info()
if info:
providers = info.get("providers", {})
models_for = info.get("models_for")
current_prov = info.get("current_provider", "")
if ":" in sub_text:
# Stage 2: after provider:, suggest model
prov_part, model_part = sub_text.split(":", 1)
model_lower = model_part.lower()
if models_for:
try:
for mid in models_for(prov_part):
if mid.lower().startswith(model_lower) and mid.lower() != model_lower:
return Suggestion(mid[len(model_part):])
except Exception:
pass
else:
# Stage 1: suggest provider name with :
for pid in sorted(providers, key=lambda p: (p == current_prov, p)):
candidate = f"{pid}:"
if candidate.lower().startswith(sub_lower) and candidate.lower() != sub_lower:
return Suggestion(candidate[len(sub_text):])
# Static subcommands
if base_cmd in SUBCOMMANDS and SUBCOMMANDS[base_cmd]:
if " " not in sub_text:

View file

@@ -389,72 +389,6 @@ class TestSubcommandCompletion:
assert completions == []
# ── Two-stage /model completion ─────────────────────────────────────────
def _model_completer() -> SlashCommandCompleter:
"""Build a completer with mock model/provider info."""
return SlashCommandCompleter(
model_completer_provider=lambda: {
"current_provider": "openrouter",
"providers": {
"anthropic": "Anthropic",
"openrouter": "OpenRouter",
"nous": "Nous Research",
},
"models_for": lambda p: {
"anthropic": ["claude-sonnet-4-20250514", "claude-opus-4-20250414"],
"openrouter": ["anthropic/claude-sonnet-4", "google/gemini-2.5-pro"],
"nous": ["hermes-3-llama-3.1-405b"],
}.get(p, []),
}
)
class TestModelCompletion:
def test_stage1_shows_providers(self):
completions = _completions(_model_completer(), "/model ")
texts = {c.text for c in completions}
assert "anthropic:" in texts
assert "openrouter:" in texts
assert "nous:" in texts
def test_stage1_current_provider_last(self):
completions = _completions(_model_completer(), "/model ")
texts = [c.text for c in completions]
assert texts[-1] == "openrouter:"
def test_stage1_current_provider_labeled(self):
completions = _completions(_model_completer(), "/model ")
for c in completions:
if c.text == "openrouter:":
assert "current" in c.display_meta_text.lower()
break
else:
raise AssertionError("openrouter: not found in completions")
def test_stage1_prefix_filters(self):
completions = _completions(_model_completer(), "/model an")
texts = {c.text for c in completions}
assert texts == {"anthropic:"}
def test_stage2_shows_models(self):
completions = _completions(_model_completer(), "/model anthropic:")
texts = {c.text for c in completions}
assert "anthropic:claude-sonnet-4-20250514" in texts
assert "anthropic:claude-opus-4-20250414" in texts
def test_stage2_prefix_filters_models(self):
completions = _completions(_model_completer(), "/model anthropic:claude-s")
texts = {c.text for c in completions}
assert "anthropic:claude-sonnet-4-20250514" in texts
assert "anthropic:claude-opus-4-20250414" not in texts
def test_stage2_no_model_provider_returns_empty(self):
completions = _completions(SlashCommandCompleter(), "/model ")
assert completions == []
# ── Ghost text (SlashCommandAutoSuggest) ────────────────────────────────
@@ -492,15 +426,3 @@ class TestGhostText:
def test_no_suggestion_for_non_slash(self):
assert _suggestion("hello") is None
def test_model_stage1_ghost_text(self):
"""/model a → 'nthropic:'"""
completer = _model_completer()
assert _suggestion("/model a", completer=completer) == "nthropic:"
def test_model_stage2_ghost_text(self):
"""/model anthropic:cl → rest of first matching model"""
completer = _model_completer()
s = _suggestion("/model anthropic:cl", completer=completer)
assert s is not None
assert s.startswith("aude-")

View file

@@ -1,132 +0,0 @@
"""Regression tests for the `/model` slash command in the interactive CLI."""
from unittest.mock import patch, MagicMock
from cli import HermesCLI
class TestModelCommand:
def _make_cli(self):
cli_obj = HermesCLI.__new__(HermesCLI)
cli_obj.model = "anthropic/claude-opus-4.6"
cli_obj.agent = object()
cli_obj.provider = "openrouter"
cli_obj.requested_provider = "openrouter"
cli_obj.base_url = "https://openrouter.ai/api/v1"
cli_obj.api_key = "test-key"
cli_obj._explicit_api_key = None
cli_obj._explicit_base_url = None
return cli_obj
def test_valid_model_from_api_saved_to_config(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.models.fetch_api_models",
return_value=["anthropic/claude-sonnet-4.5", "openai/gpt-5.4"]), \
patch("cli.save_config_value", return_value=True) as save_mock:
cli_obj.process_command("/model anthropic/claude-sonnet-4.5")
output = capsys.readouterr().out
assert "saved to config" in output
assert cli_obj.model == "anthropic/claude-sonnet-4.5"
save_mock.assert_called_once_with("model.default", "anthropic/claude-sonnet-4.5")
def test_unlisted_model_accepted_with_warning(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.models.fetch_api_models",
return_value=["anthropic/claude-opus-4.6"]), \
patch("cli.save_config_value") as save_mock:
cli_obj.process_command("/model anthropic/fake-model")
output = capsys.readouterr().out
assert "not found" in output or "Model changed" in output
assert cli_obj.model == "anthropic/fake-model" # accepted
def test_api_unreachable_accepts_and_persists(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.models.fetch_api_models", return_value=None), \
patch("cli.save_config_value") as save_mock:
cli_obj.process_command("/model anthropic/claude-sonnet-next")
output = capsys.readouterr().out
assert "saved to config" in output
assert cli_obj.model == "anthropic/claude-sonnet-next"
save_mock.assert_called_once()
def test_no_slash_model_accepted_with_warning(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.models.fetch_api_models",
return_value=["openai/gpt-5.4"]) as fetch_mock, \
patch("cli.save_config_value") as save_mock:
cli_obj.process_command("/model gpt-5.4")
output = capsys.readouterr().out
# Auto-detection remaps bare model names to proper OpenRouter slugs
assert cli_obj.model == "openai/gpt-5.4"
def test_validation_crash_falls_back_to_save(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.models.validate_requested_model",
side_effect=RuntimeError("boom")), \
patch("cli.save_config_value", return_value=True) as save_mock:
cli_obj.process_command("/model anthropic/claude-sonnet-4.5")
output = capsys.readouterr().out
assert "saved to config" in output
assert cli_obj.model == "anthropic/claude-sonnet-4.5"
save_mock.assert_called_once()
def test_show_model_when_no_argument(self, capsys):
cli_obj = self._make_cli()
cli_obj.process_command("/model")
output = capsys.readouterr().out
assert "anthropic/claude-opus-4.6" in output
assert "OpenRouter" in output
assert "Authenticated providers" in output or "Switch model" in output
assert "provider" in output and "model" in output
# -- provider switching tests -------------------------------------------
def test_provider_colon_model_switches_provider(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.runtime_provider.resolve_runtime_provider", return_value={
"provider": "zai",
"api_key": "zai-key",
"base_url": "https://api.z.ai/api/paas/v4",
}), \
patch("hermes_cli.models.fetch_api_models",
return_value=["glm-5", "glm-4.7"]), \
patch("cli.save_config_value", return_value=True) as save_mock:
cli_obj.process_command("/model zai:glm-5")
output = capsys.readouterr().out
assert "glm-5" in output
assert "provider:" in output.lower() or "Z.AI" in output
assert cli_obj.model == "glm-5"
assert cli_obj.provider == "zai"
assert cli_obj.base_url == "https://api.z.ai/api/paas/v4"
# Model, provider, and base_url should be saved
assert save_mock.call_count == 3
save_calls = [c.args for c in save_mock.call_args_list]
assert ("model.default", "glm-5") in save_calls
assert ("model.provider", "zai") in save_calls
# base_url is also persisted on provider change (Phase 2 fix)
assert any(c[0] == "model.base_url" for c in save_calls)
def test_provider_switch_fails_on_bad_credentials(self, capsys):
cli_obj = self._make_cli()
with patch("hermes_cli.runtime_provider.resolve_runtime_provider",
side_effect=Exception("No API key found")):
cli_obj.process_command("/model nous:hermes-3")
output = capsys.readouterr().out
assert "Could not resolve credentials" in output
assert cli_obj.model == "anthropic/claude-opus-4.6" # unchanged
assert cli_obj.provider == "openrouter" # unchanged