mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-05 02:31:47 +00:00
refactor: unify setup wizard provider selection with hermes model
setup_model_provider() had 800+ lines of duplicated provider handling that reimplemented the same credential prompting, OAuth flows, and model selection that hermes model already provides via the _model_flow_* functions. Every new provider had to be added in both places, and the two implementations diverged in config persistence (setup.py did raw YAML writes, _set_model_provider, and _update_config_for_provider depending on the provider — main.py used its own load/save cycle). This caused the #4172 bug: _model_flow_custom saved config to disk but the wizard's final save_config(config) overwrote it with stale values. Fix: extract the core of cmd_model() into select_provider_and_model() and have setup_model_provider() call it. After the call, re-sync the wizard's config dict from disk. Deletes ~800 lines of duplicated provider handling from setup.py. Also fixes cmd_model() double-AuthError crash on fresh installs with no API keys configured.
This commit is contained in:
parent
89d8127772
commit
491e79bca9
4 changed files with 283 additions and 1418 deletions
|
|
@ -1,6 +1,8 @@
|
|||
"""Tests for setup_model_provider — verifies the delegation to
|
||||
select_provider_and_model() and config dict sync."""
|
||||
import json
|
||||
|
||||
from hermes_cli.auth import _update_config_for_provider, get_active_provider
|
||||
from hermes_cli.auth import get_active_provider
|
||||
from hermes_cli.config import load_config, save_config
|
||||
from hermes_cli.setup import setup_model_provider
|
||||
|
||||
|
|
@ -23,270 +25,198 @@ def _clear_provider_env(monkeypatch):
|
|||
monkeypatch.delenv(key, raising=False)
|
||||
|
||||
|
||||
def _stub_tts(monkeypatch):
|
||||
"""Stub out TTS prompts so setup_model_provider doesn't block."""
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", lambda q, c, d=0: (
|
||||
_maybe_keep_current_tts(q, c) if _maybe_keep_current_tts(q, c) is not None
|
||||
else d
|
||||
))
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
|
||||
|
||||
def test_nous_oauth_setup_keeps_current_model_when_syncing_disk_provider(
|
||||
tmp_path, monkeypatch
|
||||
):
|
||||
|
||||
def _write_model_config(tmp_path, provider, base_url="", model_name="test-model"):
|
||||
"""Simulate what a _model_flow_* function writes to disk."""
|
||||
cfg = load_config()
|
||||
m = cfg.get("model")
|
||||
if not isinstance(m, dict):
|
||||
m = {"default": m} if m else {}
|
||||
cfg["model"] = m
|
||||
m["provider"] = provider
|
||||
if base_url:
|
||||
m["base_url"] = base_url
|
||||
if model_name:
|
||||
m["default"] = model_name
|
||||
save_config(cfg)
|
||||
|
||||
|
||||
def test_setup_delegates_to_select_provider_and_model(tmp_path, monkeypatch):
|
||||
"""setup_model_provider calls select_provider_and_model and syncs config."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 1 # Nous Portal
|
||||
if question == "Configure vision:":
|
||||
return len(choices) - 1
|
||||
if question == "Select default model:":
|
||||
assert choices[-1] == "Keep current (anthropic/claude-opus-4.6)"
|
||||
return len(choices) - 1
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
def fake_select():
|
||||
_write_model_config(tmp_path, "custom", "http://localhost:11434/v1", "qwen3.5:32b")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
|
||||
def _fake_login_nous(*args, **kwargs):
|
||||
auth_path = tmp_path / "auth.json"
|
||||
auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}}))
|
||||
_update_config_for_provider("nous", "https://inference.example.com/v1")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.auth._login_nous", _fake_login_nous)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.auth.resolve_nous_runtime_credentials",
|
||||
lambda *args, **kwargs: {
|
||||
"base_url": "https://inference.example.com/v1",
|
||||
"api_key": "nous-key",
|
||||
},
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.auth.fetch_nous_models",
|
||||
lambda *args, **kwargs: ["gemini-3-flash"],
|
||||
)
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "custom"
|
||||
assert reloaded["model"]["base_url"] == "http://localhost:11434/v1"
|
||||
assert reloaded["model"]["default"] == "qwen3.5:32b"
|
||||
|
||||
|
||||
def test_setup_syncs_openrouter_from_disk(tmp_path, monkeypatch):
|
||||
"""When select_provider_and_model saves OpenRouter config to disk,
|
||||
the wizard's config dict picks it up."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
assert isinstance(config.get("model"), str) # fresh install
|
||||
|
||||
def fake_select():
|
||||
_write_model_config(tmp_path, "openrouter", model_name="anthropic/claude-opus-4.6")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "openrouter"
|
||||
|
||||
|
||||
def test_setup_syncs_nous_from_disk(tmp_path, monkeypatch):
|
||||
"""Nous OAuth writes config to disk; wizard config dict must pick it up."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_select():
|
||||
_write_model_config(tmp_path, "nous", "https://inference.example.com/v1", "gemini-3-flash")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "nous"
|
||||
assert reloaded["model"]["base_url"] == "https://inference.example.com/v1"
|
||||
assert reloaded["model"]["default"] == "anthropic/claude-opus-4.6"
|
||||
|
||||
|
||||
def test_custom_setup_clears_active_oauth_provider(tmp_path, monkeypatch):
|
||||
def test_setup_custom_providers_synced(tmp_path, monkeypatch):
|
||||
"""custom_providers written by select_provider_and_model must survive."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
|
||||
auth_path = tmp_path / "auth.json"
|
||||
auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}}))
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 3
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
def fake_select():
|
||||
_write_model_config(tmp_path, "custom", "http://localhost:8080/v1", "llama3")
|
||||
cfg = load_config()
|
||||
cfg["custom_providers"] = [{"name": "Local", "base_url": "http://localhost:8080/v1"}]
|
||||
save_config(cfg)
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
|
||||
# _model_flow_custom uses builtins.input (URL, key, model, context_length)
|
||||
input_values = iter([
|
||||
"https://custom.example/v1",
|
||||
"custom-api-key",
|
||||
"custom/model",
|
||||
"", # context_length (blank = auto-detect)
|
||||
])
|
||||
monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.models.probe_api_models",
|
||||
lambda api_key, base_url: {"models": ["m"], "probed_url": base_url + "/models"},
|
||||
)
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
|
||||
# Core assertion: switching to custom endpoint clears OAuth provider
|
||||
assert get_active_provider() is None
|
||||
|
||||
# Simulate what the real setup wizard does: save_config(config) AFTER
|
||||
# setup_model_provider returns. This is the step that previously
|
||||
# overwrote model.provider/base_url (#4172).
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded.get("model"), dict), (
|
||||
"model should be a dict after custom setup, not "
|
||||
+ repr(type(reloaded.get("model")))
|
||||
)
|
||||
assert reloaded["model"].get("provider") == "custom"
|
||||
assert reloaded["model"].get("default") == "custom/model"
|
||||
assert "custom.example" in reloaded["model"].get("base_url", "")
|
||||
assert reloaded.get("custom_providers") == [{"name": "Local", "base_url": "http://localhost:8080/v1"}]
|
||||
|
||||
|
||||
def test_custom_setup_preserves_provider_after_wizard_save_config(
|
||||
tmp_path, monkeypatch
|
||||
):
|
||||
"""Regression test for #4172: the setup wizard's final save_config(config)
|
||||
must not overwrite model.provider/base_url that _model_flow_custom set.
|
||||
|
||||
Simulates the full flow:
|
||||
1. load config (fresh install — model is a string)
|
||||
2. setup_model_provider picks custom
|
||||
3. wizard calls save_config(config) afterward
|
||||
4. verify resolve_requested_provider returns "custom"
|
||||
"""
|
||||
def test_setup_cancel_preserves_existing_config(tmp_path, monkeypatch):
|
||||
"""When the user cancels provider selection, existing config is preserved."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
# Pre-set a provider
|
||||
_write_model_config(tmp_path, "openrouter", model_name="gpt-4o")
|
||||
|
||||
config = load_config()
|
||||
# Sanity: fresh install has model as a string
|
||||
assert isinstance(config.get("model"), str) or config.get("model") is None
|
||||
assert config["model"]["provider"] == "openrouter"
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 3 # Custom endpoint
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
def fake_select():
|
||||
pass # user cancelled — nothing written to disk
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
|
||||
input_values = iter([
|
||||
"http://localhost:11434/v1", # Ollama URL
|
||||
"", # no API key (local)
|
||||
"qwen3.5:32b", # model name
|
||||
"", # context length (auto-detect)
|
||||
])
|
||||
monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *a, **kw: None)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.models.probe_api_models",
|
||||
lambda api_key, base_url: {"models": ["qwen3.5:32b"], "probed_url": base_url + "/models"},
|
||||
)
|
||||
|
||||
# Full wizard cycle
|
||||
setup_model_provider(config)
|
||||
save_config(config) # ← this is what the real wizard does
|
||||
|
||||
# Verify config on disk
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "custom"
|
||||
assert reloaded["model"]["base_url"] == "http://localhost:11434/v1"
|
||||
assert reloaded["model"]["default"] == "qwen3.5:32b"
|
||||
assert "api_mode" not in reloaded["model"]
|
||||
|
||||
# Verify the runtime resolver sees "custom", not "auto"
|
||||
from hermes_cli.runtime_provider import resolve_requested_provider
|
||||
assert resolve_requested_provider() == "custom"
|
||||
|
||||
|
||||
def test_custom_setup_no_model_name_still_preserves_endpoint(
|
||||
tmp_path, monkeypatch
|
||||
):
|
||||
"""When the user enters a URL and key but skips the model name,
|
||||
model.provider and model.base_url must still survive the wizard's
|
||||
final save_config(config)."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 3
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
|
||||
input_values = iter([
|
||||
"http://192.168.1.50:8080/v1", # URL
|
||||
"my-key", # API key
|
||||
"", # no model name
|
||||
"", # context length
|
||||
])
|
||||
monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *a, **kw: None)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.models.probe_api_models",
|
||||
lambda api_key, base_url: {"models": None, "probed_url": base_url + "/models"},
|
||||
)
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "custom"
|
||||
assert reloaded["model"]["base_url"] == "http://192.168.1.50:8080/v1"
|
||||
assert reloaded["model"]["provider"] == "openrouter"
|
||||
assert reloaded["model"]["default"] == "gpt-4o"
|
||||
|
||||
|
||||
def test_setup_exception_in_select_gracefully_handled(tmp_path, monkeypatch):
|
||||
"""If select_provider_and_model raises, setup continues with existing config."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_select():
|
||||
raise RuntimeError("something broke")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
# Should not raise
|
||||
setup_model_provider(config)
|
||||
|
||||
|
||||
def test_setup_keyboard_interrupt_gracefully_handled(tmp_path, monkeypatch):
|
||||
"""KeyboardInterrupt during provider selection is handled."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_select():
|
||||
raise KeyboardInterrupt()
|
||||
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
|
||||
|
||||
def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, monkeypatch):
|
||||
"""Codex model list fetching uses the runtime access token."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key")
|
||||
_clear_provider_env(monkeypatch)
|
||||
monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key")
|
||||
|
||||
config = load_config()
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 2 # OpenAI Codex
|
||||
if question == "Select default model:":
|
||||
return 0
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
def fake_select():
|
||||
_write_model_config(tmp_path, "openai-codex", "https://api.openai.com/v1", "gpt-4o")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.auth._login_openai_codex", lambda *args, **kwargs: None)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.auth.resolve_codex_runtime_credentials",
|
||||
lambda *args, **kwargs: {
|
||||
"base_url": "https://chatgpt.com/backend-api/codex",
|
||||
"api_key": "codex-access-token",
|
||||
},
|
||||
)
|
||||
|
||||
captured = {}
|
||||
|
||||
def _fake_get_codex_model_ids(access_token=None):
|
||||
captured["access_token"] = access_token
|
||||
return ["gpt-5.2-codex", "gpt-5.2"]
|
||||
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.codex_models.get_codex_model_ids",
|
||||
_fake_get_codex_model_ids,
|
||||
)
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
|
||||
assert captured["access_token"] == "codex-access-token"
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "openai-codex"
|
||||
assert reloaded["model"]["default"] == "gpt-5.2-codex"
|
||||
assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex"
|
||||
|
|
|
|||
|
|
@ -1,9 +1,14 @@
|
|||
"""Regression tests for interactive setup provider/model persistence."""
|
||||
"""Regression tests for interactive setup provider/model persistence.
|
||||
|
||||
Since setup_model_provider delegates to select_provider_and_model()
|
||||
from hermes_cli.main, these tests mock the delegation point and verify
|
||||
that the setup wizard correctly syncs config from disk after the call.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from hermes_cli.config import load_config, save_config, save_env_value
|
||||
from hermes_cli.setup import _print_setup_summary, setup_model_provider
|
||||
from hermes_cli.setup import setup_model_provider
|
||||
|
||||
|
||||
def _maybe_keep_current_tts(question, choices):
|
||||
|
|
@ -13,19 +18,6 @@ def _maybe_keep_current_tts(question, choices):
|
|||
return len(choices) - 1
|
||||
|
||||
|
||||
def _read_env(home):
|
||||
env_path = home / ".env"
|
||||
data = {}
|
||||
if not env_path.exists():
|
||||
return data
|
||||
for line in env_path.read_text().splitlines():
|
||||
if not line or line.startswith("#") or "=" not in line:
|
||||
continue
|
||||
k, v = line.split("=", 1)
|
||||
data[k] = v
|
||||
return data
|
||||
|
||||
|
||||
def _clear_provider_env(monkeypatch):
|
||||
for key in (
|
||||
"HERMES_INFERENCE_PROVIDER",
|
||||
|
|
@ -44,429 +36,173 @@ def _clear_provider_env(monkeypatch):
|
|||
monkeypatch.delenv(key, raising=False)
|
||||
|
||||
|
||||
def _stub_tts(monkeypatch):
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", lambda q, c, d=0: (
|
||||
_maybe_keep_current_tts(q, c) if _maybe_keep_current_tts(q, c) is not None
|
||||
else d
|
||||
))
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
|
||||
|
||||
|
||||
def _write_model_config(provider, base_url="", model_name="test-model"):
|
||||
"""Simulate what a _model_flow_* function writes to disk."""
|
||||
cfg = load_config()
|
||||
m = cfg.get("model")
|
||||
if not isinstance(m, dict):
|
||||
m = {"default": m} if m else {}
|
||||
cfg["model"] = m
|
||||
m["provider"] = provider
|
||||
if base_url:
|
||||
m["base_url"] = base_url
|
||||
else:
|
||||
m.pop("base_url", None)
|
||||
if model_name:
|
||||
m["default"] = model_name
|
||||
m.pop("api_mode", None)
|
||||
save_config(cfg)
|
||||
|
||||
|
||||
def test_setup_keep_current_custom_from_config_does_not_fall_through(tmp_path, monkeypatch):
|
||||
"""Keep-current custom should not fall through to the generic model menu."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1")
|
||||
save_env_value("OPENAI_API_KEY", "custom-key")
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
# Pre-set custom provider
|
||||
_write_model_config("custom", "http://localhost:8080/v1", "local-model")
|
||||
|
||||
config = load_config()
|
||||
config["model"] = {
|
||||
"default": "custom/model",
|
||||
"provider": "custom",
|
||||
"base_url": "https://example.invalid/v1",
|
||||
}
|
||||
save_config(config)
|
||||
assert config["model"]["provider"] == "custom"
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
assert choices[-1] == "Keep current (Custom: https://example.invalid/v1)"
|
||||
return len(choices) - 1
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError("Model menu should not appear for keep-current custom")
|
||||
def fake_select():
|
||||
pass # user chose "cancel" or "keep current"
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
reloaded = load_config()
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "custom"
|
||||
assert reloaded["model"]["default"] == "custom/model"
|
||||
assert reloaded["model"]["base_url"] == "https://example.invalid/v1"
|
||||
assert reloaded["model"]["base_url"] == "http://localhost:8080/v1"
|
||||
|
||||
|
||||
def test_setup_custom_endpoint_saves_working_v1_base_url(tmp_path, monkeypatch):
|
||||
def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(
|
||||
tmp_path, monkeypatch
|
||||
):
|
||||
"""Keeping current provider preserves the config on disk."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
_stub_tts(monkeypatch)
|
||||
|
||||
_write_model_config("zai", "https://open.bigmodel.cn/api/paas/v4", "glm-5")
|
||||
|
||||
config = load_config()
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
return 3 # Custom endpoint
|
||||
if question == "Configure vision:":
|
||||
return len(choices) - 1 # Skip
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
def fake_select():
|
||||
pass # keep current
|
||||
|
||||
# _model_flow_custom uses builtins.input (URL, key, model, context_length)
|
||||
input_values = iter([
|
||||
"http://localhost:8000",
|
||||
"local-key",
|
||||
"llm",
|
||||
"", # context_length (blank = auto-detect)
|
||||
])
|
||||
monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.models.probe_api_models",
|
||||
lambda api_key, base_url: {
|
||||
"models": ["llm"],
|
||||
"probed_url": "http://localhost:8000/v1/models",
|
||||
"resolved_base_url": "http://localhost:8000/v1",
|
||||
"suggested_base_url": "http://localhost:8000/v1",
|
||||
"used_fallback": True,
|
||||
},
|
||||
)
|
||||
monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
env = _read_env(tmp_path)
|
||||
|
||||
# _model_flow_custom saves config to disk (base_url in config, not .env)
|
||||
reloaded = load_config()
|
||||
model_cfg = reloaded.get("model", {})
|
||||
if isinstance(model_cfg, dict):
|
||||
assert model_cfg.get("provider") == "custom"
|
||||
assert model_cfg.get("default") == "llm"
|
||||
assert model_cfg.get("base_url") == "http://localhost:8000/v1"
|
||||
|
||||
|
||||
def test_setup_keep_current_config_provider_uses_provider_specific_model_menu(tmp_path, monkeypatch):
|
||||
"""Keep-current should respect config-backed providers, not fall back to OpenRouter."""
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
config["model"] = {
|
||||
"default": "claude-opus-4-6",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
save_config(config)
|
||||
|
||||
captured = {"provider_choices": None, "model_choices": None}
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
captured["provider_choices"] = list(choices)
|
||||
assert choices[-1] == "Keep current (Anthropic)"
|
||||
return len(choices) - 1
|
||||
if question == "Configure vision:":
|
||||
assert question == "Configure vision:"
|
||||
assert choices[-1] == "Skip for now"
|
||||
return len(choices) - 1
|
||||
if question == "Select default model:":
|
||||
captured["model_choices"] = list(choices)
|
||||
return len(choices) - 1 # keep current model
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.models.provider_model_ids", lambda provider: [])
|
||||
monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
|
||||
|
||||
setup_model_provider(config)
|
||||
save_config(config)
|
||||
|
||||
assert captured["provider_choices"] is not None
|
||||
assert captured["model_choices"] is not None
|
||||
assert captured["model_choices"][0] == "claude-opus-4-6"
|
||||
assert "anthropic/claude-opus-4.6 (recommended)" not in captured["model_choices"]
|
||||
|
||||
|
||||
def test_setup_keep_current_anthropic_can_configure_openai_vision_default(tmp_path, monkeypatch):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
_clear_provider_env(monkeypatch)
|
||||
|
||||
config = load_config()
|
||||
config["model"] = {
|
||||
"default": "claude-opus-4-6",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
save_config(config)
|
||||
|
||||
def fake_prompt_choice(question, choices, default=0):
|
||||
if question == "Select your inference provider:":
|
||||
assert choices[-1] == "Keep current (Anthropic)"
|
||||
return len(choices) - 1
|
||||
if question == "Configure vision:":
|
||||
return 1
|
||||
if question == "Select vision model:":
|
||||
assert choices[-1] == "Use default (gpt-4o-mini)"
|
||||
return len(choices) - 1
|
||||
if question == "Select default model:":
|
||||
assert choices[-1] == "Keep current (claude-opus-4-6)"
|
||||
return len(choices) - 1
|
||||
tts_idx = _maybe_keep_current_tts(question, choices)
|
||||
if tts_idx is not None:
|
||||
return tts_idx
|
||||
raise AssertionError(f"Unexpected prompt_choice call: {question}")
|
||||
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
|
||||
monkeypatch.setattr(
|
||||
"hermes_cli.setup.prompt",
|
||||
lambda message, *args, **kwargs: "sk-openai" if "OpenAI API key" in message else "",
|
||||
)
|
||||
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
|
||||
monkeypatch.setattr("hermes_cli.auth.get_active_provider", lambda: None)
|
||||
monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
|
||||
monkeypatch.setattr("hermes_cli.models.provider_model_ids", lambda provider: [])
|
||||
monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
|
||||
|
||||
setup_model_provider(config)
|
||||
env = _read_env(tmp_path)
|
||||
|
||||
assert env.get("OPENAI_API_KEY") == "sk-openai"
|
||||
assert env.get("AUXILIARY_VISION_MODEL") == "gpt-4o-mini"
|
||||
# Vision base URL saved to config.yaml, not .env
|
||||
reloaded = load_config()
|
||||
vision_cfg = reloaded.get("auxiliary", {}).get("vision", {})
|
||||
assert vision_cfg.get("base_url") == "https://api.openai.com/v1"
|
||||
assert isinstance(reloaded["model"], dict)
|
||||
assert reloaded["model"]["provider"] == "zai"
|
||||
|
||||
|
||||
def test_setup_copilot_uses_gh_auth_and_saves_provider(tmp_path, monkeypatch):
    """Copilot provider saves correctly through delegation.

    setup_model_provider() must delegate provider/model selection to
    select_provider_and_model() and then re-sync its config dict from disk,
    so whatever the delegate persisted is what the wizard ends up with.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)

    config = load_config()

    # Stand-in for the hermes-model flow: persist a copilot model config to
    # disk exactly like the real select_provider_and_model() would.
    def fake_select():
        _write_model_config("copilot", "https://models.github.ai/inference/v1", "gpt-4o")

    # Any raw text prompt at this point would mean the wizard is still doing
    # its own (deleted) provider handling instead of delegating.
    def fake_prompt(message, *args, **kwargs):
        raise AssertionError(f"Unexpected prompt call: {message}")

    monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt)
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)

    setup_model_provider(config)
    # The final wizard save must not clobber what the delegate wrote (#4172).
    save_config(config)

    env = _read_env(tmp_path)
    reloaded = load_config()

    # Copilot auth comes from gh / GITHUB_TOKEN at runtime; nothing is written
    # to .env by the wizard.
    assert env.get("GITHUB_TOKEN") is None
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "copilot"
    assert reloaded["model"]["base_url"] == "https://models.github.ai/inference/v1"
    assert reloaded["model"]["default"] == "gpt-4o"
||||
|
||||
def test_setup_copilot_acp_uses_model_picker_and_saves_provider(tmp_path, monkeypatch):
    """Copilot ACP provider saves correctly through delegation.

    The wizard must not reimplement the ACP model picker; it delegates to
    select_provider_and_model() and re-reads the persisted config afterwards.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)

    config = load_config()

    # Stand-in for the hermes-model flow: persist a copilot-acp model config
    # to disk exactly like the real select_provider_and_model() would.
    def fake_select():
        _write_model_config("copilot-acp", "", "claude-sonnet-4")

    # Any raw text prompt here would mean the wizard bypassed the delegation.
    def fake_prompt(message, *args, **kwargs):
        raise AssertionError(f"Unexpected prompt call: {message}")

    monkeypatch.setattr("hermes_cli.setup.prompt", fake_prompt)
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)

    setup_model_provider(config)
    # The final wizard save must not clobber what the delegate wrote (#4172).
    save_config(config)

    reloaded = load_config()

    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "copilot-acp"
    assert reloaded["model"]["default"] == "claude-sonnet-4"
|
||||
def test_setup_switch_custom_to_codex_clears_custom_endpoint_and_updates_config(
    tmp_path, monkeypatch
):
    """Switching from custom to codex updates config correctly.

    Starts from a persisted 'custom' provider with endpoint overrides in
    .env, delegates the switch to select_provider_and_model(), and verifies
    the wizard's final save reflects the new provider — not stale values.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)

    save_env_value("OPENAI_BASE_URL", "https://example.invalid/v1")
    save_env_value("OPENAI_API_KEY", "sk-custom")
    save_env_value("OPENROUTER_API_KEY", "sk-or")
    # Start with custom
    _write_model_config("custom", "http://localhost:11434/v1", "qwen3.5:32b")

    config = load_config()
    assert config["model"]["provider"] == "custom"

    # Stand-in for the hermes-model flow: persist the codex provider config
    # to disk exactly like the real select_provider_and_model() would.
    def fake_select():
        _write_model_config("openai-codex", "https://api.openai.com/v1", "gpt-4o")

    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: [])
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)

    setup_model_provider(config)
    # The final wizard save must not resurrect the stale 'custom' values (#4172).
    save_config(config)

    reloaded = load_config()

    # OPENAI_BASE_URL is no longer written/cleared in .env — config is authoritative
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "openai-codex"
    assert reloaded["model"]["base_url"] == "https://api.openai.com/v1"
    assert reloaded["model"]["default"] == "gpt-4o"
||||
def test_setup_summary_marks_codex_auth_as_vision_available(tmp_path, monkeypatch, capsys):
    """Codex OAuth tokens alone should satisfy the vision check in the summary.

    With codex auth present but no OPENROUTER_API_KEY, the summary marks
    vision available while still flagging the missing MoA key.
    """
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)

    (tmp_path / "auth.json").write_text(
        '{"active_provider":"openai-codex","providers":{"openai-codex":{"tokens":{"access_token": "***", "refresh_token": "***"}}}}'
    )
    # No external CLIs available; summary must rely on auth.json only.
    monkeypatch.setattr("shutil.which", lambda _name: None)

    _print_setup_summary(load_config(), tmp_path)
    output = capsys.readouterr().out

    assert "Vision (image analysis)" in output
    assert "missing run 'hermes setup' to configure" not in output
    assert "Mixture of Agents" in output
    assert "missing OPENROUTER_API_KEY" in output


def test_setup_switch_preserves_non_model_config(tmp_path, monkeypatch):
    """Provider switch preserves other config sections (terminal, display, etc.)."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)

    # Persist a non-model setting that the provider switch must not touch.
    config = load_config()
    config["terminal"]["timeout"] = 999
    save_config(config)
    config = load_config()

    # Stand-in for the hermes-model flow: persist the new provider to disk.
    def fake_select():
        _write_model_config("openrouter", model_name="gpt-4o")

    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)

    setup_model_provider(config)
    save_config(config)

    reloaded = load_config()
    assert reloaded["terminal"]["timeout"] == 999
    assert reloaded["model"]["provider"] == "openrouter"


def test_setup_summary_marks_anthropic_auth_as_vision_available(tmp_path, monkeypatch, capsys):
    """An Anthropic API key should satisfy the vision check in the summary."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-api03-key")
    monkeypatch.setattr("shutil.which", lambda _name: None)
    monkeypatch.setattr("agent.auxiliary_client.get_available_vision_backends", lambda: ["anthropic"])

    _print_setup_summary(load_config(), tmp_path)
    output = capsys.readouterr().out

    assert "Vision (image analysis)" in output
    assert "missing run 'hermes setup' to configure" not in output
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue